mirror of
https://github.com/CherryHQ/cherry-studio.git
synced 2026-01-13 21:57:30 +08:00
Compare commits
94 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9414f13f6d | ||
|
|
cbeda03acb | ||
|
|
cea36d170b | ||
|
|
d84b84eb2f | ||
|
|
c7c380d706 | ||
|
|
622e3f0db6 | ||
|
|
e5a2980da8 | ||
|
|
5b5e190132 | ||
|
|
e8e8f028f3 | ||
|
|
8ab082ceb5 | ||
|
|
864eda68fb | ||
|
|
c5ea42ca3a | ||
|
|
bdf8f103c8 | ||
|
|
7a7089e315 | ||
|
|
9b8420f9b9 | ||
|
|
29d8c4a7ed | ||
|
|
76cc196667 | ||
|
|
61aae7376a | ||
|
|
74e1d0887d | ||
|
|
2a1722bb52 | ||
|
|
7ff6955870 | ||
|
|
008df2d4b7 | ||
|
|
8223c9fbfd | ||
|
|
153c1024f6 | ||
|
|
43a48a4a38 | ||
|
|
0cb3bd8311 | ||
|
|
2f67b63057 | ||
|
|
81ea847989 | ||
|
|
1d07e89e38 | ||
|
|
90cd06d23d | ||
|
|
8d56bf80dd | ||
|
|
7766438853 | ||
|
|
3ec6e1167f | ||
|
|
b83fbc0ace | ||
|
|
040f4daa98 | ||
|
|
d0a1512f23 | ||
|
|
2777af77d8 | ||
|
|
6d15b0dfd1 | ||
|
|
334b9bbe04 | ||
|
|
ed3401a016 | ||
|
|
91b6ed81cc | ||
|
|
c940b5613f | ||
|
|
6b0bb64795 | ||
|
|
116ee6f94b | ||
|
|
af7896b900 | ||
|
|
bb9b73557b | ||
|
|
a5038ac844 | ||
|
|
9e45f801a8 | ||
|
|
313dac0f64 | ||
|
|
76ee67d4d7 | ||
|
|
2a31fa2ad5 | ||
|
|
c4f372feba | ||
|
|
ad164f2c1b | ||
|
|
ca3ddff00e | ||
|
|
b4aeced1f9 | ||
|
|
d27d750bc5 | ||
|
|
a2639053ef | ||
|
|
68a75dc4e3 | ||
|
|
4c67e5b43a | ||
|
|
2383fd06db | ||
|
|
f8519f0bf0 | ||
|
|
2012378341 | ||
|
|
86adb2e11c | ||
|
|
680bda3993 | ||
|
|
acd1ecc09c | ||
|
|
e3d1996254 | ||
|
|
56cf347909 | ||
|
|
2a3955919e | ||
|
|
ca2b0ac28d | ||
|
|
078cf39313 | ||
|
|
48a582820f | ||
|
|
77e024027c | ||
|
|
d391e55a8a | ||
|
|
f878c8ab3b | ||
|
|
33cdcaa558 | ||
|
|
bc9eeb9f30 | ||
|
|
068cf1083c | ||
|
|
ed4353b054 | ||
|
|
528d6d37f2 | ||
|
|
efbe64e5da | ||
|
|
cccf9bb7be | ||
|
|
c242860abc | ||
|
|
cb93eee29d | ||
|
|
5ff173fcc7 | ||
|
|
b78df05f28 | ||
|
|
c13dc6eab5 | ||
|
|
2008d70707 | ||
|
|
723fa11647 | ||
|
|
9586f38157 | ||
|
|
401d66f3dd | ||
|
|
99b431ec92 | ||
|
|
ab3bce33b8 | ||
|
|
0f0e18231d | ||
|
|
4ae9bf8ff4 |
58
.github/workflows/auto-i18n.yml
vendored
58
.github/workflows/auto-i18n.yml
vendored
@ -32,38 +32,37 @@ jobs:
|
||||
with:
|
||||
node-version: 22
|
||||
|
||||
- name: 📦 Install corepack
|
||||
run: corepack enable && corepack prepare yarn@4.9.1 --activate
|
||||
- name: 📦 Install pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
|
||||
- name: 📂 Get yarn cache directory path
|
||||
id: yarn-cache-dir-path
|
||||
run: echo "dir=$(yarn config get cacheFolder)" >> $GITHUB_OUTPUT
|
||||
- name: 📂 Get pnpm store directory
|
||||
id: pnpm-cache
|
||||
shell: bash
|
||||
run: echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: 💾 Cache yarn dependencies
|
||||
- name: 💾 Cache pnpm dependencies
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
${{ steps.yarn-cache-dir-path.outputs.dir }}
|
||||
node_modules
|
||||
key: ${{ runner.os }}-yarn-${{ hashFiles('**/yarn.lock') }}
|
||||
path: ${{ steps.pnpm-cache.outputs.STORE_PATH }}
|
||||
key: ${{ runner.os }}-pnpm-${{ hashFiles('**/pnpm-lock.yaml') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-yarn-
|
||||
${{ runner.os }}-pnpm-
|
||||
|
||||
- name: 📦 Install dependencies
|
||||
run: |
|
||||
yarn install
|
||||
pnpm install
|
||||
|
||||
- name: 🏃♀️ Translate
|
||||
run: yarn i18n:sync && yarn i18n:translate
|
||||
run: pnpm i18n:sync && pnpm i18n:translate
|
||||
|
||||
- name: 🔍 Format
|
||||
run: yarn format
|
||||
run: pnpm format
|
||||
|
||||
- name: 🔍 Check for changes
|
||||
id: git_status
|
||||
run: |
|
||||
# Check if there are any uncommitted changes
|
||||
git reset -- package.json yarn.lock # 不提交 package.json 和 yarn.lock 的更改
|
||||
git reset -- package.json pnpm-lock.yaml # 不提交 package.json 和 pnpm-lock.yaml 的更改
|
||||
git diff --exit-code --quiet || echo "::set-output name=has_changes::true"
|
||||
git status --porcelain
|
||||
|
||||
@ -73,7 +72,7 @@ jobs:
|
||||
|
||||
- name: 🚀 Create Pull Request if changes exist
|
||||
if: steps.git_status.outputs.has_changes == 'true'
|
||||
uses: peter-evans/create-pull-request@v6
|
||||
uses: peter-evans/create-pull-request@v8
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }} # Use the built-in GITHUB_TOKEN for bot actions
|
||||
commit-message: "feat(bot): Weekly automated script run"
|
||||
@ -91,3 +90,30 @@ jobs:
|
||||
- name: 📢 Notify if no changes
|
||||
if: steps.git_status.outputs.has_changes != 'true'
|
||||
run: echo "Bot script ran, but no changes were detected. No PR created."
|
||||
|
||||
- name: Send failure notification to Feishu
|
||||
if: always() && (failure() || cancelled())
|
||||
shell: bash
|
||||
env:
|
||||
FEISHU_WEBHOOK_URL: ${{ secrets.FEISHU_WEBHOOK_URL }}
|
||||
FEISHU_WEBHOOK_SECRET: ${{ secrets.FEISHU_WEBHOOK_SECRET }}
|
||||
RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
|
||||
JOB_STATUS: ${{ job.status }}
|
||||
run: |
|
||||
# Determine status and color
|
||||
if [ "$JOB_STATUS" = "cancelled" ]; then
|
||||
STATUS_TEXT="已取消"
|
||||
COLOR="orange"
|
||||
else
|
||||
STATUS_TEXT="失败"
|
||||
COLOR="red"
|
||||
fi
|
||||
|
||||
# Build description using printf
|
||||
DESCRIPTION=$(printf "**状态:** %s\n\n**工作流:** [查看详情](%s)" "$STATUS_TEXT" "$RUN_URL")
|
||||
|
||||
# Send notification
|
||||
pnpm tsx scripts/feishu-notify.ts send \
|
||||
-t "自动国际化${STATUS_TEXT}" \
|
||||
-d "$DESCRIPTION" \
|
||||
-c "${COLOR}"
|
||||
|
||||
88
.github/workflows/github-issue-tracker.yml
vendored
88
.github/workflows/github-issue-tracker.yml
vendored
@ -58,14 +58,34 @@ jobs:
|
||||
with:
|
||||
node-version: 22
|
||||
|
||||
- name: Install pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
|
||||
- name: Get pnpm store directory
|
||||
id: pnpm-cache
|
||||
shell: bash
|
||||
run: echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Cache pnpm dependencies
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ${{ steps.pnpm-cache.outputs.STORE_PATH }}
|
||||
key: ${{ runner.os }}-pnpm-${{ hashFiles('**/pnpm-lock.yaml') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pnpm-
|
||||
|
||||
- name: Install dependencies
|
||||
if: steps.check_time.outputs.should_delay == 'false'
|
||||
run: pnpm install
|
||||
|
||||
- name: Process issue with Claude
|
||||
if: steps.check_time.outputs.should_delay == 'false'
|
||||
uses: anthropics/claude-code-action@main
|
||||
uses: anthropics/claude-code-action@v1
|
||||
with:
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
allowed_non_write_users: "*"
|
||||
anthropic_api_key: ${{ secrets.CLAUDE_TRANSLATOR_APIKEY }}
|
||||
claude_args: "--allowed-tools Bash(gh issue:*),Bash(node scripts/feishu-notify.js)"
|
||||
claude_args: "--allowed-tools Bash(gh issue:*),Bash(pnpm tsx scripts/feishu-notify.ts*)"
|
||||
prompt: |
|
||||
你是一个GitHub Issue自动化处理助手。请完成以下任务:
|
||||
|
||||
@ -74,9 +94,14 @@ jobs:
|
||||
- 标题:${{ github.event.issue.title }}
|
||||
- 作者:${{ github.event.issue.user.login }}
|
||||
- URL:${{ github.event.issue.html_url }}
|
||||
- 内容:${{ github.event.issue.body }}
|
||||
- 标签:${{ join(github.event.issue.labels.*.name, ', ') }}
|
||||
|
||||
### Issue body
|
||||
|
||||
`````md
|
||||
${{ github.event.issue.body }}
|
||||
`````
|
||||
|
||||
## 任务步骤
|
||||
|
||||
1. **分析并总结issue**
|
||||
@ -86,20 +111,20 @@ jobs:
|
||||
- 重要的技术细节
|
||||
|
||||
2. **发送飞书通知**
|
||||
使用以下命令发送飞书通知(注意:ISSUE_SUMMARY需要用引号包裹):
|
||||
使用CLI工具发送飞书通知,参考以下示例:
|
||||
```bash
|
||||
ISSUE_URL="${{ github.event.issue.html_url }}" \
|
||||
ISSUE_NUMBER="${{ github.event.issue.number }}" \
|
||||
ISSUE_TITLE="${{ github.event.issue.title }}" \
|
||||
ISSUE_AUTHOR="${{ github.event.issue.user.login }}" \
|
||||
ISSUE_LABELS="${{ join(github.event.issue.labels.*.name, ',') }}" \
|
||||
ISSUE_SUMMARY="<你生成的中文总结>" \
|
||||
node scripts/feishu-notify.js
|
||||
pnpm tsx scripts/feishu-notify.ts issue \
|
||||
-u "${{ github.event.issue.html_url }}" \
|
||||
-n "${{ github.event.issue.number }}" \
|
||||
-t "${{ github.event.issue.title }}" \
|
||||
-a "${{ github.event.issue.user.login }}" \
|
||||
-l "${{ join(github.event.issue.labels.*.name, ',') }}" \
|
||||
-m "<你生成的中文总结>"
|
||||
```
|
||||
|
||||
## 注意事项
|
||||
- 总结必须使用简体中文
|
||||
- ISSUE_SUMMARY 在传递给 node 命令时需要正确转义特殊字符
|
||||
- 命令行参数需要正确转义特殊字符
|
||||
- 如果issue内容为空,也要提供一个简短的说明
|
||||
|
||||
请开始执行任务!
|
||||
@ -125,13 +150,32 @@ jobs:
|
||||
with:
|
||||
node-version: 22
|
||||
|
||||
- name: Install pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
|
||||
- name: Get pnpm store directory
|
||||
id: pnpm-cache
|
||||
shell: bash
|
||||
run: echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Cache pnpm dependencies
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ${{ steps.pnpm-cache.outputs.STORE_PATH }}
|
||||
key: ${{ runner.os }}-pnpm-${{ hashFiles('**/pnpm-lock.yaml') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pnpm-
|
||||
|
||||
- name: Install dependencies
|
||||
run: pnpm install
|
||||
|
||||
- name: Process pending issues with Claude
|
||||
uses: anthropics/claude-code-action@main
|
||||
uses: anthropics/claude-code-action@v1
|
||||
with:
|
||||
anthropic_api_key: ${{ secrets.CLAUDE_TRANSLATOR_APIKEY }}
|
||||
allowed_non_write_users: "*"
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
claude_args: "--allowed-tools Bash(gh issue:*),Bash(gh api:*),Bash(node scripts/feishu-notify.js)"
|
||||
claude_args: "--allowed-tools Bash(gh issue:*),Bash(gh api:*),Bash(pnpm tsx scripts/feishu-notify.ts*)"
|
||||
prompt: |
|
||||
你是一个GitHub Issue自动化处理助手。请完成以下任务:
|
||||
|
||||
@ -153,15 +197,15 @@ jobs:
|
||||
- 重要的技术细节
|
||||
|
||||
3. **发送飞书通知**
|
||||
对于每个issue,使用以下命令发送飞书通知:
|
||||
使用CLI工具发送飞书通知,参考以下示例:
|
||||
```bash
|
||||
ISSUE_URL="<issue的html_url>" \
|
||||
ISSUE_NUMBER="<issue编号>" \
|
||||
ISSUE_TITLE="<issue标题>" \
|
||||
ISSUE_AUTHOR="<issue作者>" \
|
||||
ISSUE_LABELS="<逗号分隔的标签列表,排除pending-feishu-notification>" \
|
||||
ISSUE_SUMMARY="<你生成的中文总结>" \
|
||||
node scripts/feishu-notify.js
|
||||
pnpm tsx scripts/feishu-notify.ts issue \
|
||||
-u "<issue的html_url>" \
|
||||
-n "<issue编号>" \
|
||||
-t "<issue标题>" \
|
||||
-a "<issue作者>" \
|
||||
-l "<逗号分隔的标签列表,排除pending-feishu-notification>" \
|
||||
-m "<你生成的中文总结>"
|
||||
```
|
||||
|
||||
4. **移除标签**
|
||||
|
||||
29
.github/workflows/nightly-build.yml
vendored
29
.github/workflows/nightly-build.yml
vendored
@ -65,25 +65,24 @@ jobs:
|
||||
run: |
|
||||
brew install python-setuptools
|
||||
|
||||
- name: Install corepack
|
||||
run: corepack enable && corepack prepare yarn@4.9.1 --activate
|
||||
- name: Install pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
|
||||
- name: Get yarn cache directory path
|
||||
id: yarn-cache-dir-path
|
||||
run: echo "dir=$(yarn config get cacheFolder)" >> $GITHUB_OUTPUT
|
||||
- name: Get pnpm store directory
|
||||
id: pnpm-cache
|
||||
shell: bash
|
||||
run: echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Cache yarn dependencies
|
||||
- name: Cache pnpm dependencies
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
${{ steps.yarn-cache-dir-path.outputs.dir }}
|
||||
node_modules
|
||||
key: ${{ runner.os }}-yarn-${{ hashFiles('**/yarn.lock') }}
|
||||
path: ${{ steps.pnpm-cache.outputs.STORE_PATH }}
|
||||
key: ${{ runner.os }}-pnpm-${{ hashFiles('**/pnpm-lock.yaml') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-yarn-
|
||||
${{ runner.os }}-pnpm-
|
||||
|
||||
- name: Install Dependencies
|
||||
run: yarn install
|
||||
run: pnpm install
|
||||
|
||||
- name: Generate date tag
|
||||
id: date
|
||||
@ -94,7 +93,7 @@ jobs:
|
||||
if: matrix.os == 'ubuntu-latest'
|
||||
run: |
|
||||
sudo apt-get install -y rpm
|
||||
yarn build:linux
|
||||
pnpm build:linux
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
NODE_OPTIONS: --max-old-space-size=8192
|
||||
@ -106,7 +105,7 @@ jobs:
|
||||
- name: Build Mac
|
||||
if: matrix.os == 'macos-latest'
|
||||
run: |
|
||||
yarn build:mac
|
||||
pnpm build:mac
|
||||
env:
|
||||
CSC_LINK: ${{ secrets.CSC_LINK }}
|
||||
CSC_KEY_PASSWORD: ${{ secrets.CSC_KEY_PASSWORD }}
|
||||
@ -123,7 +122,7 @@ jobs:
|
||||
- name: Build Windows
|
||||
if: matrix.os == 'windows-latest'
|
||||
run: |
|
||||
yarn build:win
|
||||
pnpm build:win
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
NODE_OPTIONS: --max-old-space-size=8192
|
||||
|
||||
33
.github/workflows/pr-ci.yml
vendored
33
.github/workflows/pr-ci.yml
vendored
@ -28,37 +28,36 @@ jobs:
|
||||
with:
|
||||
node-version: 22
|
||||
|
||||
- name: Install corepack
|
||||
run: corepack enable && corepack prepare yarn@4.9.1 --activate
|
||||
- name: Install pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
|
||||
- name: Get yarn cache directory path
|
||||
id: yarn-cache-dir-path
|
||||
run: echo "dir=$(yarn config get cacheFolder)" >> $GITHUB_OUTPUT
|
||||
- name: Get pnpm store directory
|
||||
id: pnpm-cache
|
||||
shell: bash
|
||||
run: echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Cache yarn dependencies
|
||||
- name: Cache pnpm dependencies
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
${{ steps.yarn-cache-dir-path.outputs.dir }}
|
||||
node_modules
|
||||
key: ${{ runner.os }}-yarn-${{ hashFiles('**/yarn.lock') }}
|
||||
path: ${{ steps.pnpm-cache.outputs.STORE_PATH }}
|
||||
key: ${{ runner.os }}-pnpm-${{ hashFiles('**/pnpm-lock.yaml') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-yarn-
|
||||
${{ runner.os }}-pnpm-
|
||||
|
||||
- name: Install Dependencies
|
||||
run: yarn install
|
||||
run: pnpm install
|
||||
|
||||
- name: Lint Check
|
||||
run: yarn test:lint
|
||||
run: pnpm test:lint
|
||||
|
||||
- name: Format Check
|
||||
run: yarn format:check
|
||||
run: pnpm format:check
|
||||
|
||||
- name: Type Check
|
||||
run: yarn typecheck
|
||||
run: pnpm typecheck
|
||||
|
||||
- name: i18n Check
|
||||
run: yarn i18n:check
|
||||
run: pnpm i18n:check
|
||||
|
||||
- name: Test
|
||||
run: yarn test
|
||||
run: pnpm test
|
||||
|
||||
29
.github/workflows/release.yml
vendored
29
.github/workflows/release.yml
vendored
@ -56,31 +56,30 @@ jobs:
|
||||
run: |
|
||||
brew install python-setuptools
|
||||
|
||||
- name: Install corepack
|
||||
run: corepack enable && corepack prepare yarn@4.9.1 --activate
|
||||
- name: Install pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
|
||||
- name: Get yarn cache directory path
|
||||
id: yarn-cache-dir-path
|
||||
run: echo "dir=$(yarn config get cacheFolder)" >> $GITHUB_OUTPUT
|
||||
- name: Get pnpm store directory
|
||||
id: pnpm-cache
|
||||
shell: bash
|
||||
run: echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Cache yarn dependencies
|
||||
- name: Cache pnpm dependencies
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
${{ steps.yarn-cache-dir-path.outputs.dir }}
|
||||
node_modules
|
||||
key: ${{ runner.os }}-yarn-${{ hashFiles('**/yarn.lock') }}
|
||||
path: ${{ steps.pnpm-cache.outputs.STORE_PATH }}
|
||||
key: ${{ runner.os }}-pnpm-${{ hashFiles('**/pnpm-lock.yaml') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-yarn-
|
||||
${{ runner.os }}-pnpm-
|
||||
|
||||
- name: Install Dependencies
|
||||
run: yarn install
|
||||
run: pnpm install
|
||||
|
||||
- name: Build Linux
|
||||
if: matrix.os == 'ubuntu-latest'
|
||||
run: |
|
||||
sudo apt-get install -y rpm
|
||||
yarn build:linux
|
||||
pnpm build:linux
|
||||
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
@ -94,7 +93,7 @@ jobs:
|
||||
if: matrix.os == 'macos-latest'
|
||||
run: |
|
||||
sudo -H pip install setuptools
|
||||
yarn build:mac
|
||||
pnpm build:mac
|
||||
env:
|
||||
CSC_LINK: ${{ secrets.CSC_LINK }}
|
||||
CSC_KEY_PASSWORD: ${{ secrets.CSC_KEY_PASSWORD }}
|
||||
@ -111,7 +110,7 @@ jobs:
|
||||
- name: Build Windows
|
||||
if: matrix.os == 'windows-latest'
|
||||
run: |
|
||||
yarn build:win
|
||||
pnpm build:win
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
NODE_OPTIONS: --max-old-space-size=8192
|
||||
|
||||
43
.github/workflows/sync-to-gitcode.yml
vendored
43
.github/workflows/sync-to-gitcode.yml
vendored
@ -48,9 +48,8 @@ jobs:
|
||||
with:
|
||||
node-version: 22
|
||||
|
||||
- name: Install corepack
|
||||
shell: bash
|
||||
run: corepack enable && corepack prepare yarn@4.9.1 --activate
|
||||
- name: Install pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
|
||||
- name: Clean node_modules
|
||||
if: ${{ github.event.inputs.clean == 'true' }}
|
||||
@ -59,11 +58,11 @@ jobs:
|
||||
|
||||
- name: Install Dependencies
|
||||
shell: bash
|
||||
run: yarn install
|
||||
run: pnpm install
|
||||
|
||||
- name: Build Windows with code signing
|
||||
shell: bash
|
||||
run: yarn build:win
|
||||
run: pnpm build:win
|
||||
env:
|
||||
WIN_SIGN: true
|
||||
CHERRY_CERT_PATH: ${{ secrets.CHERRY_CERT_PATH }}
|
||||
@ -80,7 +79,7 @@ jobs:
|
||||
shell: bash
|
||||
run: |
|
||||
echo "Built Windows artifacts:"
|
||||
ls -la dist/*.exe dist/*.blockmap dist/latest*.yml
|
||||
ls -la dist/*.exe dist/latest*.yml
|
||||
|
||||
- name: Download GitHub release assets
|
||||
shell: bash
|
||||
@ -113,12 +112,10 @@ jobs:
|
||||
fi
|
||||
|
||||
# Remove unsigned Windows files from downloaded assets
|
||||
# *.exe, *.exe.blockmap, latest.yml (Windows only)
|
||||
rm -f release-assets/*.exe release-assets/*.exe.blockmap release-assets/latest.yml 2>/dev/null || true
|
||||
rm -f release-assets/*.exe release-assets/latest.yml 2>/dev/null || true
|
||||
|
||||
# Copy signed Windows files with error checking
|
||||
cp dist/*.exe release-assets/ || { echo "ERROR: Failed to copy .exe files"; exit 1; }
|
||||
cp dist/*.exe.blockmap release-assets/ || { echo "ERROR: Failed to copy .blockmap files"; exit 1; }
|
||||
cp dist/latest.yml release-assets/ || { echo "ERROR: Failed to copy latest.yml"; exit 1; }
|
||||
|
||||
echo "Final release assets:"
|
||||
@ -303,3 +300,31 @@ jobs:
|
||||
run: |
|
||||
rm -f /tmp/release_payload.json /tmp/upload_headers.txt release_body.txt
|
||||
rm -rf release-assets/
|
||||
|
||||
- name: Send failure notification to Feishu
|
||||
if: always() && (failure() || cancelled())
|
||||
shell: bash
|
||||
env:
|
||||
FEISHU_WEBHOOK_URL: ${{ secrets.FEISHU_WEBHOOK_URL }}
|
||||
FEISHU_WEBHOOK_SECRET: ${{ secrets.FEISHU_WEBHOOK_SECRET }}
|
||||
TAG_NAME: ${{ steps.get-tag.outputs.tag }}
|
||||
RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
|
||||
JOB_STATUS: ${{ job.status }}
|
||||
run: |
|
||||
# Determine status and color
|
||||
if [ "$JOB_STATUS" = "cancelled" ]; then
|
||||
STATUS_TEXT="已取消"
|
||||
COLOR="orange"
|
||||
else
|
||||
STATUS_TEXT="失败"
|
||||
COLOR="red"
|
||||
fi
|
||||
|
||||
# Build description using printf
|
||||
DESCRIPTION=$(printf "**标签:** %s\n\n**状态:** %s\n\n**工作流:** [查看详情](%s)" "$TAG_NAME" "$STATUS_TEXT" "$RUN_URL")
|
||||
|
||||
# Send notification
|
||||
pnpm tsx scripts/feishu-notify.ts send \
|
||||
-t "GitCode 同步${STATUS_TEXT}" \
|
||||
-d "$DESCRIPTION" \
|
||||
-c "${COLOR}"
|
||||
|
||||
@ -154,14 +154,15 @@ jobs:
|
||||
with:
|
||||
node-version: 22
|
||||
|
||||
- name: Enable Corepack
|
||||
- name: Enable corepack
|
||||
if: steps.check.outputs.should_run == 'true'
|
||||
run: corepack enable && corepack prepare yarn@4.9.1 --activate
|
||||
working-directory: main
|
||||
run: corepack enable pnpm
|
||||
|
||||
- name: Install dependencies
|
||||
if: steps.check.outputs.should_run == 'true'
|
||||
working-directory: main
|
||||
run: yarn install --immutable
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Update upgrade config
|
||||
if: steps.check.outputs.should_run == 'true'
|
||||
@ -170,7 +171,7 @@ jobs:
|
||||
RELEASE_TAG: ${{ steps.meta.outputs.tag }}
|
||||
IS_PRERELEASE: ${{ steps.check.outputs.is_prerelease }}
|
||||
run: |
|
||||
yarn tsx scripts/update-app-upgrade-config.ts \
|
||||
pnpm tsx scripts/update-app-upgrade-config.ts \
|
||||
--tag "$RELEASE_TAG" \
|
||||
--config ../cs/app-upgrade-config.json \
|
||||
--is-prerelease "$IS_PRERELEASE"
|
||||
|
||||
@ -1 +1 @@
|
||||
yarn lint-staged
|
||||
pnpm lint-staged
|
||||
|
||||
2
.npmrc
2
.npmrc
@ -1 +1 @@
|
||||
electron_mirror=https://npmmirror.com/mirrors/electron/
|
||||
electron_mirror=https://npmmirror.com/mirrors/electron/
|
||||
|
||||
@ -1,35 +0,0 @@
|
||||
diff --git a/sdk.mjs b/sdk.mjs
|
||||
index dea7766a3432a1e809f12d6daba4f2834a219689..e0b02ef73da177ba32b903887d7bbbeaa08cc6d3 100755
|
||||
--- a/sdk.mjs
|
||||
+++ b/sdk.mjs
|
||||
@@ -6250,7 +6250,7 @@ function createAbortController(maxListeners = DEFAULT_MAX_LISTENERS) {
|
||||
}
|
||||
|
||||
// ../src/transport/ProcessTransport.ts
|
||||
-import { spawn } from "child_process";
|
||||
+import { fork } from "child_process";
|
||||
import { createInterface } from "readline";
|
||||
|
||||
// ../src/utils/fsOperations.ts
|
||||
@@ -6644,18 +6644,11 @@ class ProcessTransport {
|
||||
const errorMessage = isNativeBinary(pathToClaudeCodeExecutable) ? `Claude Code native binary not found at ${pathToClaudeCodeExecutable}. Please ensure Claude Code is installed via native installer or specify a valid path with options.pathToClaudeCodeExecutable.` : `Claude Code executable not found at ${pathToClaudeCodeExecutable}. Is options.pathToClaudeCodeExecutable set?`;
|
||||
throw new ReferenceError(errorMessage);
|
||||
}
|
||||
- const isNative = isNativeBinary(pathToClaudeCodeExecutable);
|
||||
- const spawnCommand = isNative ? pathToClaudeCodeExecutable : executable;
|
||||
- const spawnArgs = isNative ? [...executableArgs, ...args] : [...executableArgs, pathToClaudeCodeExecutable, ...args];
|
||||
- const spawnMessage = isNative ? `Spawning Claude Code native binary: ${spawnCommand} ${spawnArgs.join(" ")}` : `Spawning Claude Code process: ${spawnCommand} ${spawnArgs.join(" ")}`;
|
||||
- logForSdkDebugging(spawnMessage);
|
||||
- if (stderr) {
|
||||
- stderr(spawnMessage);
|
||||
- }
|
||||
+ logForSdkDebugging(`Forking Claude Code Node.js process: ${pathToClaudeCodeExecutable} ${args.join(" ")}`);
|
||||
const stderrMode = env.DEBUG_CLAUDE_AGENT_SDK || stderr ? "pipe" : "ignore";
|
||||
- this.child = spawn(spawnCommand, spawnArgs, {
|
||||
+ this.child = fork(pathToClaudeCodeExecutable, args, {
|
||||
cwd,
|
||||
- stdio: ["pipe", "pipe", stderrMode],
|
||||
+ stdio: stderrMode === "pipe" ? ["pipe", "pipe", "pipe", "ipc"] : ["pipe", "pipe", "ignore", "ipc"],
|
||||
signal: this.abortController.signal,
|
||||
env
|
||||
});
|
||||
BIN
.yarn/releases/yarn-4.9.1.cjs
vendored
BIN
.yarn/releases/yarn-4.9.1.cjs
vendored
Binary file not shown.
@ -1,9 +0,0 @@
|
||||
enableImmutableInstalls: false
|
||||
|
||||
httpTimeout: 300000
|
||||
|
||||
nodeLinker: node-modules
|
||||
|
||||
yarnPath: .yarn/releases/yarn-4.9.1.cjs
|
||||
npmRegistryServer: https://registry.npmjs.org
|
||||
npmPublishRegistry: https://registry.npmjs.org
|
||||
26
CLAUDE.md
26
CLAUDE.md
@ -10,7 +10,7 @@ This file provides guidance to AI coding assistants when working with code in th
|
||||
- **Log centrally**: Route all logging through `loggerService` with the right context—no `console.log`.
|
||||
- **Research via subagent**: Lean on `subagent` for external docs, APIs, news, and references.
|
||||
- **Always propose before executing**: Before making any changes, clearly explain your planned approach and wait for explicit user approval to ensure alignment and prevent unwanted modifications.
|
||||
- **Lint, test, and format before completion**: Coding tasks are only complete after running `yarn lint`, `yarn test`, and `yarn format` successfully.
|
||||
- **Lint, test, and format before completion**: Coding tasks are only complete after running `pnpm lint`, `pnpm test`, and `pnpm format` successfully.
|
||||
- **Write conventional commits**: Commit small, focused changes using Conventional Commit messages (e.g., `feat:`, `fix:`, `refactor:`, `docs:`).
|
||||
|
||||
## Pull Request Workflow (CRITICAL)
|
||||
@ -24,18 +24,18 @@ When creating a Pull Request, you MUST:
|
||||
|
||||
## Development Commands
|
||||
|
||||
- **Install**: `yarn install` - Install all project dependencies
|
||||
- **Development**: `yarn dev` - Runs Electron app in development mode with hot reload
|
||||
- **Debug**: `yarn debug` - Starts with debugging enabled, use `chrome://inspect` to attach debugger
|
||||
- **Build Check**: `yarn build:check` - **REQUIRED** before commits (lint + test + typecheck)
|
||||
- If having i18n sort issues, run `yarn i18n:sync` first to sync template
|
||||
- If having formatting issues, run `yarn format` first
|
||||
- **Test**: `yarn test` - Run all tests (Vitest) across main and renderer processes
|
||||
- **Install**: `pnpm install` - Install all project dependencies
|
||||
- **Development**: `pnpm dev` - Runs Electron app in development mode with hot reload
|
||||
- **Debug**: `pnpm debug` - Starts with debugging enabled, use `chrome://inspect` to attach debugger
|
||||
- **Build Check**: `pnpm build:check` - **REQUIRED** before commits (lint + test + typecheck)
|
||||
- If having i18n sort issues, run `pnpm i18n:sync` first to sync template
|
||||
- If having formatting issues, run `pnpm format` first
|
||||
- **Test**: `pnpm test` - Run all tests (Vitest) across main and renderer processes
|
||||
- **Single Test**:
|
||||
- `yarn test:main` - Run tests for main process only
|
||||
- `yarn test:renderer` - Run tests for renderer process only
|
||||
- **Lint**: `yarn lint` - Fix linting issues and run TypeScript type checking
|
||||
- **Format**: `yarn format` - Auto-format code using Biome
|
||||
- `pnpm test:main` - Run tests for main process only
|
||||
- `pnpm test:renderer` - Run tests for renderer process only
|
||||
- **Lint**: `pnpm lint` - Fix linting issues and run TypeScript type checking
|
||||
- **Format**: `pnpm format` - Auto-format code using Biome
|
||||
|
||||
## Project Architecture
|
||||
|
||||
@ -49,7 +49,7 @@ When creating a Pull Request, you MUST:
|
||||
|
||||
- **AI Core** (`src/renderer/src/aiCore/`): Middleware pipeline for multiple AI providers.
|
||||
- **Services** (`src/main/services/`): MCPService, KnowledgeService, WindowService, etc.
|
||||
- **Build System**: Electron-Vite with experimental rolldown-vite, yarn workspaces.
|
||||
- **Build System**: Electron-Vite with experimental rolldown-vite, pnpm workspaces.
|
||||
- **State Management**: Redux Toolkit (`src/renderer/src/store/`) for predictable state.
|
||||
|
||||
### Logging
|
||||
|
||||
14
README.md
14
README.md
@ -34,7 +34,7 @@
|
||||
</a>
|
||||
</h1>
|
||||
|
||||
<p align="center">English | <a href="./docs/zh/README.md">中文</a> | <a href="https://cherry-ai.com">Official Site</a> | <a href="https://docs.cherry-ai.com/cherry-studio-wen-dang/en-us">Documents</a> | <a href="./docs/en/guides/development.md">Development</a> | <a href="https://github.com/CherryHQ/cherry-studio/issues">Feedback</a><br></p>
|
||||
<p align="center">English | <a href="./docs/zh/README.md">中文</a> | <a href="https://cherry-ai.com">Official Site</a> | <a href="https://docs.cherry-ai.com/docs/en-us">Documents</a> | <a href="./docs/en/guides/development.md">Development</a> | <a href="https://github.com/CherryHQ/cherry-studio/issues">Feedback</a><br></p>
|
||||
|
||||
<div align="center">
|
||||
|
||||
@ -242,12 +242,12 @@ The Enterprise Edition addresses core challenges in team collaboration by centra
|
||||
|
||||
## Version Comparison
|
||||
|
||||
| Feature | Community Edition | Enterprise Edition |
|
||||
| :---------------- | :----------------------------------------- | :-------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Open Source** | ✅ Yes | ⭕️ Partially released to customers |
|
||||
| Feature | Community Edition | Enterprise Edition |
|
||||
| :---------------- | :----------------------------------------------------------------------------------- | :-------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Open Source** | ✅ Yes | ⭕️ Partially released to customers |
|
||||
| **Cost** | [AGPL-3.0 License](https://github.com/CherryHQ/cherry-studio?tab=AGPL-3.0-1-ov-file) | Buyout / Subscription Fee |
|
||||
| **Admin Backend** | — | ● Centralized **Model** Access<br>● **Employee** Management<br>● Shared **Knowledge Base**<br>● **Access** Control<br>● **Data** Backup |
|
||||
| **Server** | — | ✅ Dedicated Private Deployment |
|
||||
| **Admin Backend** | — | ● Centralized **Model** Access<br>● **Employee** Management<br>● Shared **Knowledge Base**<br>● **Access** Control<br>● **Data** Backup |
|
||||
| **Server** | — | ✅ Dedicated Private Deployment |
|
||||
|
||||
## Get the Enterprise Edition
|
||||
|
||||
@ -275,7 +275,7 @@ We believe the Enterprise Edition will become your team's AI productivity engine
|
||||
|
||||
# 📊 GitHub Stats
|
||||
|
||||

|
||||

|
||||
|
||||
# ⭐️ Star History
|
||||
|
||||
|
||||
@ -50,7 +50,8 @@
|
||||
"!*.json",
|
||||
"!src/main/integration/**",
|
||||
"!**/tailwind.css",
|
||||
"!**/package.json"
|
||||
"!**/package.json",
|
||||
"!.zed/**"
|
||||
],
|
||||
"indentStyle": "space",
|
||||
"indentWidth": 2,
|
||||
|
||||
@ -11,7 +11,7 @@
|
||||
### Install
|
||||
|
||||
```bash
|
||||
yarn
|
||||
pnpm install
|
||||
```
|
||||
|
||||
### Development
|
||||
@ -20,35 +20,35 @@ yarn
|
||||
|
||||
Download and install [Node.js v22.x.x](https://nodejs.org/en/download)
|
||||
|
||||
### Setup Yarn
|
||||
### Setup pnpm
|
||||
|
||||
```bash
|
||||
corepack enable
|
||||
corepack prepare yarn@4.9.1 --activate
|
||||
corepack prepare pnpm@10.27.0 --activate
|
||||
```
|
||||
|
||||
### Install Dependencies
|
||||
|
||||
```bash
|
||||
yarn install
|
||||
pnpm install
|
||||
```
|
||||
|
||||
### ENV
|
||||
|
||||
```bash
|
||||
copy .env.example .env
|
||||
cp .env.example .env
|
||||
```
|
||||
|
||||
### Start
|
||||
|
||||
```bash
|
||||
yarn dev
|
||||
pnpm dev
|
||||
```
|
||||
|
||||
### Debug
|
||||
|
||||
```bash
|
||||
yarn debug
|
||||
pnpm debug
|
||||
```
|
||||
|
||||
Then input chrome://inspect in browser
|
||||
@ -56,18 +56,18 @@ Then input chrome://inspect in browser
|
||||
### Test
|
||||
|
||||
```bash
|
||||
yarn test
|
||||
pnpm test
|
||||
```
|
||||
|
||||
### Build
|
||||
|
||||
```bash
|
||||
# For windows
|
||||
$ yarn build:win
|
||||
$ pnpm build:win
|
||||
|
||||
# For macOS
|
||||
$ yarn build:mac
|
||||
$ pnpm build:mac
|
||||
|
||||
# For Linux
|
||||
$ yarn build:linux
|
||||
$ pnpm build:linux
|
||||
```
|
||||
|
||||
@ -116,7 +116,7 @@ This script checks:
|
||||
- Whether keys are properly sorted
|
||||
|
||||
```bash
|
||||
yarn i18n:check
|
||||
pnpm i18n:check
|
||||
```
|
||||
|
||||
### `i18n:sync` - Synchronize JSON Structure and Sort Order
|
||||
@ -128,7 +128,7 @@ This script uses `zh-cn.json` as the source of truth to sync structure across al
|
||||
3. Sorting keys automatically
|
||||
|
||||
```bash
|
||||
yarn i18n:sync
|
||||
pnpm i18n:sync
|
||||
```
|
||||
|
||||
### `i18n:translate` - Automatically Translate Pending Texts
|
||||
@ -148,20 +148,20 @@ MODEL="qwen-plus-latest"
|
||||
Alternatively, add these variables directly to your `.env` file.
|
||||
|
||||
```bash
|
||||
yarn i18n:translate
|
||||
pnpm i18n:translate
|
||||
```
|
||||
|
||||
### Workflow
|
||||
|
||||
1. During development, first add the required text in `zh-cn.json`
|
||||
2. Confirm it displays correctly in the Chinese environment
|
||||
3. Run `yarn i18n:sync` to propagate the keys to other language files
|
||||
4. Run `yarn i18n:translate` to perform machine translation
|
||||
3. Run `pnpm i18n:sync` to propagate the keys to other language files
|
||||
4. Run `pnpm i18n:translate` to perform machine translation
|
||||
5. Grab a coffee and let the magic happen!
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Use Chinese as Source Language**: All development starts in Chinese, then translates to other languages.
|
||||
2. **Run Check Script Before Commit**: Use `yarn i18n:check` to catch i18n issues early.
|
||||
2. **Run Check Script Before Commit**: Use `pnpm i18n:check` to catch i18n issues early.
|
||||
3. **Translate in Small Increments**: Avoid accumulating a large backlog of untranslated content.
|
||||
4. **Keep Keys Semantically Clear**: Keys should clearly express their purpose, e.g., `user.profile.avatar.upload.error`
|
||||
|
||||
@ -37,8 +37,8 @@ The `x-files/app-upgrade-config/app-upgrade-config.json` file is synchronized by
|
||||
|
||||
1. **Guard + metadata preparation** – the `Check if should proceed` and `Prepare metadata` steps compute the target tag, prerelease flag, whether the tag is the newest release, and a `safe_tag` slug used for branch names. When any rule fails, the workflow stops without touching the config.
|
||||
2. **Checkout source branches** – the default branch is checked out into `main/`, while the long-lived `x-files/app-upgrade-config` branch lives in `cs/`. All modifications happen in the latter directory.
|
||||
3. **Install toolchain** – Node.js 22, Corepack, and frozen Yarn dependencies are installed inside `main/`.
|
||||
4. **Run the update script** – `yarn tsx scripts/update-app-upgrade-config.ts --tag <tag> --config ../cs/app-upgrade-config.json --is-prerelease <flag>` updates the JSON in-place.
|
||||
3. **Install toolchain** – Node.js 22, Corepack, and frozen pnpm dependencies are installed inside `main/`.
|
||||
4. **Run the update script** – `pnpm tsx scripts/update-app-upgrade-config.ts --tag <tag> --config ../cs/app-upgrade-config.json --is-prerelease <flag>` updates the JSON in-place.
|
||||
- The script normalizes the tag (e.g., strips `v` prefix), detects the release channel (`latest`, `rc`, `beta`), and loads segment rules from `config/app-upgrade-segments.json`.
|
||||
- It validates that prerelease flags and semantic suffixes agree, enforces locked segments, builds mirror feed URLs, and performs release-availability checks (GitHub HEAD request for every channel; GitCode GET for latest channels, falling back to `https://releases.cherry-ai.com` when gitcode is delayed).
|
||||
- After updating the relevant channel entry, the script rewrites the config with semver-sort order and a new `lastUpdated` timestamp.
|
||||
@ -223,10 +223,10 @@ interface ChannelConfig {
|
||||
Starting from this change, `.github/workflows/update-app-upgrade-config.yml` listens to GitHub release events (published + prerelease). The workflow:
|
||||
|
||||
1. Checks out the default branch (for scripts) and the `x-files/app-upgrade-config` branch (where the config is hosted).
|
||||
2. Runs `yarn tsx scripts/update-app-upgrade-config.ts --tag <tag> --config ../cs/app-upgrade-config.json` to regenerate the config directly inside the `x-files/app-upgrade-config` working tree.
|
||||
2. Runs `pnpm tsx scripts/update-app-upgrade-config.ts --tag <tag> --config ../cs/app-upgrade-config.json` to regenerate the config directly inside the `x-files/app-upgrade-config` working tree.
|
||||
3. If the file changed, it opens a PR against `x-files/app-upgrade-config` via `peter-evans/create-pull-request`, with the generated diff limited to `app-upgrade-config.json`.
|
||||
|
||||
You can run the same script locally via `yarn update:upgrade-config --tag v2.1.6 --config ../cs/app-upgrade-config.json` (add `--dry-run` to preview) to reproduce or debug whatever the workflow does. Passing `--skip-release-checks` along with `--dry-run` lets you bypass the release-page existence check (useful when the GitHub/GitCode pages aren’t published yet). Running without `--config` continues to update the copy in your current working directory (main branch) for documentation purposes.
|
||||
You can run the same script locally via `pnpm update:upgrade-config --tag v2.1.6 --config ../cs/app-upgrade-config.json` (add `--dry-run` to preview) to reproduce or debug whatever the workflow does. Passing `--skip-release-checks` along with `--dry-run` lets you bypass the release-page existence check (useful when the GitHub/GitCode pages aren't published yet). Running without `--config` continues to update the copy in your current working directory (main branch) for documentation purposes.
|
||||
|
||||
## Version Matching Logic
|
||||
|
||||
|
||||
155
docs/en/references/feishu-notify.md
Normal file
155
docs/en/references/feishu-notify.md
Normal file
@ -0,0 +1,155 @@
|
||||
# Feishu Notification Script
|
||||
|
||||
`scripts/feishu-notify.ts` is a CLI tool for sending notifications to Feishu (Lark) Webhook. This script is primarily used in GitHub Actions workflows to enable automatic notifications.
|
||||
|
||||
## Features
|
||||
|
||||
- Subcommand-based CLI structure for different notification types
|
||||
- HMAC-SHA256 signature verification
|
||||
- Sends Feishu interactive card messages
|
||||
- Full TypeScript type support
|
||||
- Credentials via environment variables for security
|
||||
|
||||
## Usage
|
||||
|
||||
### Prerequisites
|
||||
|
||||
```bash
|
||||
pnpm install
|
||||
```
|
||||
|
||||
### CLI Structure
|
||||
|
||||
```bash
|
||||
pnpm tsx scripts/feishu-notify.ts [command] [options]
|
||||
```
|
||||
|
||||
### Environment Variables (Required)
|
||||
|
||||
| Variable | Description |
|
||||
|----------|-------------|
|
||||
| `FEISHU_WEBHOOK_URL` | Feishu Webhook URL |
|
||||
| `FEISHU_WEBHOOK_SECRET` | Feishu Webhook signing secret |
|
||||
|
||||
## Commands
|
||||
|
||||
### `send` - Send Simple Notification
|
||||
|
||||
Send a generic notification without business-specific logic.
|
||||
|
||||
```bash
|
||||
pnpm tsx scripts/feishu-notify.ts send [options]
|
||||
```
|
||||
|
||||
| Option | Short | Description | Required |
|
||||
|--------|-------|-------------|----------|
|
||||
| `--title` | `-t` | Card title | Yes |
|
||||
| `--description` | `-d` | Card description (supports markdown) | Yes |
|
||||
| `--color` | `-c` | Header color template | No (default: turquoise) |
|
||||
|
||||
**Available colors:** `blue`, `wathet`, `turquoise`, `green`, `yellow`, `orange`, `red`, `carmine`, `violet`, `purple`, `indigo`, `grey`, `default`
|
||||
|
||||
#### Example
|
||||
|
||||
```bash
|
||||
# Use $'...' syntax for proper newlines
|
||||
pnpm tsx scripts/feishu-notify.ts send \
|
||||
-t "Deployment Completed" \
|
||||
-d $'**Status:** Success\n\n**Environment:** Production\n\n**Version:** v1.2.3' \
|
||||
-c green
|
||||
```
|
||||
|
||||
```bash
|
||||
# Send an error alert (red color)
|
||||
pnpm tsx scripts/feishu-notify.ts send \
|
||||
-t "Error Alert" \
|
||||
-d $'**Error Type:** Connection failed\n\n**Severity:** High\n\nPlease check the system status' \
|
||||
-c red
|
||||
```
|
||||
|
||||
**Note:** For proper newlines in the description, use bash's `$'...'` syntax. Do not use literal `\n` in double quotes, as it will be displayed as-is in the Feishu card.
|
||||
|
||||
### `issue` - Send GitHub Issue Notification
|
||||
|
||||
```bash
|
||||
pnpm tsx scripts/feishu-notify.ts issue [options]
|
||||
```
|
||||
|
||||
| Option | Short | Description | Required |
|
||||
|--------|-------|-------------|----------|
|
||||
| `--url` | `-u` | GitHub issue URL | Yes |
|
||||
| `--number` | `-n` | Issue number | Yes |
|
||||
| `--title` | `-t` | Issue title | Yes |
|
||||
| `--summary` | `-m` | Issue summary | Yes |
|
||||
| `--author` | `-a` | Issue author | No (default: "Unknown") |
|
||||
| `--labels` | `-l` | Issue labels (comma-separated) | No |
|
||||
|
||||
#### Example
|
||||
|
||||
```bash
|
||||
pnpm tsx scripts/feishu-notify.ts issue \
|
||||
-u "https://github.com/owner/repo/issues/123" \
|
||||
-n "123" \
|
||||
-t "Bug: Something is broken" \
|
||||
-m "This is a bug report about a feature" \
|
||||
-a "username" \
|
||||
-l "bug,high-priority"
|
||||
```
|
||||
|
||||
## Usage in GitHub Actions
|
||||
|
||||
This script is primarily used in `.github/workflows/github-issue-tracker.yml`:
|
||||
|
||||
```yaml
|
||||
- name: Install dependencies
|
||||
run: pnpm install
|
||||
|
||||
- name: Send notification
|
||||
run: |
|
||||
pnpm tsx scripts/feishu-notify.ts issue \
|
||||
-u "${{ github.event.issue.html_url }}" \
|
||||
-n "${{ github.event.issue.number }}" \
|
||||
-t "${{ github.event.issue.title }}" \
|
||||
-a "${{ github.event.issue.user.login }}" \
|
||||
-l "${{ join(github.event.issue.labels.*.name, ',') }}" \
|
||||
-m "Issue summary content"
|
||||
env:
|
||||
FEISHU_WEBHOOK_URL: ${{ secrets.FEISHU_WEBHOOK_URL }}
|
||||
FEISHU_WEBHOOK_SECRET: ${{ secrets.FEISHU_WEBHOOK_SECRET }}
|
||||
```
|
||||
|
||||
## Feishu Card Message Format
|
||||
|
||||
The `issue` command sends an interactive card containing:
|
||||
|
||||
- **Header**: `#<issue_number> - <issue_title>`
|
||||
- **Author**: Issue creator
|
||||
- **Labels**: Issue labels (if any)
|
||||
- **Summary**: Issue content summary
|
||||
- **Action Button**: "View Issue" button linking to the GitHub Issue page
|
||||
|
||||
## Configuring Feishu Webhook
|
||||
|
||||
1. Add a custom bot to your Feishu group
|
||||
2. Obtain the Webhook URL and signing secret
|
||||
3. Configure them in GitHub Secrets:
|
||||
- `FEISHU_WEBHOOK_URL`: Webhook address
|
||||
- `FEISHU_WEBHOOK_SECRET`: Signing secret
|
||||
|
||||
## Error Handling
|
||||
|
||||
The script exits with a non-zero code when:
|
||||
|
||||
- Required environment variables are missing (`FEISHU_WEBHOOK_URL`, `FEISHU_WEBHOOK_SECRET`)
|
||||
- Required command options are missing
|
||||
- Feishu API returns a non-2xx status code
|
||||
- Network request fails
|
||||
|
||||
## Extending with New Commands
|
||||
|
||||
The CLI is designed to support multiple notification types. To add a new command:
|
||||
|
||||
1. Define the command options interface
|
||||
2. Create a card builder function
|
||||
3. Add a new command handler
|
||||
4. Register the command with `program.command()`
|
||||
129
docs/en/references/fuzzy-search.md
Normal file
129
docs/en/references/fuzzy-search.md
Normal file
@ -0,0 +1,129 @@
|
||||
# Fuzzy Search for File List
|
||||
|
||||
This document describes the fuzzy search implementation for file listing in Cherry Studio.
|
||||
|
||||
## Overview
|
||||
|
||||
The fuzzy search feature allows users to find files by typing partial or approximate file names/paths. It uses a two-tier file filtering strategy (ripgrep glob pre-filtering with greedy substring fallback) combined with subsequence-based scoring for optimal performance and flexibility.
|
||||
|
||||
## Features
|
||||
|
||||
- **Ripgrep Glob Pre-filtering**: Primary filtering using glob patterns for fast native-level filtering
|
||||
- **Greedy Substring Matching**: Fallback file filtering strategy when ripgrep glob pre-filtering returns no results
|
||||
- **Subsequence-based Segment Scoring**: During scoring, path segments gain additional weight when query characters appear in order
|
||||
- **Relevance Scoring**: Results are sorted by a relevance score derived from multiple factors
|
||||
|
||||
## Matching Strategies
|
||||
|
||||
### 1. Ripgrep Glob Pre-filtering (Primary)
|
||||
|
||||
The query is converted to a glob pattern for ripgrep to do initial filtering:
|
||||
|
||||
```
|
||||
Query: "updater"
|
||||
Glob: "*u*p*d*a*t*e*r*"
|
||||
```
|
||||
|
||||
This leverages ripgrep's native performance for the initial file filtering.
|
||||
|
||||
### 2. Greedy Substring Matching (Fallback)
|
||||
|
||||
When the glob pre-filter returns no results, the system falls back to greedy substring matching. This allows more flexible matching:
|
||||
|
||||
```
|
||||
Query: "updatercontroller"
|
||||
File: "packages/update/src/node/updateController.ts"
|
||||
|
||||
Matching process:
|
||||
1. Find "update" (longest match from start)
|
||||
2. Remaining "rcontroller" → find "r" then "controller"
|
||||
3. All parts matched → Success
|
||||
```
|
||||
|
||||
## Scoring Algorithm
|
||||
|
||||
Results are ranked by a relevance score based on named constants defined in `FileStorage.ts`:
|
||||
|
||||
| Constant | Value | Description |
|
||||
|----------|-------|-------------|
|
||||
| `SCORE_FILENAME_STARTS` | 100 | Filename starts with query (highest priority) |
|
||||
| `SCORE_FILENAME_CONTAINS` | 80 | Filename contains exact query substring |
|
||||
| `SCORE_SEGMENT_MATCH` | 60 | Per path segment that matches query |
|
||||
| `SCORE_WORD_BOUNDARY` | 20 | Query matches start of a word |
|
||||
| `SCORE_CONSECUTIVE_CHAR` | 15 | Per consecutive character match |
|
||||
| `PATH_LENGTH_PENALTY_FACTOR` | 4 | Logarithmic penalty for longer paths |
|
||||
|
||||
### Scoring Strategy
|
||||
|
||||
The scoring prioritizes:
|
||||
1. **Filename matches** (highest): Files where the query appears in the filename are most relevant
|
||||
2. **Path segment matches**: Multiple matching segments indicate stronger relevance
|
||||
3. **Word boundaries**: Matching at word starts (e.g., "upd" matching "update") is preferred
|
||||
4. **Consecutive matches**: Longer consecutive character sequences score higher
|
||||
5. **Path length**: Shorter paths are preferred (logarithmic penalty prevents long paths from dominating)
|
||||
|
||||
### Example Scoring
|
||||
|
||||
For query `updater`:
|
||||
|
||||
| File | Score Factors |
|
||||
|------|---------------|
|
||||
| `RCUpdater.js` | Short path + filename contains "updater" |
|
||||
| `updateController.ts` | Multiple segment matches |
|
||||
| `UpdaterHelper.plist` | Long path penalty |
|
||||
|
||||
## Configuration
|
||||
|
||||
### DirectoryListOptions
|
||||
|
||||
```typescript
|
||||
interface DirectoryListOptions {
|
||||
recursive?: boolean // Default: true
|
||||
maxDepth?: number // Default: 10
|
||||
includeHidden?: boolean // Default: false
|
||||
includeFiles?: boolean // Default: true
|
||||
includeDirectories?: boolean // Default: true
|
||||
maxEntries?: number // Default: 20
|
||||
searchPattern?: string // Default: '.'
|
||||
fuzzy?: boolean // Default: true
|
||||
}
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```typescript
|
||||
// Basic fuzzy search
|
||||
const files = await window.api.file.listDirectory(dirPath, {
|
||||
searchPattern: 'updater',
|
||||
fuzzy: true,
|
||||
maxEntries: 20
|
||||
})
|
||||
|
||||
// Disable fuzzy search (exact glob matching)
|
||||
const files = await window.api.file.listDirectory(dirPath, {
|
||||
searchPattern: 'update',
|
||||
fuzzy: false
|
||||
})
|
||||
```
|
||||
|
||||
## Performance Considerations
|
||||
|
||||
1. **Ripgrep Pre-filtering**: Most queries are handled by ripgrep's native glob matching, which is extremely fast
|
||||
2. **Fallback Only When Needed**: Greedy substring matching (which loads all files) only runs when glob matching returns empty results
|
||||
3. **Result Limiting**: Only top 20 results are returned by default
|
||||
4. **Excluded Directories**: Common large directories are automatically excluded:
|
||||
- `node_modules`
|
||||
- `.git`
|
||||
- `dist`, `build`
|
||||
- `.next`, `.nuxt`
|
||||
- `coverage`, `.cache`
|
||||
|
||||
## Implementation Details
|
||||
|
||||
The implementation is located in `src/main/services/FileStorage.ts`:
|
||||
|
||||
- `queryToGlobPattern()`: Converts query to ripgrep glob pattern
|
||||
- `isFuzzyMatch()`: Subsequence matching algorithm
|
||||
- `isGreedySubstringMatch()`: Greedy substring matching fallback
|
||||
- `getFuzzyMatchScore()`: Calculates relevance score
|
||||
- `listDirectoryWithRipgrep()`: Main search orchestration
|
||||
@ -34,7 +34,7 @@
|
||||
</a>
|
||||
</h1>
|
||||
<p align="center">
|
||||
<a href="https://github.com/CherryHQ/cherry-studio">English</a> | 中文 | <a href="https://cherry-ai.com">官方网站</a> | <a href="https://docs.cherry-ai.com/cherry-studio-wen-dang/zh-cn">文档</a> | <a href="./guides/development.md">开发</a> | <a href="https://github.com/CherryHQ/cherry-studio/issues">反馈</a><br>
|
||||
<a href="https://github.com/CherryHQ/cherry-studio">English</a> | 中文 | <a href="https://cherry-ai.com">官方网站</a> | <a href="https://docs.cherry-ai.com">文档</a> | <a href="./guides/development.md">开发</a> | <a href="https://github.com/CherryHQ/cherry-studio/issues">反馈</a><br>
|
||||
</p>
|
||||
|
||||
<!-- 题头徽章组合 -->
|
||||
@ -281,7 +281,7 @@ https://docs.cherry-ai.com
|
||||
|
||||
# 📊 GitHub 统计
|
||||
|
||||

|
||||

|
||||
|
||||
# ⭐️ Star 记录
|
||||
|
||||
|
||||
@ -11,7 +11,7 @@
|
||||
### Install
|
||||
|
||||
```bash
|
||||
yarn
|
||||
pnpm install
|
||||
```
|
||||
|
||||
### Development
|
||||
@ -20,35 +20,35 @@ yarn
|
||||
|
||||
Download and install [Node.js v22.x.x](https://nodejs.org/en/download)
|
||||
|
||||
### Setup Yarn
|
||||
### Setup pnpm
|
||||
|
||||
```bash
|
||||
corepack enable
|
||||
corepack prepare yarn@4.9.1 --activate
|
||||
corepack prepare pnpm@10.27.0 --activate
|
||||
```
|
||||
|
||||
### Install Dependencies
|
||||
|
||||
```bash
|
||||
yarn install
|
||||
pnpm install
|
||||
```
|
||||
|
||||
### ENV
|
||||
|
||||
```bash
|
||||
copy .env.example .env
|
||||
cp .env.example .env
|
||||
```
|
||||
|
||||
### Start
|
||||
|
||||
```bash
|
||||
yarn dev
|
||||
pnpm dev
|
||||
```
|
||||
|
||||
### Debug
|
||||
|
||||
```bash
|
||||
yarn debug
|
||||
pnpm debug
|
||||
```
|
||||
|
||||
Then input chrome://inspect in browser
|
||||
@ -56,18 +56,18 @@ Then input chrome://inspect in browser
|
||||
### Test
|
||||
|
||||
```bash
|
||||
yarn test
|
||||
pnpm test
|
||||
```
|
||||
|
||||
### Build
|
||||
|
||||
```bash
|
||||
# For windows
|
||||
$ yarn build:win
|
||||
$ pnpm build:win
|
||||
|
||||
# For macOS
|
||||
$ yarn build:mac
|
||||
$ pnpm build:mac
|
||||
|
||||
# For Linux
|
||||
$ yarn build:linux
|
||||
$ pnpm build:linux
|
||||
```
|
||||
|
||||
@ -111,7 +111,7 @@ export const getThemeModeLabel = (key: string): string => {
|
||||
- 是否已经有序
|
||||
|
||||
```bash
|
||||
yarn i18n:check
|
||||
pnpm i18n:check
|
||||
```
|
||||
|
||||
### `i18n:sync` - 同步 json 结构与排序
|
||||
@ -123,7 +123,7 @@ yarn i18n:check
|
||||
3. 自动排序
|
||||
|
||||
```bash
|
||||
yarn i18n:sync
|
||||
pnpm i18n:sync
|
||||
```
|
||||
|
||||
### `i18n:translate` - 自动翻译待翻译文本
|
||||
@ -143,19 +143,19 @@ MODEL="qwen-plus-latest"
|
||||
你也可以通过直接编辑`.env`文件来添加环境变量。
|
||||
|
||||
```bash
|
||||
yarn i18n:translate
|
||||
pnpm i18n:translate
|
||||
```
|
||||
|
||||
### 工作流
|
||||
|
||||
1. 开发阶段,先在`zh-cn.json`中添加所需文案
|
||||
2. 确认在中文环境下显示无误后,使用`yarn i18n:sync`将文案同步到其他语言文件
|
||||
3. 使用`yarn i18n:translate`进行自动翻译
|
||||
2. 确认在中文环境下显示无误后,使用`pnpm i18n:sync`将文案同步到其他语言文件
|
||||
3. 使用`pnpm i18n:translate`进行自动翻译
|
||||
4. 喝杯咖啡,等翻译完成吧!
|
||||
|
||||
## 最佳实践
|
||||
|
||||
1. **以中文为源语言**:所有开发首先使用中文,再翻译为其他语言
|
||||
2. **提交前运行检查脚本**:使用`yarn i18n:check`检查 i18n 是否有问题
|
||||
2. **提交前运行检查脚本**:使用`pnpm i18n:check`检查 i18n 是否有问题
|
||||
3. **小步提交翻译**:避免积累大量未翻译文本
|
||||
4. **保持 key 语义明确**:key 应能清晰表达其用途,如`user.profile.avatar.upload.error`
|
||||
|
||||
@ -37,8 +37,8 @@
|
||||
|
||||
1. **检查与元数据准备**:`Check if should proceed` 和 `Prepare metadata` 步骤会计算 tag、prerelease 标志、是否最新版本以及用于分支名的 `safe_tag`。若任意校验失败,工作流立即退出。
|
||||
2. **检出分支**:默认分支被检出到 `main/`,长期维护的 `x-files/app-upgrade-config` 分支则在 `cs/` 中,所有改动都发生在 `cs/`。
|
||||
3. **安装工具链**:安装 Node.js 22、启用 Corepack,并在 `main/` 目录执行 `yarn install --immutable`。
|
||||
4. **运行更新脚本**:执行 `yarn tsx scripts/update-app-upgrade-config.ts --tag <tag> --config ../cs/app-upgrade-config.json --is-prerelease <flag>`。
|
||||
3. **安装工具链**:安装 Node.js 22、启用 Corepack,并在 `main/` 目录执行 `pnpm install --frozen-lockfile`。
|
||||
4. **运行更新脚本**:执行 `pnpm tsx scripts/update-app-upgrade-config.ts --tag <tag> --config ../cs/app-upgrade-config.json --is-prerelease <flag>`。
|
||||
- 脚本会标准化 tag(去掉 `v` 前缀等)、识别渠道、加载 `config/app-upgrade-segments.json` 中的分段规则。
|
||||
- 校验 prerelease 标志与语义后缀是否匹配、强制锁定的 segment 是否满足、生成镜像的下载地址,并检查 release 是否已经在 GitHub/GitCode 可用(latest 渠道在 GitCode 不可用时会回退到 `https://releases.cherry-ai.com`)。
|
||||
- 更新对应的渠道配置后,脚本会按 semver 排序写回 JSON,并刷新 `lastUpdated`。
|
||||
@ -223,10 +223,10 @@ interface ChannelConfig {
|
||||
`.github/workflows/update-app-upgrade-config.yml` 会在 GitHub Release(包含正常发布与 Pre Release)触发:
|
||||
|
||||
1. 同时 Checkout 仓库默认分支(用于脚本)和 `x-files/app-upgrade-config` 分支(真实托管配置的分支)。
|
||||
2. 在默认分支目录执行 `yarn tsx scripts/update-app-upgrade-config.ts --tag <tag> --config ../cs/app-upgrade-config.json`,直接重写 `x-files/app-upgrade-config` 分支里的配置文件。
|
||||
2. 在默认分支目录执行 `pnpm tsx scripts/update-app-upgrade-config.ts --tag <tag> --config ../cs/app-upgrade-config.json`,直接重写 `x-files/app-upgrade-config` 分支里的配置文件。
|
||||
3. 如果 `app-upgrade-config.json` 有变化,则通过 `peter-evans/create-pull-request` 自动创建一个指向 `x-files/app-upgrade-config` 的 PR,Diff 仅包含该文件。
|
||||
|
||||
如需本地调试,可执行 `yarn update:upgrade-config --tag v2.1.6 --config ../cs/app-upgrade-config.json`(加 `--dry-run` 仅打印结果)来复现 CI 行为。若需要暂时跳过 GitHub/GitCode Release 页面是否就绪的校验,可在 `--dry-run` 的同时附加 `--skip-release-checks`。不加 `--config` 时默认更新当前工作目录(通常是 main 分支)下的副本,方便文档/审查。
|
||||
如需本地调试,可执行 `pnpm update:upgrade-config --tag v2.1.6 --config ../cs/app-upgrade-config.json`(加 `--dry-run` 仅打印结果)来复现 CI 行为。若需要暂时跳过 GitHub/GitCode Release 页面是否就绪的校验,可在 `--dry-run` 的同时附加 `--skip-release-checks`。不加 `--config` 时默认更新当前工作目录(通常是 main 分支)下的副本,方便文档/审查。
|
||||
|
||||
## 版本匹配逻辑
|
||||
|
||||
|
||||
155
docs/zh/references/feishu-notify.md
Normal file
155
docs/zh/references/feishu-notify.md
Normal file
@ -0,0 +1,155 @@
|
||||
# 飞书通知脚本
|
||||
|
||||
`scripts/feishu-notify.ts` 是一个 CLI 工具,用于向飞书 Webhook 发送通知。该脚本主要在 GitHub Actions 工作流中使用,实现自动通知功能。
|
||||
|
||||
## 功能特性
|
||||
|
||||
- 基于子命令的 CLI 结构,支持不同类型的通知
|
||||
- 使用 HMAC-SHA256 签名验证
|
||||
- 发送飞书交互式卡片消息
|
||||
- 完整的 TypeScript 类型支持
|
||||
- 通过环境变量传递凭证,确保安全性
|
||||
|
||||
## 使用方式
|
||||
|
||||
### 前置依赖
|
||||
|
||||
```bash
|
||||
pnpm install
|
||||
```
|
||||
|
||||
### CLI 结构
|
||||
|
||||
```bash
|
||||
pnpm tsx scripts/feishu-notify.ts [command] [options]
|
||||
```
|
||||
|
||||
### 环境变量(必需)
|
||||
|
||||
| 变量 | 说明 |
|
||||
|------|------|
|
||||
| `FEISHU_WEBHOOK_URL` | 飞书 Webhook URL |
|
||||
| `FEISHU_WEBHOOK_SECRET` | 飞书 Webhook 签名密钥 |
|
||||
|
||||
## 命令
|
||||
|
||||
### `send` - 发送简单通知
|
||||
|
||||
发送通用通知,不涉及具体业务逻辑。
|
||||
|
||||
```bash
|
||||
pnpm tsx scripts/feishu-notify.ts send [options]
|
||||
```
|
||||
|
||||
| 参数 | 短选项 | 说明 | 必需 |
|
||||
|------|--------|------|------|
|
||||
| `--title` | `-t` | 卡片标题 | 是 |
|
||||
| `--description` | `-d` | 卡片描述(支持 markdown) | 是 |
|
||||
| `--color` | `-c` | 标题栏颜色模板 | 否(默认:turquoise) |
|
||||
|
||||
**可用颜色:** `blue`(蓝色), `wathet`(浅蓝), `turquoise`(青绿), `green`(绿色), `yellow`(黄色), `orange`(橙色), `red`(红色), `carmine`(深红), `violet`(紫罗兰), `purple`(紫色), `indigo`(靛蓝), `grey`(灰色), `default`(默认)
|
||||
|
||||
#### 示例
|
||||
|
||||
```bash
|
||||
# 使用 $'...' 语法实现正确的换行
|
||||
pnpm tsx scripts/feishu-notify.ts send \
|
||||
-t "部署完成" \
|
||||
-d $'**状态:** 成功\n\n**环境:** 生产环境\n\n**版本:** v1.2.3' \
|
||||
-c green
|
||||
```
|
||||
|
||||
```bash
|
||||
# 发送错误警报(红色)
|
||||
pnpm tsx scripts/feishu-notify.ts send \
|
||||
-t "错误警报" \
|
||||
-d $'**错误类型:** 连接失败\n\n**严重程度:** 高\n\n请及时检查系统状态' \
|
||||
-c red
|
||||
```
|
||||
|
||||
**注意:** 如需在描述中换行,请使用 bash 的 `$'...'` 语法。不要在双引号中使用字面量 `\n`,否则会原样显示在飞书卡片中。

### `issue` - Send a GitHub Issue notification

```bash
pnpm tsx scripts/feishu-notify.ts issue [options]
```

| Option | Short | Description | Required |
|------|--------|------|------|
| `--url` | `-u` | GitHub Issue URL | Yes |
| `--number` | `-n` | Issue number | Yes |
| `--title` | `-t` | Issue title | Yes |
| `--summary` | `-m` | Issue summary | Yes |
| `--author` | `-a` | Issue author | No (default: "Unknown") |
| `--labels` | `-l` | Issue labels (comma-separated) | No |

#### Example

```bash
pnpm tsx scripts/feishu-notify.ts issue \
  -u "https://github.com/owner/repo/issues/123" \
  -n "123" \
  -t "Bug: Something is broken" \
  -m "This is a bug report about some feature" \
  -a "username" \
  -l "bug,high-priority"
```

## Usage in GitHub Actions

The script is used mainly in the `.github/workflows/github-issue-tracker.yml` workflow:

```yaml
- name: Install dependencies
  run: pnpm install

- name: Send notification
  run: |
    pnpm tsx scripts/feishu-notify.ts issue \
      -u "${{ github.event.issue.html_url }}" \
      -n "${{ github.event.issue.number }}" \
      -t "${{ github.event.issue.title }}" \
      -a "${{ github.event.issue.user.login }}" \
      -l "${{ join(github.event.issue.labels.*.name, ',') }}" \
      -m "Issue summary text"
  env:
    FEISHU_WEBHOOK_URL: ${{ secrets.FEISHU_WEBHOOK_URL }}
    FEISHU_WEBHOOK_SECRET: ${{ secrets.FEISHU_WEBHOOK_SECRET }}
```

## Feishu card message format

The interactive card sent by the `issue` command contains the following, roughly in the shape sketched after this list:

- **Title**: `#<issue number> - <issue title>`
- **Author**: the issue creator
- **Labels**: the issue's labels (if any)
- **Summary**: a summary of the issue body
- **Action button**: a "View Issue" button that opens the GitHub Issue page
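
For reference, a minimal sketch of what such a payload can look like. The field names follow the public Feishu custom-bot card schema; the exact JSON built by `feishu-notify.ts` may differ, and all values shown here are placeholders. The `timestamp` and `sign` fields relate to the signature calculation sketched later in this document.

```typescript
// Illustrative payload for a signed Feishu custom-bot webhook (values are placeholders)
const payload = {
  timestamp: String(Math.floor(Date.now() / 1000)),
  sign: '<HMAC-SHA256 signature, see the sketch below>',
  msg_type: 'interactive',
  card: {
    header: {
      title: { tag: 'plain_text', content: '#123 - Bug: Something is broken' },
      template: 'turquoise' // header color template
    },
    elements: [
      { tag: 'markdown', content: '**Author:** username\n**Labels:** bug, high-priority' },
      { tag: 'markdown', content: 'Summary of the issue body' },
      {
        tag: 'action',
        actions: [
          {
            tag: 'button',
            text: { tag: 'plain_text', content: 'View Issue' },
            url: 'https://github.com/owner/repo/issues/123',
            type: 'primary'
          }
        ]
      }
    ]
  }
}
```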

## Configuring the Feishu webhook

1. Add a custom bot to a Feishu group chat
2. Obtain the webhook URL and the signing secret
3. Store both values in GitHub Secrets:
   - `FEISHU_WEBHOOK_URL`: the webhook address
   - `FEISHU_WEBHOOK_SECRET`: the signing secret (used by the signature calculation sketched below)
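
The signing scheme is the one publicly documented for Feishu custom bots: the timestamp and the secret are joined with a newline, used as the HMAC-SHA256 key over an empty message, and Base64-encoded. A minimal Node.js sketch (the helper name is illustrative, not necessarily what the script uses):

```typescript
import { createHmac } from 'node:crypto'

// Compute the `sign` field for a signed Feishu custom-bot webhook.
// `timestamp` is in seconds and must also be included in the request body.
function signFeishuWebhook(secret: string, timestamp: number): string {
  const stringToSign = `${timestamp}\n${secret}`
  // Feishu uses stringToSign as the HMAC key and an empty string as the message
  return createHmac('sha256', stringToSign).update('').digest('base64')
}

const timestamp = Math.floor(Date.now() / 1000)
const sign = signFeishuWebhook(process.env.FEISHU_WEBHOOK_SECRET ?? '', timestamp)
console.log({ timestamp, sign })
```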

## Error handling

The script exits with a non-zero code when:

- A required environment variable is missing (`FEISHU_WEBHOOK_URL`, `FEISHU_WEBHOOK_SECRET`)
- A required command option is missing
- The Feishu API returns a non-2xx status code
- The network request fails

## Adding new commands

The CLI is designed to support multiple notification types. To add a new command (see the sketch after this list):

1. Define an interface for the command's options
2. Create a card-building function
3. Add a handler function for the new command
4. Register the command with `program.command()`
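
A minimal sketch of those four steps using commander. The `release` command, its options, and the `buildReleaseCard`/`sendCard` helpers are hypothetical and stand in for the script's existing card and webhook logic:

```typescript
import { Command } from 'commander'

interface ReleaseOptions {
  tag: string
  notes: string
}

// Step 2 (hypothetical): build an interactive card from the command options
function buildReleaseCard(options: ReleaseOptions): unknown {
  return {
    msg_type: 'interactive',
    card: {
      header: { title: { tag: 'plain_text', content: `Release ${options.tag}` }, template: 'green' },
      elements: [{ tag: 'markdown', content: options.notes }]
    }
  }
}

// Hypothetical sender; the real script would sign the request and POST it to FEISHU_WEBHOOK_URL
async function sendCard(card: unknown): Promise<void> {
  console.log(JSON.stringify(card, null, 2))
}

const program = new Command()

program
  .command('release')                                                // step 4: register the command
  .description('Send a release notification card')
  .requiredOption('-t, --tag <tag>', 'release tag')                  // step 1: options
  .requiredOption('-n, --notes <notes>', 'release notes (markdown)')
  .action(async (options: ReleaseOptions) => {                       // step 3: handler
    await sendCard(buildReleaseCard(options))
  })

program.parse()
```
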
docs/zh/references/fuzzy-search.md
Normal file
@ -0,0 +1,129 @@

# Fuzzy Search for File Listings

This document describes the fuzzy search implementation used for file listings in Cherry Studio.

## Overview

Fuzzy search lets users find files by typing a partial or approximate file name or path. It combines a two-tier file filtering strategy (a ripgrep glob pre-filter with a greedy substring-matching fallback) and subsequence-based scoring to balance performance and flexibility.

## Features

- **Ripgrep glob pre-filter**: the primary filtering strategy, using glob patterns for fast native-level filtering
- **Greedy substring matching**: the fallback filtering strategy used when the ripgrep glob pre-filter returns no results
- **Subsequence-based segment scoring**: path segments receive extra weight when the query characters appear in order
- **Relevance scoring**: results are ordered by a multi-factor relevance score

## Matching strategies

### 1. Ripgrep glob pre-filter (primary)

The query is converted into a glob pattern that ripgrep uses for the initial filtering:

```
Query: "updater"
Glob:  "*u*p*d*a*t*e*r*"
```

This leverages ripgrep's native performance for the initial file filtering.
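
A minimal sketch of that conversion, assuming the shape shown above (the real `queryToGlobPattern()` lives in `src/main/services/FileStorage.ts` and may handle more edge cases):

```typescript
// Interleave every query character with '*' so that "updater" becomes "*u*p*d*a*t*e*r*"
function queryToGlobPattern(query: string): string {
  const chars = query
    .toLowerCase()
    .split('')
    // Escape glob metacharacters so user input cannot break the pattern
    .map((ch) => ('*?[]{}()!\\'.includes(ch) ? `\\${ch}` : ch))
  return `*${chars.join('*')}*`
}

queryToGlobPattern('updater') // => '*u*p*d*a*t*e*r*'
```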

### 2. Greedy substring matching (fallback)

When the glob pre-filter yields no results, the system falls back to greedy substring matching, which allows looser matches:

```
Query: "updatercontroller"
File:  "packages/update/src/node/updateController.ts"

Matching process:
1. Find "update" (the longest match from the start of the query)
2. Remaining "rcontroller" → find "r", then "controller"
3. All parts matched → success
```
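
A sketch of that fallback under the same assumptions (the actual `isGreedySubstringMatch()` in `FileStorage.ts` may differ in detail): repeatedly consume the longest prefix of the remaining query that still occurs, in order, somewhere after the previous match.

```typescript
function isGreedySubstringMatch(query: string, filePath: string): boolean {
  const target = filePath.toLowerCase()
  let rest = query.toLowerCase()
  let searchFrom = 0

  while (rest.length > 0) {
    let matchedLen = 0
    let matchedEnd = 0
    // Try the longest prefix of `rest` first, then shrink until something matches
    for (let len = rest.length; len > 0; len--) {
      const pos = target.indexOf(rest.slice(0, len), searchFrom)
      if (pos !== -1) {
        matchedLen = len
        matchedEnd = pos + len
        break
      }
    }
    if (matchedLen === 0) return false // some part of the query never appears
    searchFrom = matchedEnd
    rest = rest.slice(matchedLen)
  }
  return true
}

isGreedySubstringMatch('updatercontroller', 'packages/update/src/node/updateController.ts') // true
```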

## Scoring algorithm

Results are ranked by a relevance score based on named constants defined in `FileStorage.ts`:

| Constant | Value | Description |
|------|-----|------|
| `SCORE_FILENAME_STARTS` | 100 | File name starts with the query (highest priority) |
| `SCORE_FILENAME_CONTAINS` | 80 | File name contains the exact query substring |
| `SCORE_SEGMENT_MATCH` | 60 | Per path segment that matches the query |
| `SCORE_WORD_BOUNDARY` | 20 | Query matches at the start of a word |
| `SCORE_CONSECUTIVE_CHAR` | 15 | Per consecutively matched character |
| `PATH_LENGTH_PENALTY_FACTOR` | 4 | Logarithmic penalty for longer paths |

### Scoring strategy

Scoring priorities (a combining sketch follows this list):

1. **File name matches** (highest): files whose name contains the query are the most relevant
2. **Path segment matches**: more matching segments indicate stronger relevance
3. **Word boundaries**: matches at the start of a word (e.g. "upd" matching "update") are preferred
4. **Consecutive matches**: longer runs of consecutively matched characters score higher
5. **Path length**: shorter paths are preferred (a logarithmic penalty keeps long paths from dominating the score)
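
A minimal sketch of how these constants could combine. It is illustrative only: the production `getFuzzyMatchScore()` also applies the word-boundary and consecutive-character bonuses, which are omitted here for brevity.

```typescript
// Values mirror the table above
const SCORE_FILENAME_STARTS = 100
const SCORE_FILENAME_CONTAINS = 80
const SCORE_SEGMENT_MATCH = 60
const PATH_LENGTH_PENALTY_FACTOR = 4

function getFuzzyMatchScore(query: string, filePath: string): number {
  const q = query.toLowerCase()
  const segments = filePath.toLowerCase().split('/')
  const fileName = segments[segments.length - 1] ?? ''

  let score = 0
  if (fileName.startsWith(q)) score += SCORE_FILENAME_STARTS
  else if (fileName.includes(q)) score += SCORE_FILENAME_CONTAINS

  // Every path segment containing the query strengthens relevance
  score += segments.filter((s) => s.includes(q)).length * SCORE_SEGMENT_MATCH

  // Logarithmic penalty keeps long paths from dominating
  score -= Math.log(filePath.length + 1) * PATH_LENGTH_PENALTY_FACTOR
  return score
}

getFuzzyMatchScore('updater', 'RCUpdater.js') // scores higher than a deep, long path
```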

### Scoring example

For the query `updater`:

| File | Scoring factors |
|------|----------|
| `RCUpdater.js` | Short path + file name contains "updater" |
| `updateController.ts` | Multiple matching path segments |
| `UpdaterHelper.plist` | Long-path penalty |

## Configuration

### DirectoryListOptions

```typescript
interface DirectoryListOptions {
  recursive?: boolean // default: true
  maxDepth?: number // default: 10
  includeHidden?: boolean // default: false
  includeFiles?: boolean // default: true
  includeDirectories?: boolean // default: true
  maxEntries?: number // default: 20
  searchPattern?: string // default: '.'
  fuzzy?: boolean // default: true
}
```

## Usage

```typescript
// Basic fuzzy search
const files = await window.api.file.listDirectory(dirPath, {
  searchPattern: 'updater',
  fuzzy: true,
  maxEntries: 20
})

// Disable fuzzy search (exact glob matching)
const exactMatches = await window.api.file.listDirectory(dirPath, {
  searchPattern: 'update',
  fuzzy: false
})
```

## Performance considerations

1. **Ripgrep pre-filter**: most queries are handled by ripgrep's native glob matching, which is extremely fast
2. **Fallback only when needed**: greedy substring matching (which loads all files) runs only when glob matching returns an empty result
3. **Result limit**: only the top 20 results are returned by default
4. **Excluded directories**: common large directories are excluded automatically:
   - `node_modules`
   - `.git`
   - `dist`, `build`
   - `.next`, `.nuxt`
   - `coverage`, `.cache`

## Implementation details

The implementation lives in `src/main/services/FileStorage.ts` (a subsequence-matching sketch follows this list):

- `queryToGlobPattern()`: converts the query into a ripgrep glob pattern
- `isFuzzyMatch()`: the subsequence matching algorithm
- `isGreedySubstringMatch()`: the greedy substring-matching fallback
- `getFuzzyMatchScore()`: computes the relevance score
- `listDirectoryWithRipgrep()`: orchestrates the main search
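
A sketch of the subsequence check, under the assumptions above: every query character must appear in the path in order, though not necessarily adjacently.

```typescript
// "updtr" still matches "updater" because u, p, d, t, r appear in order
function isFuzzyMatch(query: string, filePath: string): boolean {
  const target = filePath.toLowerCase()
  let from = 0
  for (const ch of query.toLowerCase()) {
    const pos = target.indexOf(ch, from)
    if (pos === -1) return false
    from = pos + 1
  }
  return true
}

isFuzzyMatch('updtr', 'packages/update/src/node/updateController.ts') // true
```
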
@ -28,6 +28,12 @@ files:
|
||||
- "!**/{tsconfig.json,tsconfig.tsbuildinfo,tsconfig.node.json,tsconfig.web.json}"
|
||||
- "!**/{.editorconfig,.jekyll-metadata}"
|
||||
- "!src"
|
||||
- "!config"
|
||||
- "!patches"
|
||||
- "!app-upgrade-config.json"
|
||||
- "!**/node_modules/**/*.cpp"
|
||||
- "!**/node_modules/node-addon-api/**"
|
||||
- "!**/node_modules/prebuild-install/**"
|
||||
- "!scripts"
|
||||
- "!local"
|
||||
- "!docs"
|
||||
@ -90,6 +96,7 @@ nsis:
|
||||
oneClick: false
|
||||
include: build/nsis-installer.nsh
|
||||
buildUniversalInstaller: false
|
||||
differentialPackage: false
|
||||
portable:
|
||||
artifactName: ${productName}-${version}-${arch}-portable.${ext}
|
||||
buildUniversalInstaller: false
|
||||
@ -105,6 +112,8 @@ mac:
|
||||
target:
|
||||
- target: dmg
|
||||
- target: zip
|
||||
dmg:
|
||||
writeUpdateInfo: false
|
||||
linux:
|
||||
artifactName: ${productName}-${version}-${arch}.${ext}
|
||||
target:
|
||||
@ -134,68 +143,30 @@ artifactBuildCompleted: scripts/artifact-build-completed.js
|
||||
releaseInfo:
|
||||
releaseNotes: |
|
||||
<!--LANG:en-->
|
||||
Cherry Studio 1.7.7 - New Models & UI Improvements
|
||||
Cherry Studio 1.7.13 - Security & Bug Fixes
|
||||
|
||||
This release adds new AI model support, OpenRouter integration, and UI redesigns.
|
||||
|
||||
✨ New Features
|
||||
- [Models] Add GLM-4.7 and MiniMax-M2.1 model support
|
||||
- [Provider] Add OpenRouter provider support
|
||||
- [OVMS] Upgrade to 2025.4 with Qwen3-4B-int4-ov preset model
|
||||
- [OVMS] Close OVMS process when app quits
|
||||
- [Search] Show keyword-adjacent snippets in history search
|
||||
- [Painting] Add extend_params support for DMX painting
|
||||
- [UI] Add MCP logo and replace Hammer icon
|
||||
|
||||
🎨 UI Improvements
|
||||
- [Notes] Move notes settings to popup in NotesPage for quick access
|
||||
- [WebSearch] Redesign settings with two-column layout and "Set as Default" button
|
||||
- [Display] Improve font selector for long font names
|
||||
- [Transfer] Rename LanDrop to LanTransfer
|
||||
🔒 Security
|
||||
- [Plugin] Fix security vulnerability in DXT plugin system on Windows
|
||||
|
||||
🐛 Bug Fixes
|
||||
- [API] Correct aihubmix Anthropic API path
|
||||
- [OpenRouter] Support GPT-5.1/5.2 reasoning effort 'none' and improve error handling
|
||||
- [Thinking] Fix interleaved thinking support
|
||||
- [Memory] Fix retrieval issues and enable database backup
|
||||
- [Settings] Update default assistant settings to disable temperature
|
||||
- [OpenAI] Add persistent server configuration support
|
||||
- [Azure] Normalize Azure endpoint
|
||||
- [MCP] Check system npx/uvx before falling back to bundled binaries
|
||||
- [Prompt] Improve language instruction clarity
|
||||
- [Models] Include GPT5.2 series in verbosity check
|
||||
- [URL] Enhance urlContext validation for supported providers and models
|
||||
- [Agent] Fix Agent not working when Node.js is not installed on system
|
||||
- [Chat] Fix app crash when opening certain agents
|
||||
- [Chat] Fix reasoning process not displaying correctly for some providers
|
||||
- [Chat] Fix memory leak issue during streaming conversations
|
||||
- [MCP] Fix timeout field not accepting string format in MCP configuration
|
||||
- [Settings] Add careers section in About page
|
||||
|
||||
<!--LANG:zh-CN-->
|
||||
Cherry Studio 1.7.7 - 新模型与界面改进
|
||||
Cherry Studio 1.7.13 - 安全与问题修复
|
||||
|
||||
本次更新添加了新 AI 模型支持、OpenRouter 集成以及界面重新设计。
|
||||
|
||||
✨ 新功能
|
||||
- [模型] 添加 GLM-4.7 和 MiniMax-M2.1 模型支持
|
||||
- [服务商] 添加 OpenRouter 服务商支持
|
||||
- [OVMS] 升级至 2025.4,新增 Qwen3-4B-int4-ov 预设模型
|
||||
- [OVMS] 应用退出时关闭 OVMS 进程
|
||||
- [搜索] 历史搜索显示关键词上下文片段
|
||||
- [绘图] DMX 绘图添加扩展参数支持
|
||||
- [界面] 添加 MCP 图标并替换锤子图标
|
||||
|
||||
🎨 界面改进
|
||||
- [笔记] 将笔记设置移至笔记页弹窗,快速访问无需离开当前页面
|
||||
- [网页搜索] 采用两栏布局重新设计设置界面,添加"设为默认"按钮
|
||||
- [显示] 改进长字体名称的字体选择器
|
||||
- [传输] LanDrop 重命名为 LanTransfer
|
||||
🔒 安全修复
|
||||
- [插件] 修复 Windows 系统 DXT 插件的安全漏洞
|
||||
|
||||
🐛 问题修复
|
||||
- [API] 修复 aihubmix Anthropic API 路径
|
||||
- [OpenRouter] 支持 GPT-5.1/5.2 reasoning effort 'none' 并改进错误处理
|
||||
- [思考] 修复交错思考支持
|
||||
- [记忆] 修复检索问题并启用数据库备份
|
||||
- [设置] 更新默认助手设置禁用温度
|
||||
- [OpenAI] 添加持久化服务器配置支持
|
||||
- [Azure] 规范化 Azure 端点
|
||||
- [MCP] 优先检查系统 npx/uvx 再回退到内置二进制文件
|
||||
- [提示词] 改进语言指令清晰度
|
||||
- [模型] GPT5.2 系列添加到 verbosity 检查
|
||||
- [URL] 增强 urlContext 对支持的服务商和模型的验证
|
||||
- [Agent] 修复系统未安装 Node.js 时 Agent 功能无法使用的问题
|
||||
- [对话] 修复打开某些智能体时应用崩溃的问题
|
||||
- [对话] 修复部分服务商推理过程无法正确显示的问题
|
||||
- [对话] 修复流式对话时的内存泄漏问题
|
||||
- [MCP] 修复 MCP 配置的 timeout 字段不支持字符串格式的问题
|
||||
- [设置] 关于页面新增招聘入口
|
||||
<!--LANG:END-->
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
import react from '@vitejs/plugin-react-swc'
|
||||
import { CodeInspectorPlugin } from 'code-inspector-plugin'
|
||||
import { defineConfig, externalizeDepsPlugin } from 'electron-vite'
|
||||
import { defineConfig } from 'electron-vite'
|
||||
import { resolve } from 'path'
|
||||
import { visualizer } from 'rollup-plugin-visualizer'
|
||||
|
||||
@ -17,7 +17,7 @@ const isProd = process.env.NODE_ENV === 'production'
|
||||
|
||||
export default defineConfig({
|
||||
main: {
|
||||
plugins: [externalizeDepsPlugin(), ...visualizerPlugin('main')],
|
||||
plugins: [...visualizerPlugin('main')],
|
||||
resolve: {
|
||||
alias: {
|
||||
'@main': resolve('src/main'),
|
||||
@ -51,8 +51,7 @@ export default defineConfig({
|
||||
plugins: [
|
||||
react({
|
||||
tsDecorators: true
|
||||
}),
|
||||
externalizeDepsPlugin()
|
||||
})
|
||||
],
|
||||
resolve: {
|
||||
alias: {
|
||||
@ -68,18 +67,7 @@ export default defineConfig({
|
||||
plugins: [
|
||||
(async () => (await import('@tailwindcss/vite')).default())(),
|
||||
react({
|
||||
tsDecorators: true,
|
||||
plugins: [
|
||||
[
|
||||
'@swc/plugin-styled-components',
|
||||
{
|
||||
displayName: true, // 开发环境下启用组件名称
|
||||
fileName: false, // 不在类名中包含文件名
|
||||
pure: true, // 优化性能
|
||||
ssr: false // 不需要服务端渲染
|
||||
}
|
||||
]
|
||||
]
|
||||
tsDecorators: true
|
||||
}),
|
||||
...(isDev ? [CodeInspectorPlugin({ bundler: 'vite' })] : []), // 只在开发环境下启用 CodeInspectorPlugin
|
||||
...visualizerPlugin('renderer')
|
||||
|
||||
@ -84,7 +84,7 @@ export default defineConfig([
|
||||
{
|
||||
selector: 'CallExpression[callee.object.name="console"]',
|
||||
message:
|
||||
'❗CherryStudio uses unified LoggerService: 📖 docs/technical/how-to-use-logger-en.md\n❗CherryStudio 使用统一的日志服务:📖 docs/technical/how-to-use-logger-zh.md\n\n'
|
||||
'❗CherryStudio uses unified LoggerService: 📖 docs/en/guides/logging.md\n❗CherryStudio 使用统一的日志服务:📖 docs/zh/guides/logging.md\n\n'
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
package.json
@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "CherryStudio",
|
||||
"version": "1.7.7",
|
||||
"version": "1.7.13",
|
||||
"private": true,
|
||||
"description": "A powerful AI assistant for producer.",
|
||||
"main": "./out/main/index.js",
|
||||
@ -9,27 +9,13 @@
|
||||
"engines": {
|
||||
"node": ">=22.0.0"
|
||||
},
|
||||
"workspaces": {
|
||||
"packages": [
|
||||
"local",
|
||||
"packages/*"
|
||||
],
|
||||
"installConfig": {
|
||||
"hoistingLimits": [
|
||||
"packages/database",
|
||||
"packages/mcp-trace/trace-core",
|
||||
"packages/mcp-trace/trace-node",
|
||||
"packages/mcp-trace/trace-web",
|
||||
"packages/extension-table-plus"
|
||||
]
|
||||
}
|
||||
},
|
||||
"scripts": {
|
||||
"start": "electron-vite preview",
|
||||
"dev": "dotenv electron-vite dev",
|
||||
"dev:watch": "dotenv electron-vite dev -- -w",
|
||||
"debug": "electron-vite -- --inspect --sourcemap --remote-debugging-port=9222",
|
||||
"build": "npm run typecheck && electron-vite build",
|
||||
"build:check": "yarn lint && yarn test",
|
||||
"build:check": "pnpm lint && pnpm test",
|
||||
"build:unpack": "dotenv npm run build && electron-builder --dir",
|
||||
"build:win": "dotenv npm run build && electron-builder --win --x64 --arm64",
|
||||
"build:win:x64": "dotenv npm run build && electron-builder --win --x64",
|
||||
@ -41,68 +27,63 @@
|
||||
"build:linux:arm64": "dotenv npm run build && electron-builder --linux --arm64",
|
||||
"build:linux:x64": "dotenv npm run build && electron-builder --linux --x64",
|
||||
"release": "node scripts/version.js",
|
||||
"publish": "yarn build:check && yarn release patch push",
|
||||
"publish": "pnpm build:check && pnpm release patch push",
|
||||
"pulish:artifacts": "cd packages/artifacts && npm publish && cd -",
|
||||
"agents:generate": "NODE_ENV='development' drizzle-kit generate --config src/main/services/agents/drizzle.config.ts",
|
||||
"agents:push": "NODE_ENV='development' drizzle-kit push --config src/main/services/agents/drizzle.config.ts",
|
||||
"agents:studio": "NODE_ENV='development' drizzle-kit studio --config src/main/services/agents/drizzle.config.ts",
|
||||
"agents:drop": "NODE_ENV='development' drizzle-kit drop --config src/main/services/agents/drizzle.config.ts",
|
||||
"generate:icons": "electron-icon-builder --input=./build/logo.png --output=build",
|
||||
"analyze:renderer": "VISUALIZER_RENDERER=true yarn build",
|
||||
"analyze:main": "VISUALIZER_MAIN=true yarn build",
|
||||
"analyze:renderer": "VISUALIZER_RENDERER=true pnpm build",
|
||||
"analyze:main": "VISUALIZER_MAIN=true pnpm build",
|
||||
"typecheck": "concurrently -n \"node,web\" -c \"cyan,magenta\" \"npm run typecheck:node\" \"npm run typecheck:web\"",
|
||||
"typecheck:node": "tsgo --noEmit -p tsconfig.node.json --composite false",
|
||||
"typecheck:web": "tsgo --noEmit -p tsconfig.web.json --composite false",
|
||||
"i18n:check": "dotenv -e .env -- tsx scripts/check-i18n.ts",
|
||||
"i18n:sync": "dotenv -e .env -- tsx scripts/sync-i18n.ts",
|
||||
"i18n:translate": "dotenv -e .env -- tsx scripts/auto-translate-i18n.ts",
|
||||
"i18n:all": "yarn i18n:check && yarn i18n:sync && yarn i18n:translate",
|
||||
"i18n:all": "pnpm i18n:sync && pnpm i18n:translate",
|
||||
"update:languages": "tsx scripts/update-languages.ts",
|
||||
"update:upgrade-config": "tsx scripts/update-app-upgrade-config.ts",
|
||||
"test": "vitest run --silent",
|
||||
"test:main": "vitest run --project main",
|
||||
"test:renderer": "vitest run --project renderer",
|
||||
"test:aicore": "vitest run --project aiCore",
|
||||
"test:update": "yarn test:renderer --update",
|
||||
"test:update": "pnpm test:renderer --update",
|
||||
"test:coverage": "vitest run --coverage --silent",
|
||||
"test:ui": "vitest --ui",
|
||||
"test:watch": "vitest",
|
||||
"test:e2e": "yarn playwright test",
|
||||
"test:e2e": "pnpm playwright test",
|
||||
"test:lint": "oxlint --deny-warnings && eslint . --ext .js,.jsx,.cjs,.mjs,.ts,.tsx,.cts,.mts --cache",
|
||||
"test:scripts": "vitest scripts",
|
||||
"lint": "oxlint --fix && eslint . --ext .js,.jsx,.cjs,.mjs,.ts,.tsx,.cts,.mts --fix --cache && yarn typecheck && yarn i18n:check && yarn format:check",
|
||||
"lint": "oxlint --fix && eslint . --ext .js,.jsx,.cjs,.mjs,.ts,.tsx,.cts,.mts --fix --cache && pnpm typecheck && pnpm i18n:check && pnpm format:check",
|
||||
"format": "biome format --write && biome lint --write",
|
||||
"format:check": "biome format && biome lint",
|
||||
"prepare": "git config blame.ignoreRevsFile .git-blame-ignore-revs && husky",
|
||||
"claude": "dotenv -e .env -- claude",
|
||||
"release:aicore:alpha": "yarn workspace @cherrystudio/ai-core version prerelease --preid alpha --immediate && yarn workspace @cherrystudio/ai-core build && yarn workspace @cherrystudio/ai-core npm publish --tag alpha --access public",
|
||||
"release:aicore:beta": "yarn workspace @cherrystudio/ai-core version prerelease --preid beta --immediate && yarn workspace @cherrystudio/ai-core build && yarn workspace @cherrystudio/ai-core npm publish --tag beta --access public",
|
||||
"release:aicore": "yarn workspace @cherrystudio/ai-core version patch --immediate && yarn workspace @cherrystudio/ai-core build && yarn workspace @cherrystudio/ai-core npm publish --access public",
|
||||
"release:ai-sdk-provider": "yarn workspace @cherrystudio/ai-sdk-provider version patch --immediate && yarn workspace @cherrystudio/ai-sdk-provider build && yarn workspace @cherrystudio/ai-sdk-provider npm publish --access public"
|
||||
"release:aicore:alpha": "pnpm --filter @cherrystudio/ai-core version prerelease --preid alpha && pnpm --filter @cherrystudio/ai-core build && pnpm --filter @cherrystudio/ai-core publish --tag alpha --access public",
|
||||
"release:aicore:beta": "pnpm --filter @cherrystudio/ai-core version prerelease --preid beta && pnpm --filter @cherrystudio/ai-core build && pnpm --filter @cherrystudio/ai-core publish --tag beta --access public",
|
||||
"release:aicore": "pnpm --filter @cherrystudio/ai-core version patch && pnpm --filter @cherrystudio/ai-core build && pnpm --filter @cherrystudio/ai-core publish --access public",
|
||||
"release:ai-sdk-provider": "pnpm --filter @cherrystudio/ai-sdk-provider version patch && pnpm --filter @cherrystudio/ai-sdk-provider build && pnpm --filter @cherrystudio/ai-sdk-provider publish --access public"
|
||||
},
|
||||
"dependencies": {
|
||||
"@anthropic-ai/claude-agent-sdk": "patch:@anthropic-ai/claude-agent-sdk@npm%3A0.1.62#~/.yarn/patches/@anthropic-ai-claude-agent-sdk-npm-0.1.62-23ae56f8c8.patch",
|
||||
"@anthropic-ai/claude-agent-sdk": "0.1.76",
|
||||
"@libsql/client": "0.14.0",
|
||||
"@libsql/win32-x64-msvc": "^0.4.7",
|
||||
"@napi-rs/system-ocr": "patch:@napi-rs/system-ocr@npm%3A1.0.2#~/.yarn/patches/@napi-rs-system-ocr-npm-1.0.2-59e7a78e8b.patch",
|
||||
"@paymoapp/electron-shutdown-handler": "^1.1.2",
|
||||
"@strongtz/win32-arm64-msvc": "^0.4.7",
|
||||
"bonjour-service": "^1.3.0",
|
||||
"emoji-picker-element-data": "^1",
|
||||
"express": "^5.1.0",
|
||||
"font-list": "^2.0.0",
|
||||
"graceful-fs": "^4.2.11",
|
||||
"gray-matter": "^4.0.3",
|
||||
"js-yaml": "^4.1.0",
|
||||
"@napi-rs/system-ocr": "1.0.2",
|
||||
"@paymoapp/electron-shutdown-handler": "1.1.2",
|
||||
"express": "5.1.0",
|
||||
"font-list": "2.0.0",
|
||||
"graceful-fs": "4.2.11",
|
||||
"gray-matter": "4.0.3",
|
||||
"jsdom": "26.1.0",
|
||||
"node-stream-zip": "^1.15.0",
|
||||
"officeparser": "^4.2.0",
|
||||
"os-proxy-config": "^1.1.2",
|
||||
"selection-hook": "^1.0.12",
|
||||
"sharp": "^0.34.3",
|
||||
"swagger-jsdoc": "^6.2.8",
|
||||
"swagger-ui-express": "^5.0.1",
|
||||
"tesseract.js": "patch:tesseract.js@npm%3A6.0.1#~/.yarn/patches/tesseract.js-npm-6.0.1-2562a7e46d.patch",
|
||||
"node-stream-zip": "1.15.0",
|
||||
"officeparser": "4.2.0",
|
||||
"os-proxy-config": "1.1.2",
|
||||
"selection-hook": "1.0.12",
|
||||
"sharp": "0.34.3",
|
||||
"swagger-jsdoc": "6.2.8",
|
||||
"swagger-ui-express": "5.0.1",
|
||||
"tesseract.js": "6.0.1",
|
||||
"turndown": "7.2.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
@ -111,37 +92,48 @@
|
||||
"@agentic/tavily": "^7.3.3",
|
||||
"@ai-sdk/amazon-bedrock": "^3.0.61",
|
||||
"@ai-sdk/anthropic": "^2.0.49",
|
||||
"@ai-sdk/azure": "2.0.87",
|
||||
"@ai-sdk/cerebras": "^1.0.31",
|
||||
"@ai-sdk/gateway": "^2.0.15",
|
||||
"@ai-sdk/google": "patch:@ai-sdk/google@npm%3A2.0.49#~/.yarn/patches/@ai-sdk-google-npm-2.0.49-84720f41bd.patch",
|
||||
"@ai-sdk/google": "2.0.49",
|
||||
"@ai-sdk/google-vertex": "^3.0.94",
|
||||
"@ai-sdk/huggingface": "^0.0.10",
|
||||
"@ai-sdk/mistral": "^2.0.24",
|
||||
"@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.85#~/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch",
|
||||
"@ai-sdk/openai": "2.0.85",
|
||||
"@ai-sdk/perplexity": "^2.0.20",
|
||||
"@ai-sdk/provider": "2.0.0",
|
||||
"@ai-sdk/provider-utils": "3.0.17",
|
||||
"@ai-sdk/test-server": "^0.0.1",
|
||||
"@ai-sdk/xai": "2.0.36",
|
||||
"@ant-design/cssinjs": "1.23.0",
|
||||
"@ant-design/icons": "5.6.1",
|
||||
"@ant-design/v5-patch-for-react-19": "^1.0.3",
|
||||
"@anthropic-ai/sdk": "^0.41.0",
|
||||
"@anthropic-ai/vertex-sdk": "patch:@anthropic-ai/vertex-sdk@npm%3A0.11.4#~/.yarn/patches/@anthropic-ai-vertex-sdk-npm-0.11.4-c19cb41edb.patch",
|
||||
"@anthropic-ai/vertex-sdk": "0.11.4",
|
||||
"@aws-sdk/client-bedrock": "^3.910.0",
|
||||
"@aws-sdk/client-bedrock-runtime": "^3.910.0",
|
||||
"@aws-sdk/client-s3": "^3.910.0",
|
||||
"@biomejs/biome": "2.2.4",
|
||||
"@cherrystudio/ai-core": "workspace:^1.0.9",
|
||||
"@cherrystudio/embedjs": "^0.1.31",
|
||||
"@cherrystudio/embedjs-libsql": "^0.1.31",
|
||||
"@cherrystudio/embedjs-loader-csv": "^0.1.31",
|
||||
"@cherrystudio/embedjs-loader-image": "^0.1.31",
|
||||
"@cherrystudio/embedjs-loader-markdown": "^0.1.31",
|
||||
"@cherrystudio/embedjs-loader-msoffice": "^0.1.31",
|
||||
"@cherrystudio/embedjs-loader-pdf": "^0.1.31",
|
||||
"@cherrystudio/embedjs-loader-sitemap": "^0.1.31",
|
||||
"@cherrystudio/embedjs-loader-web": "^0.1.31",
|
||||
"@cherrystudio/embedjs-loader-xml": "^0.1.31",
|
||||
"@cherrystudio/embedjs-ollama": "^0.1.31",
|
||||
"@cherrystudio/embedjs-openai": "^0.1.31",
|
||||
"@cherrystudio/embedjs": "0.1.31",
|
||||
"@cherrystudio/embedjs-interfaces": "0.1.31",
|
||||
"@cherrystudio/embedjs-libsql": "0.1.31",
|
||||
"@cherrystudio/embedjs-loader-csv": "0.1.31",
|
||||
"@cherrystudio/embedjs-loader-image": "0.1.31",
|
||||
"@cherrystudio/embedjs-loader-markdown": "0.1.31",
|
||||
"@cherrystudio/embedjs-loader-msoffice": "0.1.31",
|
||||
"@cherrystudio/embedjs-loader-pdf": "0.1.31",
|
||||
"@cherrystudio/embedjs-loader-sitemap": "0.1.31",
|
||||
"@cherrystudio/embedjs-loader-web": "0.1.31",
|
||||
"@cherrystudio/embedjs-loader-xml": "0.1.31",
|
||||
"@cherrystudio/embedjs-ollama": "0.1.31",
|
||||
"@cherrystudio/embedjs-openai": "0.1.31",
|
||||
"@cherrystudio/embedjs-utils": "0.1.31",
|
||||
"@cherrystudio/extension-table-plus": "workspace:^",
|
||||
"@cherrystudio/openai": "^6.12.0",
|
||||
"@cherrystudio/openai": "6.15.0",
|
||||
"@codemirror/lang-json": "6.0.1",
|
||||
"@codemirror/lint": "6.8.5",
|
||||
"@codemirror/view": "6.38.1",
|
||||
"@dnd-kit/core": "^6.3.1",
|
||||
"@dnd-kit/modifiers": "^9.0.0",
|
||||
"@dnd-kit/sortable": "^10.0.0",
|
||||
@ -154,18 +146,21 @@
|
||||
"@emotion/is-prop-valid": "^1.3.1",
|
||||
"@eslint-react/eslint-plugin": "^1.36.1",
|
||||
"@eslint/js": "^9.22.0",
|
||||
"@google/genai": "patch:@google/genai@npm%3A1.0.1#~/.yarn/patches/@google-genai-npm-1.0.1-e26f0f9af7.patch",
|
||||
"@floating-ui/dom": "1.7.3",
|
||||
"@google/genai": "1.0.1",
|
||||
"@hello-pangea/dnd": "^18.0.1",
|
||||
"@kangfenmao/keyv-storage": "^0.1.0",
|
||||
"@kangfenmao/keyv-storage": "^0.1.3",
|
||||
"@langchain/community": "^1.0.0",
|
||||
"@langchain/core": "patch:@langchain/core@npm%3A1.0.2#~/.yarn/patches/@langchain-core-npm-1.0.2-183ef83fe4.patch",
|
||||
"@langchain/openai": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
|
||||
"@langchain/core": "1.0.2",
|
||||
"@langchain/openai": "1.0.0",
|
||||
"@langchain/textsplitters": "0.1.0",
|
||||
"@mistralai/mistralai": "^1.7.5",
|
||||
"@modelcontextprotocol/sdk": "^1.23.0",
|
||||
"@modelcontextprotocol/sdk": "1.23.0",
|
||||
"@mozilla/readability": "^0.6.0",
|
||||
"@notionhq/client": "^2.2.15",
|
||||
"@openrouter/ai-sdk-provider": "^1.2.8",
|
||||
"@opentelemetry/api": "^1.9.0",
|
||||
"@opentelemetry/context-async-hooks": "2.0.1",
|
||||
"@opentelemetry/core": "2.0.0",
|
||||
"@opentelemetry/exporter-trace-otlp-http": "^0.200.0",
|
||||
"@opentelemetry/sdk-trace-base": "^2.0.0",
|
||||
@ -176,6 +171,7 @@
|
||||
"@radix-ui/react-context-menu": "^2.2.16",
|
||||
"@reduxjs/toolkit": "^2.2.5",
|
||||
"@shikijs/markdown-it": "^3.12.0",
|
||||
"@swc/core": "^1.15.8",
|
||||
"@swc/plugin-styled-components": "^8.0.4",
|
||||
"@tailwindcss/vite": "^4.1.13",
|
||||
"@tanstack/react-query": "^5.85.5",
|
||||
@ -184,21 +180,25 @@
|
||||
"@testing-library/jest-dom": "^6.6.3",
|
||||
"@testing-library/react": "^16.3.0",
|
||||
"@testing-library/user-event": "^14.6.1",
|
||||
"@tiptap/extension-collaboration": "^3.2.0",
|
||||
"@tiptap/extension-drag-handle": "patch:@tiptap/extension-drag-handle@npm%3A3.2.0#~/.yarn/patches/@tiptap-extension-drag-handle-npm-3.2.0-5a9ebff7c9.patch",
|
||||
"@tiptap/extension-drag-handle-react": "^3.2.0",
|
||||
"@tiptap/extension-image": "^3.2.0",
|
||||
"@tiptap/extension-list": "^3.2.0",
|
||||
"@tiptap/extension-mathematics": "^3.2.0",
|
||||
"@tiptap/extension-mention": "^3.2.0",
|
||||
"@tiptap/extension-node-range": "^3.2.0",
|
||||
"@tiptap/extension-table-of-contents": "^3.2.0",
|
||||
"@tiptap/extension-typography": "^3.2.0",
|
||||
"@tiptap/extension-underline": "^3.2.0",
|
||||
"@tiptap/pm": "^3.2.0",
|
||||
"@tiptap/react": "^3.2.0",
|
||||
"@tiptap/starter-kit": "^3.2.0",
|
||||
"@tiptap/suggestion": "^3.2.0",
|
||||
"@tiptap/core": "3.2.0",
|
||||
"@tiptap/extension-code-block": "3.2.0",
|
||||
"@tiptap/extension-collaboration": "3.2.0",
|
||||
"@tiptap/extension-drag-handle": "3.2.0",
|
||||
"@tiptap/extension-drag-handle-react": "3.2.0",
|
||||
"@tiptap/extension-heading": "3.2.0",
|
||||
"@tiptap/extension-image": "3.2.0",
|
||||
"@tiptap/extension-link": "3.2.0",
|
||||
"@tiptap/extension-list": "3.2.0",
|
||||
"@tiptap/extension-mathematics": "3.2.0",
|
||||
"@tiptap/extension-mention": "3.2.0",
|
||||
"@tiptap/extension-node-range": "3.2.0",
|
||||
"@tiptap/extension-table-of-contents": "3.2.0",
|
||||
"@tiptap/extension-typography": "3.2.0",
|
||||
"@tiptap/extension-underline": "3.2.0",
|
||||
"@tiptap/pm": "3.2.0",
|
||||
"@tiptap/react": "3.2.0",
|
||||
"@tiptap/starter-kit": "3.2.0",
|
||||
"@tiptap/suggestion": "3.2.0",
|
||||
"@tiptap/y-tiptap": "^3.0.0",
|
||||
"@truto/turndown-plugin-gfm": "^1.0.2",
|
||||
"@tryfabric/martian": "^1.2.4",
|
||||
@ -209,14 +209,17 @@
|
||||
"@types/dotenv": "^8.2.3",
|
||||
"@types/express": "^5",
|
||||
"@types/fs-extra": "^11",
|
||||
"@types/hast": "^3.0.4",
|
||||
"@types/he": "^1",
|
||||
"@types/html-to-text": "^9",
|
||||
"@types/js-yaml": "^4.0.9",
|
||||
"@types/json-schema": "7.0.15",
|
||||
"@types/lodash": "^4.17.5",
|
||||
"@types/markdown-it": "^14",
|
||||
"@types/md5": "^2.3.5",
|
||||
"@types/mdast": "4.0.4",
|
||||
"@types/mime-types": "^3",
|
||||
"@types/node": "^22.17.1",
|
||||
"@types/node": "22.17.2",
|
||||
"@types/pako": "^1.0.2",
|
||||
"@types/react": "^19.2.7",
|
||||
"@types/react-dom": "^19.2.3",
|
||||
@ -227,9 +230,10 @@
|
||||
"@types/swagger-ui-express": "^4.1.8",
|
||||
"@types/tinycolor2": "^1",
|
||||
"@types/turndown": "^5.0.5",
|
||||
"@types/unist": "3.0.3",
|
||||
"@types/uuid": "^10.0.0",
|
||||
"@types/word-extractor": "^1",
|
||||
"@typescript/native-preview": "latest",
|
||||
"@typescript/native-preview": "7.0.0-dev.20250915.1",
|
||||
"@uiw/codemirror-extensions-langs": "^4.25.1",
|
||||
"@uiw/codemirror-themes-all": "^4.25.1",
|
||||
"@uiw/react-codemirror": "^4.25.1",
|
||||
@ -241,12 +245,16 @@
|
||||
"@viz-js/lang-dot": "^1.0.5",
|
||||
"@viz-js/viz": "^3.14.0",
|
||||
"@xyflow/react": "^12.4.4",
|
||||
"adm-zip": "0.4.16",
|
||||
"ai": "^5.0.98",
|
||||
"antd": "patch:antd@npm%3A5.27.0#~/.yarn/patches/antd-npm-5.27.0-aa91c36546.patch",
|
||||
"antd": "5.27.0",
|
||||
"archiver": "^7.0.1",
|
||||
"async-mutex": "^0.5.0",
|
||||
"axios": "^1.7.3",
|
||||
"bonjour-service": "1.3.0",
|
||||
"browser-image-compression": "^2.0.2",
|
||||
"builder-util-runtime": "9.5.0",
|
||||
"chalk": "4.1.2",
|
||||
"chardet": "^2.1.0",
|
||||
"check-disk-space": "3.4.0",
|
||||
"cheerio": "^1.1.2",
|
||||
@ -255,8 +263,11 @@
|
||||
"cli-progress": "^3.12.0",
|
||||
"clsx": "^2.1.1",
|
||||
"code-inspector-plugin": "^0.20.14",
|
||||
"codemirror-lang-mermaid": "0.5.0",
|
||||
"color": "^5.0.0",
|
||||
"commander": "^14.0.2",
|
||||
"concurrently": "^9.2.1",
|
||||
"cors": "2.8.5",
|
||||
"country-flag-emoji-polyfill": "0.1.8",
|
||||
"dayjs": "^1.11.11",
|
||||
"dexie": "^4.0.8",
|
||||
@ -264,6 +275,7 @@
|
||||
"diff": "^8.0.2",
|
||||
"docx": "^9.0.2",
|
||||
"dompurify": "^3.2.6",
|
||||
"dotenv": "16.6.1",
|
||||
"dotenv-cli": "^7.4.2",
|
||||
"drizzle-kit": "^0.31.4",
|
||||
"drizzle-orm": "^0.44.5",
|
||||
@ -272,12 +284,13 @@
|
||||
"electron-devtools-installer": "^3.2.0",
|
||||
"electron-reload": "^2.0.0-alpha.1",
|
||||
"electron-store": "^8.2.0",
|
||||
"electron-updater": "patch:electron-updater@npm%3A6.7.0#~/.yarn/patches/electron-updater-npm-6.7.0-47b11bb0d4.patch",
|
||||
"electron-vite": "4.0.1",
|
||||
"electron-updater": "6.7.0",
|
||||
"electron-vite": "5.0.0",
|
||||
"electron-window-state": "^5.0.3",
|
||||
"emittery": "^1.0.3",
|
||||
"emoji-picker-element": "^1.22.1",
|
||||
"epub": "patch:epub@npm%3A1.3.0#~/.yarn/patches/epub-npm-1.3.0-8325494ffe.patch",
|
||||
"emoji-picker-element-data": "1",
|
||||
"epub": "1.3.0",
|
||||
"eslint": "^9.22.0",
|
||||
"eslint-plugin-import-zod": "^1.2.0",
|
||||
"eslint-plugin-oxlint": "^1.15.0",
|
||||
@ -288,6 +301,7 @@
|
||||
"fast-diff": "^1.3.0",
|
||||
"fast-xml-parser": "^5.2.0",
|
||||
"fetch-socks": "1.3.2",
|
||||
"form-data": "4.0.4",
|
||||
"framer-motion": "^12.23.12",
|
||||
"franc-min": "^6.2.0",
|
||||
"fs-extra": "^11.2.0",
|
||||
@ -304,6 +318,11 @@
|
||||
"isbinaryfile": "5.0.4",
|
||||
"jaison": "^2.0.2",
|
||||
"jest-styled-components": "^7.2.0",
|
||||
"js-base64": "3.7.7",
|
||||
"js-yaml": "4.1.0",
|
||||
"json-schema": "0.4.0",
|
||||
"katex": "0.16.22",
|
||||
"ky": "1.8.1",
|
||||
"linguist-languages": "^8.1.0",
|
||||
"lint-staged": "^15.5.0",
|
||||
"lodash": "^4.17.21",
|
||||
@ -311,19 +330,27 @@
|
||||
"lucide-react": "^0.525.0",
|
||||
"macos-release": "^3.4.0",
|
||||
"markdown-it": "^14.1.0",
|
||||
"md5": "2.3.0",
|
||||
"mermaid": "^11.10.1",
|
||||
"mime": "^4.0.4",
|
||||
"mime-types": "^3.0.1",
|
||||
"motion": "^12.10.5",
|
||||
"nanoid": "3.3.11",
|
||||
"notion-helper": "^1.3.22",
|
||||
"npx-scope-finder": "^1.2.0",
|
||||
"ollama-ai-provider-v2": "patch:ollama-ai-provider-v2@npm%3A1.5.5#~/.yarn/patches/ollama-ai-provider-v2-npm-1.5.5-8bef249af9.patch",
|
||||
"ollama-ai-provider-v2": "1.5.5",
|
||||
"open": "^8.4.2",
|
||||
"oxlint": "^1.22.0",
|
||||
"oxlint-tsgolint": "^0.2.0",
|
||||
"p-queue": "^8.1.0",
|
||||
"pako": "1.0.11",
|
||||
"pdf-lib": "^1.17.1",
|
||||
"pdf-parse": "^1.1.1",
|
||||
"prosemirror-model": "1.25.2",
|
||||
"proxy-agent": "^6.5.0",
|
||||
"rc-input": "1.8.0",
|
||||
"rc-select": "14.16.6",
|
||||
"rc-virtual-list": "3.18.6",
|
||||
"react": "^19.2.0",
|
||||
"react-dom": "^19.2.0",
|
||||
"react-error-boundary": "^6.0.0",
|
||||
@ -350,8 +377,11 @@
|
||||
"remark-gfm": "^4.0.1",
|
||||
"remark-github-blockquote-alert": "^2.0.0",
|
||||
"remark-math": "^6.0.0",
|
||||
"remark-parse": "11.0.0",
|
||||
"remark-stringify": "11.0.0",
|
||||
"remove-markdown": "^0.6.2",
|
||||
"rollup-plugin-visualizer": "^5.12.0",
|
||||
"semver": "7.7.1",
|
||||
"shiki": "^3.12.0",
|
||||
"strict-url-sanitise": "^0.0.1",
|
||||
"string-width": "^7.2.0",
|
||||
@ -366,11 +396,12 @@
|
||||
"tsx": "^4.20.3",
|
||||
"turndown-plugin-gfm": "^1.0.2",
|
||||
"tw-animate-css": "^1.3.8",
|
||||
"typescript": "~5.8.2",
|
||||
"typescript": "~5.8.3",
|
||||
"undici": "6.21.2",
|
||||
"unified": "^11.0.5",
|
||||
"unist-util-visit": "5.0.0",
|
||||
"uuid": "^13.0.0",
|
||||
"vite": "npm:rolldown-vite@7.1.5",
|
||||
"vite": "npm:rolldown-vite@7.3.0",
|
||||
"vitest": "^3.2.4",
|
||||
"webdav": "^5.8.0",
|
||||
"winston": "^3.17.0",
|
||||
@ -383,44 +414,68 @@
|
||||
"zipread": "^1.3.3",
|
||||
"zod": "^4.1.5"
|
||||
},
|
||||
"resolutions": {
|
||||
"@smithy/types": "4.7.1",
|
||||
"@codemirror/language": "6.11.3",
|
||||
"@codemirror/lint": "6.8.5",
|
||||
"@codemirror/view": "6.38.1",
|
||||
"@langchain/core@npm:^0.3.26": "patch:@langchain/core@npm%3A1.0.2#~/.yarn/patches/@langchain-core-npm-1.0.2-183ef83fe4.patch",
|
||||
"atomically@npm:^1.7.0": "patch:atomically@npm%3A1.7.0#~/.yarn/patches/atomically-npm-1.7.0-e742e5293b.patch",
|
||||
"esbuild": "^0.25.0",
|
||||
"file-stream-rotator@npm:^0.6.1": "patch:file-stream-rotator@npm%3A0.6.1#~/.yarn/patches/file-stream-rotator-npm-0.6.1-eab45fb13d.patch",
|
||||
"libsql@npm:^0.4.4": "patch:libsql@npm%3A0.4.7#~/.yarn/patches/libsql-npm-0.4.7-444e260fb1.patch",
|
||||
"node-abi": "4.24.0",
|
||||
"openai@npm:^4.77.0": "npm:@cherrystudio/openai@6.5.0",
|
||||
"openai@npm:^4.87.3": "npm:@cherrystudio/openai@6.5.0",
|
||||
"pdf-parse@npm:1.1.1": "patch:pdf-parse@npm%3A1.1.1#~/.yarn/patches/pdf-parse-npm-1.1.1-04a6109b2a.patch",
|
||||
"pkce-challenge@npm:^4.1.0": "patch:pkce-challenge@npm%3A4.1.0#~/.yarn/patches/pkce-challenge-npm-4.1.0-fbc51695a3.patch",
|
||||
"tar-fs": "^2.1.4",
|
||||
"undici": "6.21.2",
|
||||
"vite": "npm:rolldown-vite@7.1.5",
|
||||
"tesseract.js@npm:*": "patch:tesseract.js@npm%3A6.0.1#~/.yarn/patches/tesseract.js-npm-6.0.1-2562a7e46d.patch",
|
||||
"@ai-sdk/openai@npm:^2.0.52": "patch:@ai-sdk/openai@npm%3A2.0.52#~/.yarn/patches/@ai-sdk-openai-npm-2.0.52-b36d949c76.patch",
|
||||
"@img/sharp-darwin-arm64": "0.34.3",
|
||||
"@img/sharp-darwin-x64": "0.34.3",
|
||||
"@img/sharp-linux-arm": "0.34.3",
|
||||
"@img/sharp-linux-arm64": "0.34.3",
|
||||
"@img/sharp-linux-x64": "0.34.3",
|
||||
"@img/sharp-win32-x64": "0.34.3",
|
||||
"openai@npm:5.12.2": "npm:@cherrystudio/openai@6.5.0",
|
||||
"@langchain/openai@npm:>=0.1.0 <0.6.0": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
|
||||
"@langchain/openai@npm:^0.3.16": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
|
||||
"@langchain/openai@npm:>=0.2.0 <0.7.0": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
|
||||
"@ai-sdk/openai@npm:^2.0.42": "patch:@ai-sdk/openai@npm%3A2.0.85#~/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch",
|
||||
"@ai-sdk/google@npm:^2.0.40": "patch:@ai-sdk/google@npm%3A2.0.40#~/.yarn/patches/@ai-sdk-google-npm-2.0.40-47e0eeee83.patch",
|
||||
"@ai-sdk/openai-compatible@npm:^1.0.27": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch",
|
||||
"@ai-sdk/google@npm:2.0.49": "patch:@ai-sdk/google@npm%3A2.0.49#~/.yarn/patches/@ai-sdk-google-npm-2.0.49-84720f41bd.patch",
|
||||
"@ai-sdk/openai-compatible@npm:1.0.27": "patch:@ai-sdk/openai-compatible@npm%3A1.0.28#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.28-5705188855.patch",
|
||||
"@ai-sdk/openai-compatible@npm:^1.0.19": "patch:@ai-sdk/openai-compatible@npm%3A1.0.28#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.28-5705188855.patch"
|
||||
"pnpm": {
|
||||
"overrides": {
|
||||
"@smithy/types": "4.7.1",
|
||||
"@codemirror/language": "6.11.3",
|
||||
"@codemirror/lint": "6.8.5",
|
||||
"@codemirror/view": "6.38.1",
|
||||
"esbuild": "^0.25.0",
|
||||
"node-abi": "4.24.0",
|
||||
"openai": "npm:@cherrystudio/openai@6.15.0",
|
||||
"tar-fs": "^2.1.4",
|
||||
"undici": "6.21.2",
|
||||
"vite": "npm:rolldown-vite@7.3.0",
|
||||
"@img/sharp-darwin-arm64": "0.34.3",
|
||||
"@img/sharp-darwin-x64": "0.34.3",
|
||||
"@img/sharp-linux-arm": "0.34.3",
|
||||
"@img/sharp-linux-arm64": "0.34.3",
|
||||
"@img/sharp-linux-x64": "0.34.3",
|
||||
"@img/sharp-win32-x64": "0.34.3",
|
||||
"@langchain/core": "1.0.2",
|
||||
"@ai-sdk/openai-compatible@1.0.27": "1.0.28",
|
||||
"@ai-sdk/openai-compatible@1.0.30": "1.0.28"
|
||||
},
|
||||
"patchedDependencies": {
|
||||
"@napi-rs/system-ocr@1.0.2": "patches/@napi-rs-system-ocr-npm-1.0.2-59e7a78e8b.patch",
|
||||
"tesseract.js@6.0.1": "patches/tesseract.js-npm-6.0.1-2562a7e46d.patch",
|
||||
"@ai-sdk/google@2.0.49": "patches/@ai-sdk-google-npm-2.0.49-84720f41bd.patch",
|
||||
"@ai-sdk/openai@2.0.85": "patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch",
|
||||
"@anthropic-ai/vertex-sdk@0.11.4": "patches/@anthropic-ai-vertex-sdk-npm-0.11.4-c19cb41edb.patch",
|
||||
"@google/genai@1.0.1": "patches/@google-genai-npm-1.0.1-e26f0f9af7.patch",
|
||||
"@langchain/core@1.0.2": "patches/@langchain-core-npm-1.0.2-183ef83fe4.patch",
|
||||
"@langchain/openai@1.0.0": "patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
|
||||
"@tiptap/extension-drag-handle@3.2.0": "patches/@tiptap-extension-drag-handle-npm-3.2.0-5a9ebff7c9.patch",
|
||||
"antd@5.27.0": "patches/antd-npm-5.27.0-aa91c36546.patch",
|
||||
"electron-updater@6.7.0": "patches/electron-updater-npm-6.7.0-47b11bb0d4.patch",
|
||||
"epub@1.3.0": "patches/epub-npm-1.3.0-8325494ffe.patch",
|
||||
"ollama-ai-provider-v2@1.5.5": "patches/ollama-ai-provider-v2-npm-1.5.5-8bef249af9.patch",
|
||||
"atomically@1.7.0": "patches/atomically-npm-1.7.0-e742e5293b.patch",
|
||||
"file-stream-rotator@0.6.1": "patches/file-stream-rotator-npm-0.6.1-eab45fb13d.patch",
|
||||
"libsql@0.4.7": "patches/libsql-npm-0.4.7-444e260fb1.patch",
|
||||
"pdf-parse@1.1.1": "patches/pdf-parse-npm-1.1.1-04a6109b2a.patch",
|
||||
"@ai-sdk/openai-compatible@1.0.28": "patches/@ai-sdk__openai-compatible@1.0.28.patch",
|
||||
"@anthropic-ai/claude-agent-sdk@0.1.76": "patches/@anthropic-ai__claude-agent-sdk@0.1.76.patch",
|
||||
"@openrouter/ai-sdk-provider": "patches/@openrouter__ai-sdk-provider.patch"
|
||||
},
|
||||
"onlyBuiltDependencies": [
|
||||
"@kangfenmao/keyv-storage",
|
||||
"@paymoapp/electron-shutdown-handler",
|
||||
"@scarf/scarf",
|
||||
"@swc/core",
|
||||
"electron",
|
||||
"electron-winstaller",
|
||||
"esbuild",
|
||||
"msw",
|
||||
"protobufjs",
|
||||
"registry-js",
|
||||
"selection-hook",
|
||||
"sharp",
|
||||
"tesseract.js",
|
||||
"zipfile"
|
||||
]
|
||||
},
|
||||
"packageManager": "yarn@4.9.1",
|
||||
"packageManager": "pnpm@10.27.0",
|
||||
"lint-staged": {
|
||||
"*.{js,jsx,ts,tsx,cjs,mjs,cts,mts}": [
|
||||
"biome format --write --no-errors-on-unmatched",
|
||||
@ -429,5 +484,27 @@
|
||||
"*.{json,yml,yaml,css,html}": [
|
||||
"biome format --write --no-errors-on-unmatched"
|
||||
]
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"@img/sharp-darwin-arm64": "0.34.3",
|
||||
"@img/sharp-darwin-x64": "0.34.3",
|
||||
"@img/sharp-libvips-darwin-arm64": "1.2.0",
|
||||
"@img/sharp-libvips-darwin-x64": "1.2.0",
|
||||
"@img/sharp-libvips-linux-arm64": "1.2.0",
|
||||
"@img/sharp-libvips-linux-x64": "1.2.0",
|
||||
"@img/sharp-linux-arm64": "0.34.3",
|
||||
"@img/sharp-linux-x64": "0.34.3",
|
||||
"@img/sharp-win32-arm64": "0.34.3",
|
||||
"@img/sharp-win32-x64": "0.34.3",
|
||||
"@libsql/darwin-arm64": "0.4.7",
|
||||
"@libsql/darwin-x64": "0.4.7",
|
||||
"@libsql/linux-arm64-gnu": "0.4.7",
|
||||
"@libsql/linux-x64-gnu": "0.4.7",
|
||||
"@libsql/win32-x64-msvc": "0.4.7",
|
||||
"@napi-rs/system-ocr-darwin-arm64": "1.0.2",
|
||||
"@napi-rs/system-ocr-darwin-x64": "1.0.2",
|
||||
"@napi-rs/system-ocr-win32-arm64-msvc": "1.0.2",
|
||||
"@napi-rs/system-ocr-win32-x64-msvc": "1.0.2",
|
||||
"@strongtz/win32-arm64-msvc": "0.4.7"
|
||||
}
|
||||
}
|
||||
|
||||
@ -8,7 +8,7 @@ It exposes the CherryIN OpenAI-compatible entrypoints and dynamically routes Ant
|
||||
```bash
|
||||
npm install ai @cherrystudio/ai-sdk-provider @ai-sdk/anthropic @ai-sdk/google @ai-sdk/openai
|
||||
# or
|
||||
yarn add ai @cherrystudio/ai-sdk-provider @ai-sdk/anthropic @ai-sdk/google @ai-sdk/openai
|
||||
pnpm add ai @cherrystudio/ai-sdk-provider @ai-sdk/anthropic @ai-sdk/google @ai-sdk/openai
|
||||
```
|
||||
|
||||
> **Note**: This package requires peer dependencies `ai`, `@ai-sdk/anthropic`, `@ai-sdk/google`, and `@ai-sdk/openai` to be installed.
|
||||
|
||||
@ -41,7 +41,7 @@
|
||||
"ai": "^5.0.26"
|
||||
},
|
||||
"dependencies": {
|
||||
"@ai-sdk/openai-compatible": "patch:@ai-sdk/openai-compatible@npm%3A1.0.28#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.28-5705188855.patch",
|
||||
"@ai-sdk/openai-compatible": "1.0.28",
|
||||
"@ai-sdk/provider": "^2.0.0",
|
||||
"@ai-sdk/provider-utils": "^3.0.17"
|
||||
},
|
||||
|
||||
@ -42,7 +42,7 @@
|
||||
"@ai-sdk/anthropic": "^2.0.49",
|
||||
"@ai-sdk/azure": "^2.0.87",
|
||||
"@ai-sdk/deepseek": "^1.0.31",
|
||||
"@ai-sdk/openai-compatible": "patch:@ai-sdk/openai-compatible@npm%3A1.0.28#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.28-5705188855.patch",
|
||||
"@ai-sdk/openai-compatible": "1.0.28",
|
||||
"@ai-sdk/provider": "^2.0.0",
|
||||
"@ai-sdk/provider-utils": "^3.0.17",
|
||||
"@ai-sdk/xai": "^2.0.36",
|
||||
|
||||
@ -21,9 +21,6 @@ const TOOL_USE_TAG_CONFIG: TagConfig = {
|
||||
separator: '\n'
|
||||
}
|
||||
|
||||
/**
|
||||
* 默认系统提示符模板
|
||||
*/
|
||||
export const DEFAULT_SYSTEM_PROMPT = `In this environment you have access to a set of tools you can use to answer the user's question. \
|
||||
You can use one or more tools per message, and will receive the result of that tool use in the user's response. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
|
||||
|
||||
@ -38,10 +35,16 @@ Tool use is formatted using XML-style tags. The tool name is enclosed in opening
|
||||
|
||||
The tool name should be the exact name of the tool you are using, and the arguments should be a JSON object containing the parameters required by that tool. For example:
|
||||
<tool_use>
|
||||
<name>python_interpreter</name>
|
||||
<arguments>{"code": "5 + 3 + 1294.678"}</arguments>
|
||||
<name>search</name>
|
||||
<arguments>{ "query": "browser,fetch" }</arguments>
|
||||
</tool_use>
|
||||
|
||||
<tool_use>
|
||||
<name>exec</name>
|
||||
<arguments>{ "code": "const page = await CherryBrowser_fetch({ url: "https://example.com" })\nreturn page" }</arguments>
|
||||
</tool_use>
|
||||
|
||||
|
||||
The user will respond with the result of the tool use, which should be formatted as follows:
|
||||
|
||||
<tool_use_result>
|
||||
@ -59,13 +62,6 @@ For example, if the result of the tool use is an image file, you can use it in t
|
||||
|
||||
Always adhere to this format for the tool use to ensure proper parsing and execution.
|
||||
|
||||
## Tool Use Examples
|
||||
{{ TOOL_USE_EXAMPLES }}
|
||||
|
||||
## Tool Use Available Tools
|
||||
Above example were using notional tools that might not exist for you. You only have access to these tools:
|
||||
{{ AVAILABLE_TOOLS }}
|
||||
|
||||
## Tool Use Rules
|
||||
Here are the rules you should always follow to solve your task:
|
||||
1. Always use the right arguments for the tools. Never use variable names as the action arguments, use the value instead.
|
||||
@ -74,6 +70,8 @@ Here are the rules you should always follow to solve your task:
|
||||
4. Never re-do a tool call that you previously did with the exact same parameters.
|
||||
5. For tool use, MAKE SURE use XML tag format as shown in the examples above. Do not use any other format.
|
||||
|
||||
{{ TOOLS_INFO }}
|
||||
|
||||
## Response rules
|
||||
|
||||
Respond in the language of the user's query, unless the user instructions specify additional requirements for the language to be used.
|
||||
@ -154,7 +152,8 @@ User: <tool_use_result>
|
||||
<name>search</name>
|
||||
<result>26 million (2019)</result>
|
||||
</tool_use_result>
|
||||
Assistant: The population of Shanghai is 26 million, while Guangzhou has a population of 15 million. Therefore, Shanghai has the highest population.`
|
||||
|
||||
A: The population of Shanghai is 26 million, while Guangzhou has a population of 15 million. Therefore, Shanghai has the highest population.`
|
||||
|
||||
/**
|
||||
* 构建可用工具部分(提取自 Cherry Studio)
|
||||
@ -184,13 +183,30 @@ ${result}
|
||||
/**
|
||||
* 默认的系统提示符构建函数(提取自 Cherry Studio)
|
||||
*/
|
||||
function defaultBuildSystemPrompt(userSystemPrompt: string, tools: ToolSet): string {
|
||||
function defaultBuildSystemPrompt(userSystemPrompt: string, tools: ToolSet, mcpMode?: string): string {
|
||||
const availableTools = buildAvailableTools(tools)
|
||||
if (availableTools === null) return userSystemPrompt
|
||||
|
||||
const fullPrompt = DEFAULT_SYSTEM_PROMPT.replace('{{ TOOL_USE_EXAMPLES }}', DEFAULT_TOOL_USE_EXAMPLES)
|
||||
if (mcpMode == 'auto') {
|
||||
return DEFAULT_SYSTEM_PROMPT.replace('{{ TOOLS_INFO }}', '').replace(
|
||||
'{{ USER_SYSTEM_PROMPT }}',
|
||||
userSystemPrompt || ''
|
||||
)
|
||||
}
|
||||
const toolsInfo = `
|
||||
## Tool Use Examples
|
||||
{{ TOOL_USE_EXAMPLES }}
|
||||
|
||||
## Tool Use Available Tools
|
||||
Above example were using notional tools that might not exist for you. You only have access to these tools:
|
||||
{{ AVAILABLE_TOOLS }}`
|
||||
.replace('{{ TOOL_USE_EXAMPLES }}', DEFAULT_TOOL_USE_EXAMPLES)
|
||||
.replace('{{ AVAILABLE_TOOLS }}', availableTools)
|
||||
.replace('{{ USER_SYSTEM_PROMPT }}', userSystemPrompt || '')
|
||||
|
||||
const fullPrompt = DEFAULT_SYSTEM_PROMPT.replace('{{ TOOLS_INFO }}', toolsInfo).replace(
|
||||
'{{ USER_SYSTEM_PROMPT }}',
|
||||
userSystemPrompt || ''
|
||||
)
|
||||
|
||||
return fullPrompt
|
||||
}
|
||||
@ -223,7 +239,17 @@ function defaultParseToolUse(content: string, tools: ToolSet): { results: ToolUs
|
||||
// Find all tool use blocks
|
||||
while ((match = toolUsePattern.exec(contentToProcess)) !== null) {
|
||||
const fullMatch = match[0]
|
||||
const toolName = match[2].trim()
|
||||
let toolName = match[2].trim()
|
||||
switch (toolName.toLowerCase()) {
|
||||
case 'search':
|
||||
toolName = 'mcp__CherryHub__search'
|
||||
break
|
||||
case 'exec':
|
||||
toolName = 'mcp__CherryHub__exec'
|
||||
break
|
||||
default:
|
||||
break
|
||||
}
|
||||
const toolArgs = match[4].trim()
|
||||
|
||||
// Try to parse the arguments as JSON
|
||||
@ -255,7 +281,12 @@ function defaultParseToolUse(content: string, tools: ToolSet): { results: ToolUs
|
||||
}
|
||||
|
||||
export const createPromptToolUsePlugin = (config: PromptToolUseConfig = {}) => {
|
||||
const { enabled = true, buildSystemPrompt = defaultBuildSystemPrompt, parseToolUse = defaultParseToolUse } = config
|
||||
const {
|
||||
enabled = true,
|
||||
buildSystemPrompt = defaultBuildSystemPrompt,
|
||||
parseToolUse = defaultParseToolUse,
|
||||
mcpMode
|
||||
} = config
|
||||
|
||||
return definePlugin({
|
||||
name: 'built-in:prompt-tool-use',
|
||||
@ -285,7 +316,7 @@ export const createPromptToolUsePlugin = (config: PromptToolUseConfig = {}) => {
|
||||
|
||||
// 构建系统提示符(只包含非 provider-defined 工具)
|
||||
const userSystemPrompt = typeof params.system === 'string' ? params.system : ''
|
||||
const systemPrompt = buildSystemPrompt(userSystemPrompt, promptTools)
|
||||
const systemPrompt = buildSystemPrompt(userSystemPrompt, promptTools, mcpMode)
|
||||
let systemMessage: string | null = systemPrompt
|
||||
if (config.createSystemMessage) {
|
||||
// 🎯 如果用户提供了自定义处理函数,使用它
|
||||
|
||||
@ -23,6 +23,7 @@ export interface PromptToolUseConfig extends BaseToolUsePluginConfig {
|
||||
// 自定义工具解析函数(可选,有默认实现)
|
||||
parseToolUse?: (content: string, tools: ToolSet) => { results: ToolUseResult[]; content: string }
|
||||
createSystemMessage?: (systemPrompt: string, originalParams: any, context: AiRequestContext) => string | null
|
||||
mcpMode?: string
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@ -68,8 +68,8 @@
|
||||
],
|
||||
"devDependencies": {
|
||||
"@biomejs/biome": "2.2.4",
|
||||
"@tiptap/core": "^3.2.0",
|
||||
"@tiptap/pm": "^3.2.0",
|
||||
"@tiptap/core": "3.2.0",
|
||||
"@tiptap/pm": "3.2.0",
|
||||
"eslint": "^9.22.0",
|
||||
"eslint-plugin-react-hooks": "^5.2.0",
|
||||
"eslint-plugin-simple-import-sort": "^12.1.1",
|
||||
@ -89,5 +89,5 @@
|
||||
"build": "tsdown",
|
||||
"lint": "biome format ./src/ --write && eslint --fix ./src/"
|
||||
},
|
||||
"packageManager": "yarn@4.9.1"
|
||||
"packageManager": "pnpm@10.27.0"
|
||||
}
|
||||
|
||||
@ -364,6 +364,7 @@ export enum IpcChannel {
|
||||
OCR_ListProviders = 'ocr:list-providers',
|
||||
|
||||
// OVMS
|
||||
Ovms_IsSupported = 'ovms:is-supported',
|
||||
Ovms_AddModel = 'ovms:add-model',
|
||||
Ovms_StopAddModel = 'ovms:stop-addmodel',
|
||||
Ovms_GetModels = 'ovms:get-models',
|
||||
|
||||
packages/shared/__tests__/utils.test.ts
Normal file
@ -0,0 +1,138 @@
|
||||
import { describe, expect, it } from 'vitest'
|
||||
|
||||
import { isBase64ImageDataUrl, isDataUrl, parseDataUrl } from '../utils'
|
||||
|
||||
describe('parseDataUrl', () => {
|
||||
it('parses a standard base64 image data URL', () => {
|
||||
const result = parseDataUrl('data:image/png;base64,iVBORw0KGgo=')
|
||||
expect(result).toEqual({
|
||||
mediaType: 'image/png',
|
||||
isBase64: true,
|
||||
data: 'iVBORw0KGgo='
|
||||
})
|
||||
})
|
||||
|
||||
it('parses a base64 data URL with additional parameters', () => {
|
||||
const result = parseDataUrl('data:image/jpeg;name=foo;base64,/9j/4AAQ')
|
||||
expect(result).toEqual({
|
||||
mediaType: 'image/jpeg',
|
||||
isBase64: true,
|
||||
data: '/9j/4AAQ'
|
||||
})
|
||||
})
|
||||
|
||||
it('parses a plain text data URL (non-base64)', () => {
|
||||
const result = parseDataUrl('data:text/plain,Hello%20World')
|
||||
expect(result).toEqual({
|
||||
mediaType: 'text/plain',
|
||||
isBase64: false,
|
||||
data: 'Hello%20World'
|
||||
})
|
||||
})
|
||||
|
||||
it('parses a data URL with empty media type', () => {
|
||||
const result = parseDataUrl('data:;base64,SGVsbG8=')
|
||||
expect(result).toEqual({
|
||||
mediaType: undefined,
|
||||
isBase64: true,
|
||||
data: 'SGVsbG8='
|
||||
})
|
||||
})
|
||||
|
||||
it('returns null for non-data URLs', () => {
|
||||
const result = parseDataUrl('https://example.com/image.png')
|
||||
expect(result).toBeNull()
|
||||
})
|
||||
|
||||
it('returns null for malformed data URL without comma', () => {
|
||||
const result = parseDataUrl('data:image/png;base64')
|
||||
expect(result).toBeNull()
|
||||
})
|
||||
|
||||
it('handles empty string', () => {
|
||||
const result = parseDataUrl('')
|
||||
expect(result).toBeNull()
|
||||
})
|
||||
|
||||
it('handles large base64 data without performance issues', () => {
|
||||
// Simulate a 4K image base64 string (about 1MB)
|
||||
const largeData = 'A'.repeat(1024 * 1024)
|
||||
const dataUrl = `data:image/png;base64,${largeData}`
|
||||
|
||||
const start = performance.now()
|
||||
const result = parseDataUrl(dataUrl)
|
||||
const duration = performance.now() - start
|
||||
|
||||
expect(result).not.toBeNull()
|
||||
expect(result?.mediaType).toBe('image/png')
|
||||
expect(result?.isBase64).toBe(true)
|
||||
expect(result?.data).toBe(largeData)
|
||||
// Should complete in under 10ms (string operations are fast)
|
||||
expect(duration).toBeLessThan(10)
|
||||
})
|
||||
|
||||
it('parses SVG data URL', () => {
|
||||
const result = parseDataUrl('data:image/svg+xml;base64,PHN2Zz4=')
|
||||
expect(result).toEqual({
|
||||
mediaType: 'image/svg+xml',
|
||||
isBase64: true,
|
||||
data: 'PHN2Zz4='
|
||||
})
|
||||
})
|
||||
|
||||
it('parses JSON data URL', () => {
|
||||
const result = parseDataUrl('data:application/json,{"key":"value"}')
|
||||
expect(result).toEqual({
|
||||
mediaType: 'application/json',
|
||||
isBase64: false,
|
||||
data: '{"key":"value"}'
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('isDataUrl', () => {
|
||||
it('returns true for valid data URLs', () => {
|
||||
expect(isDataUrl('data:image/png;base64,ABC')).toBe(true)
|
||||
expect(isDataUrl('data:text/plain,hello')).toBe(true)
|
||||
expect(isDataUrl('data:,simple')).toBe(true)
|
||||
})
|
||||
|
||||
it('returns false for non-data URLs', () => {
|
||||
expect(isDataUrl('https://example.com')).toBe(false)
|
||||
expect(isDataUrl('file:///path/to/file')).toBe(false)
|
||||
expect(isDataUrl('')).toBe(false)
|
||||
})
|
||||
|
||||
it('returns false for malformed data URLs', () => {
|
||||
expect(isDataUrl('data:')).toBe(false)
|
||||
expect(isDataUrl('data:image/png')).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
describe('isBase64ImageDataUrl', () => {
|
||||
it('returns true for base64 image data URLs', () => {
|
||||
expect(isBase64ImageDataUrl('data:image/png;base64,ABC')).toBe(true)
|
||||
expect(isBase64ImageDataUrl('data:image/jpeg;base64,/9j/')).toBe(true)
|
||||
expect(isBase64ImageDataUrl('data:image/gif;base64,R0lG')).toBe(true)
|
||||
expect(isBase64ImageDataUrl('data:image/webp;base64,UklG')).toBe(true)
|
||||
})
|
||||
|
||||
it('returns false for non-base64 image data URLs', () => {
|
||||
expect(isBase64ImageDataUrl('data:image/svg+xml,<svg></svg>')).toBe(false)
|
||||
})
|
||||
|
||||
it('returns false for non-image data URLs', () => {
|
||||
expect(isBase64ImageDataUrl('data:text/plain;base64,SGVsbG8=')).toBe(false)
|
||||
expect(isBase64ImageDataUrl('data:application/json,{}')).toBe(false)
|
||||
})
|
||||
|
||||
it('returns false for regular URLs', () => {
|
||||
expect(isBase64ImageDataUrl('https://example.com/image.png')).toBe(false)
|
||||
expect(isBase64ImageDataUrl('file:///image.png')).toBe(false)
|
||||
})
|
||||
|
||||
it('returns false for malformed data URLs', () => {
|
||||
expect(isBase64ImageDataUrl('data:image/png')).toBe(false)
|
||||
expect(isBase64ImageDataUrl('')).toBe(false)
|
||||
})
|
||||
})
|
||||
@ -4,7 +4,7 @@
|
||||
*
|
||||
* ⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️
|
||||
* THIS FILE IS AUTOMATICALLY GENERATED BY A SCRIPT. DO NOT EDIT IT MANUALLY!
|
||||
* Run `yarn update:languages` to update this file.
|
||||
* Run `pnpm update:languages` to update this file.
|
||||
* ⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️
|
||||
*
|
||||
*/
|
||||
|
||||
packages/shared/mcp.ts
Normal file
@ -0,0 +1,116 @@
|
||||
/**
 * Convert a string to camelCase, ensuring it's a valid JavaScript identifier.
 *
 * - Normalizes to lowercase first, then capitalizes word boundaries
 * - Non-alphanumeric characters are treated as word separators
 * - Non-ASCII characters are dropped (ASCII-only output)
 * - If result starts with a digit, prefixes with underscore
 *
 * @example
 * toCamelCase('my-server') // 'myServer'
 * toCamelCase('MY_SERVER') // 'myServer'
 * toCamelCase('123tool') // '_123tool'
 */
export function toCamelCase(str: string): string {
  let result = str
    .trim()
    .toLowerCase()
    .replace(/[^a-z0-9]+(.)/g, (_, char) => char.toUpperCase())
    .replace(/[^a-zA-Z0-9]/g, '')

  if (result && !/^[a-zA-Z_]/.test(result)) {
    result = '_' + result
  }

  return result
}

export type McpToolNameOptions = {
  /** Prefix added before the name (e.g., 'mcp__'). Must be JS-identifier-safe. */
  prefix?: string
  /** Delimiter between server and tool parts (e.g., '_' or '__'). Must be JS-identifier-safe. */
  delimiter?: string
  /** Maximum length of the final name. Suffix numbers for uniqueness are included in this limit. */
  maxLength?: number
  /** Mutable Set for collision detection. The final name will be added to this Set. */
  existingNames?: Set<string>
}

/**
 * Build a valid JavaScript function name from server and tool names.
 * Uses camelCase for both parts.
 *
 * @param serverName - The MCP server name (optional)
 * @param toolName - The tool name
 * @param options - Configuration options
 * @returns A valid JS identifier
 */
export function buildMcpToolName(
  serverName: string | undefined,
  toolName: string,
  options: McpToolNameOptions = {}
): string {
  const { prefix = '', delimiter = '_', maxLength, existingNames } = options

  const serverPart = serverName ? toCamelCase(serverName) : ''
  const toolPart = toCamelCase(toolName)
  const baseName = serverPart ? `${prefix}${serverPart}${delimiter}${toolPart}` : `${prefix}${toolPart}`

  if (!existingNames) {
    return maxLength ? truncateToLength(baseName, maxLength) : baseName
  }

  let name = maxLength ? truncateToLength(baseName, maxLength) : baseName
  let counter = 1

  while (existingNames.has(name)) {
    const suffix = String(counter)
    const truncatedBase = maxLength ? truncateToLength(baseName, maxLength - suffix.length) : baseName
    name = `${truncatedBase}${suffix}`
    counter++
  }

  existingNames.add(name)
  return name
}

function truncateToLength(str: string, maxLength: number): string {
  if (str.length <= maxLength) {
    return str
  }
  return str.slice(0, maxLength).replace(/_+$/, '')
}

/**
 * Generate a unique function name from server name and tool name.
 * Format: serverName_toolName (camelCase)
 *
 * @example
 * generateMcpToolFunctionName('github', 'search_issues') // 'github_searchIssues'
 */
export function generateMcpToolFunctionName(
  serverName: string | undefined,
  toolName: string,
  existingNames?: Set<string>
): string {
  return buildMcpToolName(serverName, toolName, { existingNames })
}

/**
 * Builds a valid JavaScript function name for MCP tool calls.
 * Format: mcp__{serverName}__{toolName}
 *
 * @param serverName - The MCP server name
 * @param toolName - The tool name from the server
 * @returns A valid JS identifier in format mcp__{server}__{tool}, max 63 chars
 *
 * @example
 * buildFunctionCallToolName('github', 'search_issues') // 'mcp__github__searchIssues'
 */
export function buildFunctionCallToolName(serverName: string, toolName: string): string {
  return buildMcpToolName(serverName, toolName, {
    prefix: 'mcp__',
    delimiter: '__',
    maxLength: 63
  })
}
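A minimal usage sketch of the helpers above (the server and tool names are made up for illustration, not taken from this repo); the collision suffix only appears when the same Set is reused across calls:

// Sketch only: hypothetical names, showing the camelCase + collision behavior.
const seen = new Set<string>()
generateMcpToolFunctionName('my-server', 'list_files', seen) // 'myServer_listFiles'
generateMcpToolFunctionName('MY_SERVER', 'list-files', seen) // 'myServer_listFiles1' (suffix avoids the collision)
buildFunctionCallToolName('github', 'search_issues') // 'mcp__github__searchIssues' (63-char cap applies)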
@ -35,3 +35,134 @@ export const defaultAppHeaders = () => {
//     return value
//   }
// }

/**
 * Extracts the trailing API version segment from a URL path.
 *
 * This function extracts API version patterns (e.g., `v1`, `v2beta`) from the end of a URL.
 * Only versions at the end of the path are extracted, not versions in the middle.
 * The returned version string does not include leading or trailing slashes.
 *
 * @param {string} url - The URL string to parse.
 * @returns {string | undefined} The trailing API version found (e.g., 'v1', 'v2beta'), or undefined if none found.
 *
 * @example
 * getTrailingApiVersion('https://api.example.com/v1') // 'v1'
 * getTrailingApiVersion('https://api.example.com/v2beta/') // 'v2beta'
 * getTrailingApiVersion('https://api.example.com/v1/chat') // undefined (version not at end)
 * getTrailingApiVersion('https://gateway.ai.cloudflare.com/v1/xxx/v1beta') // 'v1beta'
 * getTrailingApiVersion('https://api.example.com') // undefined
 */
export function getTrailingApiVersion(url: string): string | undefined {
  const match = url.match(TRAILING_VERSION_REGEX)

  if (match) {
    // Extract version without leading slash and trailing slash
    return match[0].replace(/^\//, '').replace(/\/$/, '')
  }

  return undefined
}

/**
 * Matches an API version at the end of a URL (with optional trailing slash).
 * Used to detect and extract versions only from the trailing position.
 */
const TRAILING_VERSION_REGEX = /\/v\d+(?:alpha|beta)?\/?$/i

/**
 * Removes the trailing API version segment from a URL path.
 *
 * This function removes API version patterns (e.g., `/v1`, `/v2beta`) from the end of a URL.
 * Only versions at the end of the path are removed, not versions in the middle.
 *
 * @param {string} url - The URL string to process.
 * @returns {string} The URL with the trailing API version removed, or the original URL if no trailing version found.
 *
 * @example
 * withoutTrailingApiVersion('https://api.example.com/v1') // 'https://api.example.com'
 * withoutTrailingApiVersion('https://api.example.com/v2beta/') // 'https://api.example.com'
 * withoutTrailingApiVersion('https://api.example.com/v1/chat') // 'https://api.example.com/v1/chat' (no change)
 * withoutTrailingApiVersion('https://api.example.com') // 'https://api.example.com'
 */
export function withoutTrailingApiVersion(url: string): string {
  return url.replace(TRAILING_VERSION_REGEX, '')
}
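One plausible use of these two helpers, sketched below, is normalizing a configured baseURL before recomposing an endpoint; the URL and path are illustrative and not taken from the app:

// Sketch: split a configured baseURL into host and trailing version, then recompose.
const baseURL = 'https://api.example.com/v1'
const host = withoutTrailingApiVersion(baseURL) // 'https://api.example.com'
const version = getTrailingApiVersion(baseURL) ?? 'v1' // 'v1'
const endpoint = `${host}/${version}/chat/completions` // 'https://api.example.com/v1/chat/completions'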
export interface DataUrlParts {
  /** The media type (e.g., 'image/png', 'text/plain') */
  mediaType?: string
  /** Whether the data is base64 encoded */
  isBase64: boolean
  /** The data portion (everything after the comma). This is the raw string, not decoded. */
  data: string
}

/**
 * Parses a data URL into its component parts without using regex on the data portion.
 * This is memory-safe for large data URLs (e.g., 4K images) as it uses indexOf instead of regex.
 *
 * Data URL format: data:[<mediatype>][;base64],<data>
 *
 * @param url - The data URL string to parse
 * @returns DataUrlParts if valid, null if invalid
 *
 * @example
 * parseDataUrl('data:image/png;base64,iVBORw0KGgo...')
 * // { mediaType: 'image/png', isBase64: true, data: 'iVBORw0KGgo...' }
 *
 * parseDataUrl('data:text/plain,Hello')
 * // { mediaType: 'text/plain', isBase64: false, data: 'Hello' }
 *
 * parseDataUrl('invalid-url')
 * // null
 */
export function parseDataUrl(url: string): DataUrlParts | null {
  if (!url.startsWith('data:')) {
    return null
  }

  const commaIndex = url.indexOf(',')
  if (commaIndex === -1) {
    return null
  }

  const header = url.slice(5, commaIndex)

  const isBase64 = header.includes(';base64')

  const semicolonIndex = header.indexOf(';')
  const mediaType = (semicolonIndex === -1 ? header : header.slice(0, semicolonIndex)).trim() || undefined

  const data = url.slice(commaIndex + 1)

  return { mediaType, isBase64, data }
}

/**
 * Checks if a string is a data URL.
 *
 * @param url - The string to check
 * @returns true if the string is a valid data URL
 */
export function isDataUrl(url: string): boolean {
  return url.startsWith('data:') && url.includes(',')
}

/**
 * Checks if a data URL contains base64-encoded image data.
 *
 * @param url - The data URL to check
 * @returns true if the URL is a base64-encoded image data URL
 */
export function isBase64ImageDataUrl(url: string): boolean {
  if (!url.startsWith('data:image/')) {
    return false
  }
  const commaIndex = url.indexOf(',')
  if (commaIndex === -1) {
    return false
  }
  const header = url.slice(5, commaIndex)
  return header.includes(';base64')
}
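A small sketch of how a caller might combine these guards with parseDataUrl before decoding; the base64 payload below is a placeholder string, not a real image:

// Sketch: guard first, then decode only the data portion.
const url = 'data:image/png;base64,iVBORw0KGgo'
if (isBase64ImageDataUrl(url)) {
  const parts = parseDataUrl(url)
  if (parts?.isBase64) {
    const bytes = Buffer.from(parts.data, 'base64')
    console.log(parts.mediaType, bytes.byteLength) // 'image/png' and the decoded size
  }
}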
@ -11,7 +11,7 @@ index 48e2f6263c6ee4c75d7e5c28733e64f6ebe92200..00d0729c4a3cbf9a48e8e1e962c7e2b2
|
||||
type OpenAICompatibleProviderOptions = z.infer<typeof openaiCompatibleProviderOptions>;
|
||||
|
||||
diff --git a/dist/index.js b/dist/index.js
|
||||
index da237bb35b7fa8e24b37cd861ee73dfc51cdfc72..b3060fbaf010e30b64df55302807828e5bfe0f9a 100644
|
||||
index da237bb35b7fa8e24b37cd861ee73dfc51cdfc72..88349c614a69a268a2e4f3b157cb5e328ca1d347 100644
|
||||
--- a/dist/index.js
|
||||
+++ b/dist/index.js
|
||||
@@ -41,7 +41,7 @@ function getOpenAIMetadata(message) {
|
||||
@ -48,21 +48,42 @@ index da237bb35b7fa8e24b37cd861ee73dfc51cdfc72..b3060fbaf010e30b64df55302807828e
|
||||
messages.push({
|
||||
role: "assistant",
|
||||
content: text,
|
||||
+ reasoning_content: reasoning_text ?? undefined,
|
||||
+ reasoning_content: reasoning_text || undefined,
|
||||
tool_calls: toolCalls.length > 0 ? toolCalls : void 0,
|
||||
...metadata
|
||||
});
|
||||
@@ -200,7 +208,8 @@ var openaiCompatibleProviderOptions = import_v4.z.object({
|
||||
@@ -200,7 +208,9 @@ var openaiCompatibleProviderOptions = import_v4.z.object({
|
||||
/**
|
||||
* Controls the verbosity of the generated text. Defaults to `medium`.
|
||||
*/
|
||||
- textVerbosity: import_v4.z.string().optional()
|
||||
+ textVerbosity: import_v4.z.string().optional(),
|
||||
+ sendReasoning: import_v4.z.boolean().optional()
|
||||
+ sendReasoning: import_v4.z.boolean().optional(),
|
||||
+ strictJsonSchema: z.boolean().optional()
|
||||
});
|
||||
|
||||
// src/openai-compatible-error.ts
|
||||
@@ -378,7 +387,7 @@ var OpenAICompatibleChatLanguageModel = class {
|
||||
@@ -225,7 +235,8 @@ var defaultOpenAICompatibleErrorStructure = {
|
||||
var import_provider2 = require("@ai-sdk/provider");
|
||||
function prepareTools({
|
||||
tools,
|
||||
- toolChoice
|
||||
+ toolChoice,
|
||||
+ strictJsonSchema
|
||||
}) {
|
||||
tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
|
||||
const toolWarnings = [];
|
||||
@@ -242,7 +253,8 @@ function prepareTools({
|
||||
function: {
|
||||
name: tool.name,
|
||||
description: tool.description,
|
||||
- parameters: tool.inputSchema
|
||||
+ parameters: tool.inputSchema,
|
||||
+ strict: strictJsonSchema
|
||||
}
|
||||
});
|
||||
}
|
||||
@@ -378,7 +390,7 @@ var OpenAICompatibleChatLanguageModel = class {
|
||||
reasoning_effort: compatibleOptions.reasoningEffort,
|
||||
verbosity: compatibleOptions.textVerbosity,
|
||||
// messages:
|
||||
@ -71,7 +92,7 @@ index da237bb35b7fa8e24b37cd861ee73dfc51cdfc72..b3060fbaf010e30b64df55302807828e
|
||||
// tools:
|
||||
tools: openaiTools,
|
||||
tool_choice: openaiToolChoice
|
||||
@@ -421,6 +430,17 @@ var OpenAICompatibleChatLanguageModel = class {
|
||||
@@ -421,6 +433,17 @@ var OpenAICompatibleChatLanguageModel = class {
|
||||
text: reasoning
|
||||
});
|
||||
}
|
||||
@ -89,7 +110,7 @@ index da237bb35b7fa8e24b37cd861ee73dfc51cdfc72..b3060fbaf010e30b64df55302807828e
|
||||
if (choice.message.tool_calls != null) {
|
||||
for (const toolCall of choice.message.tool_calls) {
|
||||
content.push({
|
||||
@@ -598,6 +618,17 @@ var OpenAICompatibleChatLanguageModel = class {
|
||||
@@ -598,6 +621,17 @@ var OpenAICompatibleChatLanguageModel = class {
|
||||
delta: delta.content
|
||||
});
|
||||
}
|
||||
@ -107,7 +128,7 @@ index da237bb35b7fa8e24b37cd861ee73dfc51cdfc72..b3060fbaf010e30b64df55302807828e
|
||||
if (delta.tool_calls != null) {
|
||||
for (const toolCallDelta of delta.tool_calls) {
|
||||
const index = toolCallDelta.index;
|
||||
@@ -765,6 +796,14 @@ var OpenAICompatibleChatResponseSchema = import_v43.z.object({
|
||||
@@ -765,6 +799,14 @@ var OpenAICompatibleChatResponseSchema = import_v43.z.object({
|
||||
arguments: import_v43.z.string()
|
||||
})
|
||||
})
|
||||
@ -122,7 +143,7 @@ index da237bb35b7fa8e24b37cd861ee73dfc51cdfc72..b3060fbaf010e30b64df55302807828e
|
||||
).nullish()
|
||||
}),
|
||||
finish_reason: import_v43.z.string().nullish()
|
||||
@@ -795,6 +834,14 @@ var createOpenAICompatibleChatChunkSchema = (errorSchema) => import_v43.z.union(
|
||||
@@ -795,6 +837,14 @@ var createOpenAICompatibleChatChunkSchema = (errorSchema) => import_v43.z.union(
|
||||
arguments: import_v43.z.string().nullish()
|
||||
})
|
||||
})
|
||||
@ -138,7 +159,7 @@ index da237bb35b7fa8e24b37cd861ee73dfc51cdfc72..b3060fbaf010e30b64df55302807828e
|
||||
}).nullish(),
|
||||
finish_reason: import_v43.z.string().nullish()
|
||||
diff --git a/dist/index.mjs b/dist/index.mjs
|
||||
index a809a7aa0e148bfd43e01dd7b018568b151c8ad5..565b605eeacd9830b2b0e817e58ad0c5700264de 100644
|
||||
index a809a7aa0e148bfd43e01dd7b018568b151c8ad5..fca65c04000ce4c01fb90e93326ac179c2378055 100644
|
||||
--- a/dist/index.mjs
|
||||
+++ b/dist/index.mjs
|
||||
@@ -23,7 +23,7 @@ function getOpenAIMetadata(message) {
|
||||
@ -175,21 +196,42 @@ index a809a7aa0e148bfd43e01dd7b018568b151c8ad5..565b605eeacd9830b2b0e817e58ad0c5
|
||||
messages.push({
|
||||
role: "assistant",
|
||||
content: text,
|
||||
+ reasoning_content: reasoning_text ?? undefined,
|
||||
+ reasoning_content: reasoning_text || undefined,
|
||||
tool_calls: toolCalls.length > 0 ? toolCalls : void 0,
|
||||
...metadata
|
||||
});
|
||||
@@ -182,7 +190,8 @@ var openaiCompatibleProviderOptions = z.object({
|
||||
@@ -182,7 +190,9 @@ var openaiCompatibleProviderOptions = z.object({
|
||||
/**
|
||||
* Controls the verbosity of the generated text. Defaults to `medium`.
|
||||
*/
|
||||
- textVerbosity: z.string().optional()
|
||||
+ textVerbosity: z.string().optional(),
|
||||
+ sendReasoning: z.boolean().optional()
|
||||
+ sendReasoning: z.boolean().optional(),
|
||||
+ strictJsonSchema: z.boolean().optional()
|
||||
});
|
||||
|
||||
// src/openai-compatible-error.ts
|
||||
@@ -362,7 +371,7 @@ var OpenAICompatibleChatLanguageModel = class {
|
||||
@@ -209,7 +219,8 @@ import {
|
||||
} from "@ai-sdk/provider";
|
||||
function prepareTools({
|
||||
tools,
|
||||
- toolChoice
|
||||
+ toolChoice,
|
||||
+ strictJsonSchema
|
||||
}) {
|
||||
tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
|
||||
const toolWarnings = [];
|
||||
@@ -226,7 +237,8 @@ function prepareTools({
|
||||
function: {
|
||||
name: tool.name,
|
||||
description: tool.description,
|
||||
- parameters: tool.inputSchema
|
||||
+ parameters: tool.inputSchema,
|
||||
+ strict: strictJsonSchema
|
||||
}
|
||||
});
|
||||
}
|
||||
@@ -362,7 +374,7 @@ var OpenAICompatibleChatLanguageModel = class {
|
||||
reasoning_effort: compatibleOptions.reasoningEffort,
|
||||
verbosity: compatibleOptions.textVerbosity,
|
||||
// messages:
|
||||
@ -198,7 +240,7 @@ index a809a7aa0e148bfd43e01dd7b018568b151c8ad5..565b605eeacd9830b2b0e817e58ad0c5
|
||||
// tools:
|
||||
tools: openaiTools,
|
||||
tool_choice: openaiToolChoice
|
||||
@@ -405,6 +414,17 @@ var OpenAICompatibleChatLanguageModel = class {
|
||||
@@ -405,6 +417,17 @@ var OpenAICompatibleChatLanguageModel = class {
|
||||
text: reasoning
|
||||
});
|
||||
}
|
||||
@ -216,7 +258,7 @@ index a809a7aa0e148bfd43e01dd7b018568b151c8ad5..565b605eeacd9830b2b0e817e58ad0c5
|
||||
if (choice.message.tool_calls != null) {
|
||||
for (const toolCall of choice.message.tool_calls) {
|
||||
content.push({
|
||||
@@ -582,6 +602,17 @@ var OpenAICompatibleChatLanguageModel = class {
|
||||
@@ -582,6 +605,17 @@ var OpenAICompatibleChatLanguageModel = class {
|
||||
delta: delta.content
|
||||
});
|
||||
}
|
||||
@ -234,7 +276,7 @@ index a809a7aa0e148bfd43e01dd7b018568b151c8ad5..565b605eeacd9830b2b0e817e58ad0c5
|
||||
if (delta.tool_calls != null) {
|
||||
for (const toolCallDelta of delta.tool_calls) {
|
||||
const index = toolCallDelta.index;
|
||||
@@ -749,6 +780,14 @@ var OpenAICompatibleChatResponseSchema = z3.object({
|
||||
@@ -749,6 +783,14 @@ var OpenAICompatibleChatResponseSchema = z3.object({
|
||||
arguments: z3.string()
|
||||
})
|
||||
})
|
||||
@ -249,7 +291,7 @@ index a809a7aa0e148bfd43e01dd7b018568b151c8ad5..565b605eeacd9830b2b0e817e58ad0c5
|
||||
).nullish()
|
||||
}),
|
||||
finish_reason: z3.string().nullish()
|
||||
@@ -779,6 +818,14 @@ var createOpenAICompatibleChatChunkSchema = (errorSchema) => z3.union([
|
||||
@@ -779,6 +821,14 @@ var createOpenAICompatibleChatChunkSchema = (errorSchema) => z3.union([
|
||||
arguments: z3.string().nullish()
|
||||
})
|
||||
})
|
||||
33 patches/@anthropic-ai__claude-agent-sdk@0.1.76.patch Normal file
@ -0,0 +1,33 @@
|
||||
diff --git a/sdk.mjs b/sdk.mjs
|
||||
index 1e1c3e4e3f81db622fb2789d17f3d421f212306e..5d193cdb6a43c7799fd5eff2d8af80827bfbdf1e 100755
|
||||
--- a/sdk.mjs
|
||||
+++ b/sdk.mjs
|
||||
@@ -11985,7 +11985,7 @@ function createAbortController(maxListeners = DEFAULT_MAX_LISTENERS) {
|
||||
}
|
||||
|
||||
// ../src/transport/ProcessTransport.ts
|
||||
-import { spawn } from "child_process";
|
||||
+import { fork } from "child_process";
|
||||
import { createInterface } from "readline";
|
||||
|
||||
// ../src/utils/fsOperations.ts
|
||||
@@ -12999,14 +12999,14 @@ class ProcessTransport {
|
||||
return isRunningWithBun() ? "bun" : "node";
|
||||
}
|
||||
spawnLocalProcess(spawnOptions) {
|
||||
- const { command, args, cwd: cwd2, env, signal } = spawnOptions;
|
||||
+ const { args, cwd: cwd2, env, signal } = spawnOptions;
|
||||
const stderrMode = env.DEBUG_CLAUDE_AGENT_SDK || this.options.stderr ? "pipe" : "ignore";
|
||||
- const childProcess = spawn(command, args, {
|
||||
+ logForSdkDebugging(`Forking Claude Code Node.js process: ${args[0]} ${args.slice(1).join(" ")}`);
|
||||
+ const childProcess = fork(args[0], args.slice(1), {
|
||||
cwd: cwd2,
|
||||
- stdio: ["pipe", "pipe", stderrMode],
|
||||
+ stdio: stderrMode === "pipe" ? ["pipe", "pipe", "pipe", "ipc"] : ["pipe", "pipe", "ignore", "ipc"],
|
||||
signal,
|
||||
- env,
|
||||
- windowsHide: true
|
||||
+ env
|
||||
});
|
||||
if (env.DEBUG_CLAUDE_AGENT_SDK || this.options.stderr) {
|
||||
childProcess.stderr.on("data", (data) => {
|
||||
140 patches/@openrouter__ai-sdk-provider.patch Normal file
@ -0,0 +1,140 @@
|
||||
diff --git a/dist/index.js b/dist/index.js
|
||||
index f33510a50d11a2cb92a90ea70cc0ac84c89f29b9..db0af7e2cc05c47baeb29c0a3974a155316fbd05 100644
|
||||
--- a/dist/index.js
|
||||
+++ b/dist/index.js
|
||||
@@ -1050,7 +1050,8 @@ var OpenRouterProviderMetadataSchema = import_v43.z.object({
|
||||
var OpenRouterProviderOptionsSchema = import_v43.z.object({
|
||||
openrouter: import_v43.z.object({
|
||||
reasoning_details: import_v43.z.array(ReasoningDetailUnionSchema).optional(),
|
||||
- annotations: import_v43.z.array(FileAnnotationSchema).optional()
|
||||
+ annotations: import_v43.z.array(FileAnnotationSchema).optional(),
|
||||
+ strictJsonSchema: import_v43.z.boolean().optional()
|
||||
}).optional()
|
||||
}).optional();
|
||||
|
||||
@@ -1658,7 +1659,8 @@ var OpenRouterChatLanguageModel = class {
|
||||
responseFormat,
|
||||
topK,
|
||||
tools,
|
||||
- toolChoice
|
||||
+ toolChoice,
|
||||
+ providerOptions
|
||||
}) {
|
||||
var _a15;
|
||||
const baseArgs = __spreadValues(__spreadValues({
|
||||
@@ -1712,7 +1714,8 @@ var OpenRouterChatLanguageModel = class {
|
||||
function: {
|
||||
name: tool.name,
|
||||
description: tool.description,
|
||||
- parameters: tool.inputSchema
|
||||
+ parameters: tool.inputSchema,
|
||||
+ strict: providerOptions?.openrouter?.strictJsonSchema
|
||||
}
|
||||
}));
|
||||
return __spreadProps(__spreadValues({}, baseArgs), {
|
||||
@@ -1725,7 +1728,7 @@ var OpenRouterChatLanguageModel = class {
|
||||
async doGenerate(options) {
|
||||
var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w;
|
||||
const providerOptions = options.providerOptions || {};
|
||||
- const openrouterOptions = providerOptions.openrouter || {};
|
||||
+ const { strictJsonSchema: _strictJsonSchema, ...openrouterOptions } = providerOptions.openrouter || {};
|
||||
const args = __spreadValues(__spreadValues({}, this.getArgs(options)), openrouterOptions);
|
||||
const { value: responseValue, responseHeaders } = await postJsonToApi({
|
||||
url: this.config.url({
|
||||
@@ -1931,7 +1934,7 @@ var OpenRouterChatLanguageModel = class {
|
||||
async doStream(options) {
|
||||
var _a15;
|
||||
const providerOptions = options.providerOptions || {};
|
||||
- const openrouterOptions = providerOptions.openrouter || {};
|
||||
+ const { strictJsonSchema: _strictJsonSchema, ...openrouterOptions } = providerOptions.openrouter || {};
|
||||
const args = __spreadValues(__spreadValues({}, this.getArgs(options)), openrouterOptions);
|
||||
const { value: response, responseHeaders } = await postJsonToApi({
|
||||
url: this.config.url({
|
||||
@@ -2564,7 +2567,7 @@ var OpenRouterCompletionLanguageModel = class {
|
||||
async doGenerate(options) {
|
||||
var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o;
|
||||
const providerOptions = options.providerOptions || {};
|
||||
- const openrouterOptions = providerOptions.openrouter || {};
|
||||
+ const { strictJsonSchema: _strictJsonSchema, ...openrouterOptions } = providerOptions.openrouter || {};
|
||||
const args = __spreadValues(__spreadValues({}, this.getArgs(options)), openrouterOptions);
|
||||
const { value: response, responseHeaders } = await postJsonToApi({
|
||||
url: this.config.url({
|
||||
@@ -2623,7 +2626,7 @@ var OpenRouterCompletionLanguageModel = class {
|
||||
}
|
||||
async doStream(options) {
|
||||
const providerOptions = options.providerOptions || {};
|
||||
- const openrouterOptions = providerOptions.openrouter || {};
|
||||
+ const { strictJsonSchema: _strictJsonSchema, ...openrouterOptions } = providerOptions.openrouter || {};
|
||||
const args = __spreadValues(__spreadValues({}, this.getArgs(options)), openrouterOptions);
|
||||
const { value: response, responseHeaders } = await postJsonToApi({
|
||||
url: this.config.url({
|
||||
diff --git a/dist/index.mjs b/dist/index.mjs
|
||||
index 8a688331b88b4af738ee4ca8062b5f24124d3d81..a2aa299a44352addc26f8891d839ea31a2150ee2 100644
|
||||
--- a/dist/index.mjs
|
||||
+++ b/dist/index.mjs
|
||||
@@ -1015,7 +1015,8 @@ var OpenRouterProviderMetadataSchema = z3.object({
|
||||
var OpenRouterProviderOptionsSchema = z3.object({
|
||||
openrouter: z3.object({
|
||||
reasoning_details: z3.array(ReasoningDetailUnionSchema).optional(),
|
||||
- annotations: z3.array(FileAnnotationSchema).optional()
|
||||
+ annotations: z3.array(FileAnnotationSchema).optional(),
|
||||
+ strictJsonSchema: z3.boolean().optional()
|
||||
}).optional()
|
||||
}).optional();
|
||||
|
||||
@@ -1623,7 +1624,8 @@ var OpenRouterChatLanguageModel = class {
|
||||
responseFormat,
|
||||
topK,
|
||||
tools,
|
||||
- toolChoice
|
||||
+ toolChoice,
|
||||
+ providerOptions
|
||||
}) {
|
||||
var _a15;
|
||||
const baseArgs = __spreadValues(__spreadValues({
|
||||
@@ -1677,7 +1679,8 @@ var OpenRouterChatLanguageModel = class {
|
||||
function: {
|
||||
name: tool.name,
|
||||
description: tool.description,
|
||||
- parameters: tool.inputSchema
|
||||
+ parameters: tool.inputSchema,
|
||||
+ strict: providerOptions?.openrouter?.strictJsonSchema
|
||||
}
|
||||
}));
|
||||
return __spreadProps(__spreadValues({}, baseArgs), {
|
||||
@@ -1690,7 +1693,7 @@ var OpenRouterChatLanguageModel = class {
|
||||
async doGenerate(options) {
|
||||
var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w;
|
||||
const providerOptions = options.providerOptions || {};
|
||||
- const openrouterOptions = providerOptions.openrouter || {};
|
||||
+ const { strictJsonSchema: _strictJsonSchema, ...openrouterOptions } = providerOptions.openrouter || {};
|
||||
const args = __spreadValues(__spreadValues({}, this.getArgs(options)), openrouterOptions);
|
||||
const { value: responseValue, responseHeaders } = await postJsonToApi({
|
||||
url: this.config.url({
|
||||
@@ -1896,7 +1899,7 @@ var OpenRouterChatLanguageModel = class {
|
||||
async doStream(options) {
|
||||
var _a15;
|
||||
const providerOptions = options.providerOptions || {};
|
||||
- const openrouterOptions = providerOptions.openrouter || {};
|
||||
+ const { strictJsonSchema: _strictJsonSchema, ...openrouterOptions } = providerOptions.openrouter || {};
|
||||
const args = __spreadValues(__spreadValues({}, this.getArgs(options)), openrouterOptions);
|
||||
const { value: response, responseHeaders } = await postJsonToApi({
|
||||
url: this.config.url({
|
||||
@@ -2529,7 +2532,7 @@ var OpenRouterCompletionLanguageModel = class {
|
||||
async doGenerate(options) {
|
||||
var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o;
|
||||
const providerOptions = options.providerOptions || {};
|
||||
- const openrouterOptions = providerOptions.openrouter || {};
|
||||
+ const { strictJsonSchema: _strictJsonSchema, ...openrouterOptions } = providerOptions.openrouter || {};
|
||||
const args = __spreadValues(__spreadValues({}, this.getArgs(options)), openrouterOptions);
|
||||
const { value: response, responseHeaders } = await postJsonToApi({
|
||||
url: this.config.url({
|
||||
@@ -2588,7 +2591,7 @@ var OpenRouterCompletionLanguageModel = class {
|
||||
}
|
||||
async doStream(options) {
|
||||
const providerOptions = options.providerOptions || {};
|
||||
- const openrouterOptions = providerOptions.openrouter || {};
|
||||
+ const { strictJsonSchema: _strictJsonSchema, ...openrouterOptions } = providerOptions.openrouter || {};
|
||||
const args = __spreadValues(__spreadValues({}, this.getArgs(options)), openrouterOptions);
|
||||
const { value: response, responseHeaders } = await postJsonToApi({
|
||||
url: this.config.url({
|
||||
@ -7,7 +7,7 @@ index 8dd9b498050dbecd8dd6b901acf1aa8ca38a49af..ed644349c9d38fe2a66b2fb44214f7c1
|
||||
type OllamaChatModelId = "athene-v2" | "athene-v2:72b" | "aya-expanse" | "aya-expanse:8b" | "aya-expanse:32b" | "codegemma" | "codegemma:2b" | "codegemma:7b" | "codellama" | "codellama:7b" | "codellama:13b" | "codellama:34b" | "codellama:70b" | "codellama:code" | "codellama:python" | "command-r" | "command-r:35b" | "command-r-plus" | "command-r-plus:104b" | "command-r7b" | "command-r7b:7b" | "deepseek-r1" | "deepseek-r1:1.5b" | "deepseek-r1:7b" | "deepseek-r1:8b" | "deepseek-r1:14b" | "deepseek-r1:32b" | "deepseek-r1:70b" | "deepseek-r1:671b" | "deepseek-coder-v2" | "deepseek-coder-v2:16b" | "deepseek-coder-v2:236b" | "deepseek-v3" | "deepseek-v3:671b" | "devstral" | "devstral:24b" | "dolphin3" | "dolphin3:8b" | "exaone3.5" | "exaone3.5:2.4b" | "exaone3.5:7.8b" | "exaone3.5:32b" | "falcon2" | "falcon2:11b" | "falcon3" | "falcon3:1b" | "falcon3:3b" | "falcon3:7b" | "falcon3:10b" | "firefunction-v2" | "firefunction-v2:70b" | "gemma" | "gemma:2b" | "gemma:7b" | "gemma2" | "gemma2:2b" | "gemma2:9b" | "gemma2:27b" | "gemma3" | "gemma3:1b" | "gemma3:4b" | "gemma3:12b" | "gemma3:27b" | "granite3-dense" | "granite3-dense:2b" | "granite3-dense:8b" | "granite3-guardian" | "granite3-guardian:2b" | "granite3-guardian:8b" | "granite3-moe" | "granite3-moe:1b" | "granite3-moe:3b" | "granite3.1-dense" | "granite3.1-dense:2b" | "granite3.1-dense:8b" | "granite3.1-moe" | "granite3.1-moe:1b" | "granite3.1-moe:3b" | "llama2" | "llama2:7b" | "llama2:13b" | "llama2:70b" | "llama3" | "llama3:8b" | "llama3:70b" | "llama3-chatqa" | "llama3-chatqa:8b" | "llama3-chatqa:70b" | "llama3-gradient" | "llama3-gradient:8b" | "llama3-gradient:70b" | "llama3.1" | "llama3.1:8b" | "llama3.1:70b" | "llama3.1:405b" | "llama3.2" | "llama3.2:1b" | "llama3.2:3b" | "llama3.2-vision" | "llama3.2-vision:11b" | "llama3.2-vision:90b" | "llama3.3" | "llama3.3:70b" | "llama4" | "llama4:16x17b" | "llama4:128x17b" | "llama-guard3" | "llama-guard3:1b" | "llama-guard3:8b" | "llava" | "llava:7b" | "llava:13b" | "llava:34b" | "llava-llama3" | "llava-llama3:8b" | "llava-phi3" | "llava-phi3:3.8b" | "marco-o1" | "marco-o1:7b" | "mistral" | "mistral:7b" | "mistral-large" | "mistral-large:123b" | "mistral-nemo" | "mistral-nemo:12b" | "mistral-small" | "mistral-small:22b" | "mixtral" | "mixtral:8x7b" | "mixtral:8x22b" | "moondream" | "moondream:1.8b" | "openhermes" | "openhermes:v2.5" | "nemotron" | "nemotron:70b" | "nemotron-mini" | "nemotron-mini:4b" | "olmo" | "olmo:7b" | "olmo:13b" | "opencoder" | "opencoder:1.5b" | "opencoder:8b" | "phi3" | "phi3:3.8b" | "phi3:14b" | "phi3.5" | "phi3.5:3.8b" | "phi4" | "phi4:14b" | "qwen" | "qwen:7b" | "qwen:14b" | "qwen:32b" | "qwen:72b" | "qwen:110b" | "qwen2" | "qwen2:0.5b" | "qwen2:1.5b" | "qwen2:7b" | "qwen2:72b" | "qwen2.5" | "qwen2.5:0.5b" | "qwen2.5:1.5b" | "qwen2.5:3b" | "qwen2.5:7b" | "qwen2.5:14b" | "qwen2.5:32b" | "qwen2.5:72b" | "qwen2.5-coder" | "qwen2.5-coder:0.5b" | "qwen2.5-coder:1.5b" | "qwen2.5-coder:3b" | "qwen2.5-coder:7b" | "qwen2.5-coder:14b" | "qwen2.5-coder:32b" | "qwen3" | "qwen3:0.6b" | "qwen3:1.7b" | "qwen3:4b" | "qwen3:8b" | "qwen3:14b" | "qwen3:30b" | "qwen3:32b" | "qwen3:235b" | "qwq" | "qwq:32b" | "sailor2" | "sailor2:1b" | "sailor2:8b" | "sailor2:20b" | "shieldgemma" | "shieldgemma:2b" | "shieldgemma:9b" | "shieldgemma:27b" | "smallthinker" | "smallthinker:3b" | "smollm" | "smollm:135m" | "smollm:360m" | "smollm:1.7b" | "tinyllama" | "tinyllama:1.1b" | "tulu3" | "tulu3:8b" | "tulu3:70b" | (string & {});
|
||||
declare const ollamaProviderOptions: z.ZodObject<{
|
||||
- think: z.ZodOptional<z.ZodBoolean>;
|
||||
+ think: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodEnum<['low', 'medium', 'high']>]>>;
|
||||
+ think: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodLiteral<"low">, z.ZodLiteral<"medium">, z.ZodLiteral<"high">]>>;
|
||||
options: z.ZodOptional<z.ZodObject<{
|
||||
num_ctx: z.ZodOptional<z.ZodNumber>;
|
||||
repeat_last_n: z.ZodOptional<z.ZodNumber>;
|
||||
@ -29,7 +29,7 @@ index 8dd9b498050dbecd8dd6b901acf1aa8ca38a49af..ed644349c9d38fe2a66b2fb44214f7c1
|
||||
|
||||
declare const ollamaCompletionProviderOptions: z.ZodObject<{
|
||||
- think: z.ZodOptional<z.ZodBoolean>;
|
||||
+ think: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodEnum<['low', 'medium', 'high']>]>>;
|
||||
+ think: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodLiteral<"low">, z.ZodLiteral<"medium">, z.ZodLiteral<"high">]>>;
|
||||
user: z.ZodOptional<z.ZodString>;
|
||||
suffix: z.ZodOptional<z.ZodString>;
|
||||
echo: z.ZodOptional<z.ZodBoolean>;
|
||||
@ -42,7 +42,7 @@ index 35b5142ce8476ce2549ed7c2ec48e7d8c46c90d9..2ef64dc9a4c2be043e6af608241a6a83
|
||||
// src/completion/ollama-completion-language-model.ts
|
||||
var ollamaCompletionProviderOptions = import_v42.z.object({
|
||||
- think: import_v42.z.boolean().optional(),
|
||||
+ think: import_v42.z.union([import_v42.z.boolean(), import_v42.z.enum(['low', 'medium', 'high'])]).optional(),
|
||||
+ think: import_v42.z.union([import_v42.z.boolean(), import_v42.z.literal('low'), import_v42.z.literal('medium'), import_v42.z.literal('high')]).optional(),
|
||||
user: import_v42.z.string().optional(),
|
||||
suffix: import_v42.z.string().optional(),
|
||||
echo: import_v42.z.boolean().optional()
|
||||
@ -64,7 +64,7 @@ index 35b5142ce8476ce2549ed7c2ec48e7d8c46c90d9..2ef64dc9a4c2be043e6af608241a6a83
|
||||
* Only supported by certain models like DeepSeek R1 and Qwen 3.
|
||||
*/
|
||||
- think: import_v44.z.boolean().optional(),
|
||||
+ think: import_v44.z.union([import_v44.z.boolean(), import_v44.z.enum(['low', 'medium', 'high'])]).optional(),
|
||||
+ think: import_v44.z.union([import_v44.z.boolean(), import_v44.z.literal('low'), import_v44.z.literal('medium'), import_v44.z.literal('high')]).optional(),
|
||||
options: import_v44.z.object({
|
||||
num_ctx: import_v44.z.number().optional(),
|
||||
repeat_last_n: import_v44.z.number().optional(),
|
||||
@ -97,7 +97,7 @@ index e2a634a78d80ac9542f2cc4f96cf2291094b10cf..67b23efce3c1cf4f026693d3ff924698
|
||||
// src/completion/ollama-completion-language-model.ts
|
||||
var ollamaCompletionProviderOptions = z2.object({
|
||||
- think: z2.boolean().optional(),
|
||||
+ think: z2.union([z2.boolean(), z2.enum(['low', 'medium', 'high'])]).optional(),
|
||||
+ think: z2.union([z2.boolean(), z2.literal('low'), z2.literal('medium'), z2.literal('high')]).optional(),
|
||||
user: z2.string().optional(),
|
||||
suffix: z2.string().optional(),
|
||||
echo: z2.boolean().optional()
|
||||
@ -119,7 +119,7 @@ index e2a634a78d80ac9542f2cc4f96cf2291094b10cf..67b23efce3c1cf4f026693d3ff924698
|
||||
* Only supported by certain models like DeepSeek R1 and Qwen 3.
|
||||
*/
|
||||
- think: z4.boolean().optional(),
|
||||
+ think: z4.union([z4.boolean(), z4.enum(['low', 'medium', 'high'])]).optional(),
|
||||
+ think: z4.union([z4.boolean(), z4.literal('low'), z4.literal('medium'), z4.literal('high')]).optional(),
|
||||
options: z4.object({
|
||||
num_ctx: z4.number().optional(),
|
||||
repeat_last_n: z4.number().optional(),
|
||||
25639 pnpm-lock.yaml generated Normal file
File diff suppressed because it is too large
8 pnpm-workspace.yaml Normal file
@ -0,0 +1,8 @@
packages:
  - 'packages/*'

supportedArchitectures:
  os:
    - current
  cpu:
    - current
@ -50,7 +50,7 @@ Usage Instructions:
- pt-pt (Portuguese)

Run Command:
yarn i18n:translate
pnpm i18n:translate

Performance Optimization Recommendations:
- For stable API services: MAX_CONCURRENT_TRANSLATIONS=8, TRANSLATION_DELAY_MS=50
@ -152,7 +152,8 @@ const languageMap = {
  'es-es': 'Spanish',
  'fr-fr': 'French',
  'pt-pt': 'Portuguese',
  'de-de': 'German'
  'de-de': 'German',
  'ro-ro': 'Romanian'
}

const PROMPT = `
@ -1,42 +1,35 @@
|
||||
const { Arch } = require('electron-builder')
|
||||
const { downloadNpmPackage } = require('./utils')
|
||||
const { execSync } = require('child_process')
|
||||
const fs = require('fs')
|
||||
const path = require('path')
|
||||
const yaml = require('js-yaml')
|
||||
|
||||
const workspaceConfigPath = path.join(__dirname, '..', 'pnpm-workspace.yaml')
|
||||
|
||||
// if you want to add new prebuild binaries packages with different architectures, you can add them here
|
||||
// please add to allX64 and allArm64 from yarn.lock
|
||||
const allArm64 = {
|
||||
'@img/sharp-darwin-arm64': '0.34.3',
|
||||
'@img/sharp-win32-arm64': '0.34.3',
|
||||
'@img/sharp-linux-arm64': '0.34.3',
|
||||
|
||||
'@img/sharp-libvips-darwin-arm64': '1.2.0',
|
||||
'@img/sharp-libvips-linux-arm64': '1.2.0',
|
||||
|
||||
'@libsql/darwin-arm64': '0.4.7',
|
||||
'@libsql/linux-arm64-gnu': '0.4.7',
|
||||
'@strongtz/win32-arm64-msvc': '0.4.7',
|
||||
|
||||
'@napi-rs/system-ocr-darwin-arm64': '1.0.2',
|
||||
'@napi-rs/system-ocr-win32-arm64-msvc': '1.0.2'
|
||||
}
|
||||
|
||||
const allX64 = {
|
||||
'@img/sharp-darwin-x64': '0.34.3',
|
||||
'@img/sharp-linux-x64': '0.34.3',
|
||||
'@img/sharp-win32-x64': '0.34.3',
|
||||
|
||||
'@img/sharp-libvips-darwin-x64': '1.2.0',
|
||||
'@img/sharp-libvips-linux-x64': '1.2.0',
|
||||
|
||||
'@libsql/darwin-x64': '0.4.7',
|
||||
'@libsql/linux-x64-gnu': '0.4.7',
|
||||
'@libsql/win32-x64-msvc': '0.4.7',
|
||||
|
||||
'@napi-rs/system-ocr-darwin-x64': '1.0.2',
|
||||
'@napi-rs/system-ocr-win32-x64-msvc': '1.0.2'
|
||||
}
|
||||
|
||||
const claudeCodeVenderPath = '@anthropic-ai/claude-agent-sdk/vendor'
|
||||
const claudeCodeVenders = ['arm64-darwin', 'arm64-linux', 'x64-darwin', 'x64-linux', 'x64-win32']
|
||||
// please add to allX64 and allArm64 from pnpm-lock.yaml
|
||||
const packages = [
|
||||
'@img/sharp-darwin-arm64',
|
||||
'@img/sharp-darwin-x64',
|
||||
'@img/sharp-linux-arm64',
|
||||
'@img/sharp-linux-x64',
|
||||
'@img/sharp-win32-arm64',
|
||||
'@img/sharp-win32-x64',
|
||||
'@img/sharp-libvips-darwin-arm64',
|
||||
'@img/sharp-libvips-darwin-x64',
|
||||
'@img/sharp-libvips-linux-arm64',
|
||||
'@img/sharp-libvips-linux-x64',
|
||||
'@libsql/darwin-arm64',
|
||||
'@libsql/darwin-x64',
|
||||
'@libsql/linux-arm64-gnu',
|
||||
'@libsql/linux-x64-gnu',
|
||||
'@libsql/win32-x64-msvc',
|
||||
'@napi-rs/system-ocr-darwin-arm64',
|
||||
'@napi-rs/system-ocr-darwin-x64',
|
||||
'@napi-rs/system-ocr-win32-arm64-msvc',
|
||||
'@napi-rs/system-ocr-win32-x64-msvc',
|
||||
'@strongtz/win32-arm64-msvc'
|
||||
]
|
||||
|
||||
const platformToArch = {
|
||||
mac: 'darwin',
|
||||
@ -45,61 +38,82 @@ const platformToArch = {
|
||||
}
|
||||
|
||||
exports.default = async function (context) {
|
||||
const arch = context.arch
|
||||
const archType = arch === Arch.arm64 ? 'arm64' : 'x64'
|
||||
const platform = context.packager.platform.name
|
||||
const arch = context.arch === Arch.arm64 ? 'arm64' : 'x64'
|
||||
const platformName = context.packager.platform.name
|
||||
const platform = platformToArch[platformName]
|
||||
|
||||
const downloadPackages = async (packages) => {
|
||||
console.log('downloading packages ......')
|
||||
const downloadPromises = []
|
||||
|
||||
for (const name of Object.keys(packages)) {
|
||||
if (name.includes(`${platformToArch[platform]}`) && name.includes(`-${archType}`)) {
|
||||
downloadPromises.push(
|
||||
downloadNpmPackage(
|
||||
name,
|
||||
`https://registry.npmjs.org/${name}/-/${name.split('/').pop()}-${packages[name]}.tgz`
|
||||
)
|
||||
)
|
||||
}
|
||||
const downloadPackages = async () => {
|
||||
// Skip if target platform and architecture match current system
|
||||
if (platform === process.platform && arch === process.arch) {
|
||||
console.log(`Skipping install: target (${platform}/${arch}) matches current system`)
|
||||
return
|
||||
}
|
||||
|
||||
await Promise.all(downloadPromises)
|
||||
console.log(`Installing packages for target platform=${platform} arch=${arch}...`)
|
||||
|
||||
// Backup and modify pnpm-workspace.yaml to add target platform support
|
||||
const originalWorkspaceConfig = fs.readFileSync(workspaceConfigPath, 'utf-8')
|
||||
const workspaceConfig = yaml.load(originalWorkspaceConfig)
|
||||
|
||||
// Add target platform to supportedArchitectures.os
|
||||
if (!workspaceConfig.supportedArchitectures.os.includes(platform)) {
|
||||
workspaceConfig.supportedArchitectures.os.push(platform)
|
||||
}
|
||||
|
||||
// Add target architecture to supportedArchitectures.cpu
|
||||
if (!workspaceConfig.supportedArchitectures.cpu.includes(arch)) {
|
||||
workspaceConfig.supportedArchitectures.cpu.push(arch)
|
||||
}
|
||||
|
||||
const modifiedWorkspaceConfig = yaml.dump(workspaceConfig)
|
||||
console.log('Modified workspace config:', modifiedWorkspaceConfig)
|
||||
fs.writeFileSync(workspaceConfigPath, modifiedWorkspaceConfig)
|
||||
|
||||
try {
|
||||
execSync(`pnpm install`, { stdio: 'inherit' })
|
||||
} finally {
|
||||
// Restore original pnpm-workspace.yaml
|
||||
fs.writeFileSync(workspaceConfigPath, originalWorkspaceConfig)
|
||||
}
|
||||
}
|
||||
|
||||
const changeFilters = async (filtersToExclude, filtersToInclude) => {
|
||||
// remove filters for the target architecture (allow inclusion)
|
||||
let filters = context.packager.config.files[0].filter
|
||||
filters = filters.filter((filter) => !filtersToInclude.includes(filter))
|
||||
await downloadPackages()
|
||||
|
||||
const excludePackages = async (packagesToExclude) => {
|
||||
// 从项目根目录的 electron-builder.yml 读取 files 配置,避免多次覆盖配置导致出错
|
||||
const electronBuilderConfigPath = path.join(__dirname, '..', 'electron-builder.yml')
|
||||
const electronBuilderConfig = yaml.load(fs.readFileSync(electronBuilderConfigPath, 'utf-8'))
|
||||
let filters = electronBuilderConfig.files
|
||||
|
||||
// add filters for other architectures (exclude them)
|
||||
filters.push(...filtersToExclude)
|
||||
filters.push(...packagesToExclude)
|
||||
|
||||
context.packager.config.files[0].filter = filters
|
||||
}
|
||||
|
||||
await downloadPackages(arch === Arch.arm64 ? allArm64 : allX64)
|
||||
const arm64KeepPackages = packages.filter((p) => p.includes('arm64') && p.includes(platform))
|
||||
const arm64ExcludePackages = packages
|
||||
.filter((p) => !arm64KeepPackages.includes(p))
|
||||
.map((p) => '!node_modules/' + p + '/**')
|
||||
|
||||
const arm64Filters = Object.keys(allArm64).map((f) => '!node_modules/' + f + '/**')
|
||||
const x64Filters = Object.keys(allX64).map((f) => '!node_modules/' + f + '/*')
|
||||
const excludeClaudeCodeRipgrepFilters = claudeCodeVenders
|
||||
.filter((f) => f !== `${archType}-${platformToArch[platform]}`)
|
||||
.map((f) => '!node_modules/' + claudeCodeVenderPath + '/ripgrep/' + f + '/**')
|
||||
const excludeClaudeCodeJBPlutins = ['!node_modules/' + claudeCodeVenderPath + '/' + 'claude-code-jetbrains-plugin']
|
||||
const x64KeepPackages = packages.filter((p) => p.includes('x64') && p.includes(platform))
|
||||
const x64ExcludePackages = packages
|
||||
.filter((p) => !x64KeepPackages.includes(p))
|
||||
.map((p) => '!node_modules/' + p + '/**')
|
||||
|
||||
const includeClaudeCodeFilters = [
|
||||
'!node_modules/' + claudeCodeVenderPath + '/ripgrep/' + `${archType}-${platformToArch[platform]}/**`
|
||||
]
|
||||
const excludeRipgrepFilters = ['arm64-darwin', 'arm64-linux', 'x64-darwin', 'x64-linux', 'x64-win32']
|
||||
.filter((f) => {
|
||||
// On Windows ARM64, also keep x64-win32 for emulation compatibility
|
||||
if (platform === 'win32' && context.arch === Arch.arm64 && f === 'x64-win32') {
|
||||
return false
|
||||
}
|
||||
return f !== `${arch}-${platform}`
|
||||
})
|
||||
.map((f) => '!node_modules/@anthropic-ai/claude-agent-sdk/vendor/ripgrep/' + f + '/**')
|
||||
|
||||
if (arch === Arch.arm64) {
|
||||
await changeFilters(
|
||||
[...x64Filters, ...excludeClaudeCodeRipgrepFilters, ...excludeClaudeCodeJBPlutins],
|
||||
[...arm64Filters, ...includeClaudeCodeFilters]
|
||||
)
|
||||
if (context.arch === Arch.arm64) {
|
||||
await excludePackages([...arm64ExcludePackages, ...excludeRipgrepFilters])
|
||||
} else {
|
||||
await changeFilters(
|
||||
[...arm64Filters, ...excludeClaudeCodeRipgrepFilters, ...excludeClaudeCodeJBPlutins],
|
||||
[...x64Filters, ...includeClaudeCodeFilters]
|
||||
)
|
||||
await excludePackages([...x64ExcludePackages, ...excludeRipgrepFilters])
|
||||
}
|
||||
}
|
||||
|
||||
@ -145,7 +145,7 @@ export function main() {
    console.log('i18n 检查已通过')
  } catch (e) {
    console.error(e)
    throw new Error(`检查未通过。尝试运行 yarn i18n:sync 以解决问题。`)
    throw new Error(`检查未通过。尝试运行 pnpm i18n:sync 以解决问题。`)
  }
}
|
||||
|
||||
|
||||
@ -1,211 +0,0 @@
|
||||
/**
|
||||
* Feishu (Lark) Webhook Notification Script
|
||||
* Sends GitHub issue summaries to Feishu with signature verification
|
||||
*/
|
||||
|
||||
const crypto = require('crypto')
|
||||
const https = require('https')
|
||||
|
||||
/**
|
||||
* Generate Feishu webhook signature
|
||||
* @param {string} secret - Feishu webhook secret
|
||||
* @param {number} timestamp - Unix timestamp in seconds
|
||||
* @returns {string} Base64 encoded signature
|
||||
*/
|
||||
function generateSignature(secret, timestamp) {
|
||||
const stringToSign = `${timestamp}\n${secret}`
|
||||
const hmac = crypto.createHmac('sha256', stringToSign)
|
||||
return hmac.digest('base64')
|
||||
}
|
||||
|
||||
/**
|
||||
* Send message to Feishu webhook
|
||||
* @param {string} webhookUrl - Feishu webhook URL
|
||||
* @param {string} secret - Feishu webhook secret
|
||||
* @param {object} content - Message content
|
||||
* @returns {Promise<void>}
|
||||
*/
|
||||
function sendToFeishu(webhookUrl, secret, content) {
|
||||
return new Promise((resolve, reject) => {
|
||||
const timestamp = Math.floor(Date.now() / 1000)
|
||||
const sign = generateSignature(secret, timestamp)
|
||||
|
||||
const payload = JSON.stringify({
|
||||
timestamp: timestamp.toString(),
|
||||
sign: sign,
|
||||
msg_type: 'interactive',
|
||||
card: content
|
||||
})
|
||||
|
||||
const url = new URL(webhookUrl)
|
||||
const options = {
|
||||
hostname: url.hostname,
|
||||
path: url.pathname + url.search,
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'Content-Length': Buffer.byteLength(payload)
|
||||
}
|
||||
}
|
||||
|
||||
const req = https.request(options, (res) => {
|
||||
let data = ''
|
||||
res.on('data', (chunk) => {
|
||||
data += chunk
|
||||
})
|
||||
res.on('end', () => {
|
||||
if (res.statusCode >= 200 && res.statusCode < 300) {
|
||||
console.log('✅ Successfully sent to Feishu:', data)
|
||||
resolve()
|
||||
} else {
|
||||
reject(new Error(`Feishu API error: ${res.statusCode} - ${data}`))
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
req.on('error', (error) => {
|
||||
reject(error)
|
||||
})
|
||||
|
||||
req.write(payload)
|
||||
req.end()
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Create Feishu card message from issue data
|
||||
* @param {object} issueData - GitHub issue data
|
||||
* @returns {object} Feishu card content
|
||||
*/
|
||||
function createIssueCard(issueData) {
|
||||
const { issueUrl, issueNumber, issueTitle, issueSummary, issueAuthor, labels } = issueData
|
||||
|
||||
// Build labels section if labels exist
|
||||
const labelElements =
|
||||
labels && labels.length > 0
|
||||
? labels.map((label) => ({
|
||||
tag: 'markdown',
|
||||
content: `\`${label}\``
|
||||
}))
|
||||
: []
|
||||
|
||||
return {
|
||||
elements: [
|
||||
{
|
||||
tag: 'div',
|
||||
text: {
|
||||
tag: 'lark_md',
|
||||
content: `**👤 Author:** ${issueAuthor}`
|
||||
}
|
||||
},
|
||||
...(labelElements.length > 0
|
||||
? [
|
||||
{
|
||||
tag: 'div',
|
||||
text: {
|
||||
tag: 'lark_md',
|
||||
content: `**🏷️ Labels:** ${labels.join(', ')}`
|
||||
}
|
||||
}
|
||||
]
|
||||
: []),
|
||||
{
|
||||
tag: 'hr'
|
||||
},
|
||||
{
|
||||
tag: 'div',
|
||||
text: {
|
||||
tag: 'lark_md',
|
||||
content: `**📋 Summary:**\n${issueSummary}`
|
||||
}
|
||||
},
|
||||
{
|
||||
tag: 'hr'
|
||||
},
|
||||
{
|
||||
tag: 'action',
|
||||
actions: [
|
||||
{
|
||||
tag: 'button',
|
||||
text: {
|
||||
tag: 'plain_text',
|
||||
content: '🔗 View Issue'
|
||||
},
|
||||
type: 'primary',
|
||||
url: issueUrl
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
header: {
|
||||
template: 'blue',
|
||||
title: {
|
||||
tag: 'plain_text',
|
||||
content: `#${issueNumber} - ${issueTitle}`
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Main function
|
||||
*/
|
||||
async function main() {
|
||||
try {
|
||||
// Get environment variables
|
||||
const webhookUrl = process.env.FEISHU_WEBHOOK_URL
|
||||
const secret = process.env.FEISHU_WEBHOOK_SECRET
|
||||
const issueUrl = process.env.ISSUE_URL
|
||||
const issueNumber = process.env.ISSUE_NUMBER
|
||||
const issueTitle = process.env.ISSUE_TITLE
|
||||
const issueSummary = process.env.ISSUE_SUMMARY
|
||||
const issueAuthor = process.env.ISSUE_AUTHOR
|
||||
const labelsStr = process.env.ISSUE_LABELS || ''
|
||||
|
||||
// Validate required environment variables
|
||||
if (!webhookUrl) {
|
||||
throw new Error('FEISHU_WEBHOOK_URL environment variable is required')
|
||||
}
|
||||
if (!secret) {
|
||||
throw new Error('FEISHU_WEBHOOK_SECRET environment variable is required')
|
||||
}
|
||||
if (!issueUrl || !issueNumber || !issueTitle || !issueSummary) {
|
||||
throw new Error('Issue data environment variables are required')
|
||||
}
|
||||
|
||||
// Parse labels
|
||||
const labels = labelsStr
|
||||
? labelsStr
|
||||
.split(',')
|
||||
.map((l) => l.trim())
|
||||
.filter(Boolean)
|
||||
: []
|
||||
|
||||
// Create issue data object
|
||||
const issueData = {
|
||||
issueUrl,
|
||||
issueNumber,
|
||||
issueTitle,
|
||||
issueSummary,
|
||||
issueAuthor: issueAuthor || 'Unknown',
|
||||
labels
|
||||
}
|
||||
|
||||
// Create card content
|
||||
const card = createIssueCard(issueData)
|
||||
|
||||
console.log('📤 Sending notification to Feishu...')
|
||||
console.log(`Issue #${issueNumber}: ${issueTitle}`)
|
||||
|
||||
// Send to Feishu
|
||||
await sendToFeishu(webhookUrl, secret, card)
|
||||
|
||||
console.log('✅ Notification sent successfully!')
|
||||
} catch (error) {
|
||||
console.error('❌ Error:', error.message)
|
||||
process.exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// Run main function
|
||||
main()
|
||||
421
scripts/feishu-notify.ts
Normal file
421
scripts/feishu-notify.ts
Normal file
@ -0,0 +1,421 @@
|
||||
#!/usr/bin/env npx tsx
|
||||
/**
|
||||
* @fileoverview Feishu (Lark) Webhook Notification CLI Tool
|
||||
* @description Sends notifications to Feishu with signature verification.
|
||||
* Supports subcommands for different notification types.
|
||||
* @module feishu-notify
|
||||
* @example
|
||||
* // Send GitHub issue notification
|
||||
* pnpm tsx feishu-notify.ts issue -u "https://..." -n "123" -t "Title" -m "Summary"
|
||||
*
|
||||
* // Using environment variables for credentials
|
||||
* FEISHU_WEBHOOK_URL="..." FEISHU_WEBHOOK_SECRET="..." pnpm tsx feishu-notify.ts issue ...
|
||||
*/
|
||||
|
||||
import { Command } from 'commander'
|
||||
import crypto from 'crypto'
|
||||
import dotenv from 'dotenv'
|
||||
import https from 'https'
|
||||
import * as z from 'zod'
|
||||
|
||||
// Load environment variables from .env file
|
||||
dotenv.config()
|
||||
|
||||
/** CLI tool version */
|
||||
const VERSION = '1.0.0'
|
||||
|
||||
/** GitHub issue data structure */
|
||||
interface IssueData {
|
||||
/** GitHub issue URL */
|
||||
issueUrl: string
|
||||
/** Issue number */
|
||||
issueNumber: string
|
||||
/** Issue title */
|
||||
issueTitle: string
|
||||
/** Issue summary/description */
|
||||
issueSummary: string
|
||||
/** Issue author username */
|
||||
issueAuthor: string
|
||||
/** Issue labels */
|
||||
labels: string[]
|
||||
}
|
||||
|
||||
/** Feishu card text element */
|
||||
interface FeishuTextElement {
|
||||
tag: 'div'
|
||||
text: {
|
||||
tag: 'lark_md'
|
||||
content: string
|
||||
}
|
||||
}
|
||||
|
||||
/** Feishu card horizontal rule element */
|
||||
interface FeishuHrElement {
|
||||
tag: 'hr'
|
||||
}
|
||||
|
||||
/** Feishu card action button */
|
||||
interface FeishuActionElement {
|
||||
tag: 'action'
|
||||
actions: Array<{
|
||||
tag: 'button'
|
||||
text: {
|
||||
tag: 'plain_text'
|
||||
content: string
|
||||
}
|
||||
type: 'primary' | 'default'
|
||||
url: string
|
||||
}>
|
||||
}
|
||||
|
||||
/** Feishu card element union type */
|
||||
type FeishuCardElement = FeishuTextElement | FeishuHrElement | FeishuActionElement
|
||||
|
||||
/** Zod schema for Feishu header color template */
|
||||
const FeishuHeaderTemplateSchema = z.enum([
|
||||
'blue',
|
||||
'wathet',
|
||||
'turquoise',
|
||||
'green',
|
||||
'yellow',
|
||||
'orange',
|
||||
'red',
|
||||
'carmine',
|
||||
'violet',
|
||||
'purple',
|
||||
'indigo',
|
||||
'grey',
|
||||
'default'
|
||||
])
|
||||
|
||||
/** Feishu card header color template (inferred from schema) */
|
||||
type FeishuHeaderTemplate = z.infer<typeof FeishuHeaderTemplateSchema>
|
||||
|
||||
/** Feishu interactive card structure */
|
||||
interface FeishuCard {
|
||||
elements: FeishuCardElement[]
|
||||
header: {
|
||||
template: FeishuHeaderTemplate
|
||||
title: {
|
||||
tag: 'plain_text'
|
||||
content: string
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/** Feishu webhook request payload */
|
||||
interface FeishuPayload {
|
||||
timestamp: string
|
||||
sign: string
|
||||
msg_type: 'interactive'
|
||||
card: FeishuCard
|
||||
}
|
||||
|
||||
/** Issue subcommand options */
|
||||
interface IssueOptions {
|
||||
url: string
|
||||
number: string
|
||||
title: string
|
||||
summary: string
|
||||
author?: string
|
||||
labels?: string
|
||||
}
|
||||
|
||||
/** Send subcommand options */
|
||||
interface SendOptions {
|
||||
title: string
|
||||
description: string
|
||||
color?: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate Feishu webhook signature using HMAC-SHA256
|
||||
* @param secret - Feishu webhook secret
|
||||
* @param timestamp - Unix timestamp in seconds
|
||||
* @returns Base64 encoded signature
|
||||
*/
|
||||
function generateSignature(secret: string, timestamp: number): string {
|
||||
const stringToSign = `${timestamp}\n${secret}`
|
||||
const hmac = crypto.createHmac('sha256', stringToSign)
|
||||
return hmac.digest('base64')
|
||||
}
|
||||
|
||||
/**
|
||||
* Send message to Feishu webhook
|
||||
* @param webhookUrl - Feishu webhook URL
|
||||
* @param secret - Feishu webhook secret
|
||||
* @param content - Feishu card message content
|
||||
* @returns Resolves when message is sent successfully
|
||||
* @throws When Feishu API returns non-2xx status code or network error occurs
|
||||
*/
|
||||
function sendToFeishu(webhookUrl: string, secret: string, content: FeishuCard): Promise<void> {
|
||||
return new Promise((resolve, reject) => {
|
||||
const timestamp = Math.floor(Date.now() / 1000)
|
||||
const sign = generateSignature(secret, timestamp)
|
||||
|
||||
const payload: FeishuPayload = {
|
||||
timestamp: timestamp.toString(),
|
||||
sign,
|
||||
msg_type: 'interactive',
|
||||
card: content
|
||||
}
|
||||
|
||||
const payloadStr = JSON.stringify(payload)
|
||||
const url = new URL(webhookUrl)
|
||||
|
||||
const options: https.RequestOptions = {
|
||||
hostname: url.hostname,
|
||||
path: url.pathname + url.search,
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'Content-Length': Buffer.byteLength(payloadStr)
|
||||
}
|
||||
}
|
||||
|
||||
const req = https.request(options, (res) => {
|
||||
let data = ''
|
||||
res.on('data', (chunk: Buffer) => {
|
||||
data += chunk.toString()
|
||||
})
|
||||
res.on('end', () => {
|
||||
if (res.statusCode && res.statusCode >= 200 && res.statusCode < 300) {
|
||||
console.log('Successfully sent to Feishu:', data)
|
||||
resolve()
|
||||
} else {
|
||||
reject(new Error(`Feishu API error: ${res.statusCode} - ${data}`))
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
req.on('error', (error: Error) => {
|
||||
reject(error)
|
||||
})
|
||||
|
||||
req.write(payloadStr)
|
||||
req.end()
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Create Feishu card message from issue data
|
||||
* @param issueData - GitHub issue data
|
||||
* @returns Feishu card content
|
||||
*/
|
||||
function createIssueCard(issueData: IssueData): FeishuCard {
|
||||
const { issueUrl, issueNumber, issueTitle, issueSummary, issueAuthor, labels } = issueData
|
||||
|
||||
const elements: FeishuCardElement[] = [
|
||||
{
|
||||
tag: 'div',
|
||||
text: {
|
||||
tag: 'lark_md',
|
||||
content: `**Author:** ${issueAuthor}`
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
if (labels.length > 0) {
|
||||
elements.push({
|
||||
tag: 'div',
|
||||
text: {
|
||||
tag: 'lark_md',
|
||||
content: `**Labels:** ${labels.join(', ')}`
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
elements.push(
|
||||
{ tag: 'hr' },
|
||||
{
|
||||
tag: 'div',
|
||||
text: {
|
||||
tag: 'lark_md',
|
||||
content: `**Summary:**\n${issueSummary}`
|
||||
}
|
||||
},
|
||||
{ tag: 'hr' },
|
||||
{
|
||||
tag: 'action',
|
||||
actions: [
|
||||
{
|
||||
tag: 'button',
|
||||
text: {
|
||||
tag: 'plain_text',
|
||||
content: 'View Issue'
},
type: 'primary',
url: issueUrl
}
]
}
)

return {
elements,
header: {
template: 'blue',
title: {
tag: 'plain_text',
content: `#${issueNumber} - ${issueTitle}`
}
}
}
}

/**
* Create a simple Feishu card message
* @param title - Card title
* @param description - Card description content
* @param color - Header color template (default: 'turquoise')
* @returns Feishu card content
*/
function createSimpleCard(title: string, description: string, color: FeishuHeaderTemplate = 'turquoise'): FeishuCard {
return {
elements: [
{
tag: 'div',
text: {
tag: 'lark_md',
content: description
}
}
],
header: {
template: color,
title: {
tag: 'plain_text',
content: title
}
}
}
}

/**
* Get Feishu credentials from environment variables
*/
function getCredentials(): { webhookUrl: string; secret: string } {
const webhookUrl = process.env.FEISHU_WEBHOOK_URL
const secret = process.env.FEISHU_WEBHOOK_SECRET

if (!webhookUrl) {
console.error('Error: FEISHU_WEBHOOK_URL environment variable is required')
process.exit(1)
}
if (!secret) {
console.error('Error: FEISHU_WEBHOOK_SECRET environment variable is required')
process.exit(1)
}

return { webhookUrl, secret }
}

/**
* Handle send subcommand
*/
async function handleSendCommand(options: SendOptions): Promise<void> {
const { webhookUrl, secret } = getCredentials()

const { title, description, color = 'turquoise' } = options

// Validate color parameter
const colorValidation = FeishuHeaderTemplateSchema.safeParse(color)
if (!colorValidation.success) {
console.error(`Error: Invalid color "${color}". Valid colors: ${FeishuHeaderTemplateSchema.options.join(', ')}`)
process.exit(1)
}

const card = createSimpleCard(title, description, colorValidation.data)

console.log('Sending notification to Feishu...')
console.log(`Title: ${title}`)

await sendToFeishu(webhookUrl, secret, card)

console.log('Notification sent successfully!')
}

/**
* Handle issue subcommand
*/
async function handleIssueCommand(options: IssueOptions): Promise<void> {
const { webhookUrl, secret } = getCredentials()

const { url, number, title, summary, author = 'Unknown', labels: labelsStr = '' } = options

if (!url || !number || !title || !summary) {
console.error('Error: --url, --number, --title, and --summary are required')
process.exit(1)
}

const labels = labelsStr
? labelsStr
.split(',')
.map((l) => l.trim())
.filter(Boolean)
: []

const issueData: IssueData = {
issueUrl: url,
issueNumber: number,
issueTitle: title,
issueSummary: summary,
issueAuthor: author,
labels
}

const card = createIssueCard(issueData)

console.log('Sending notification to Feishu...')
console.log(`Issue #${number}: ${title}`)

await sendToFeishu(webhookUrl, secret, card)

console.log('Notification sent successfully!')
}

// Configure CLI
const program = new Command()

program.name('feishu-notify').description('Send notifications to Feishu webhook').version(VERSION)

// Send subcommand (generic)
program
.command('send')
.description('Send a simple notification to Feishu')
.requiredOption('-t, --title <title>', 'Card title')
.requiredOption('-d, --description <description>', 'Card description (supports markdown)')
.option(
'-c, --color <color>',
`Header color template (default: turquoise). Options: ${FeishuHeaderTemplateSchema.options.join(', ')}`,
'turquoise'
)
.action(async (options: SendOptions) => {
try {
await handleSendCommand(options)
} catch (error) {
console.error('Error:', error instanceof Error ? error.message : error)
process.exit(1)
}
})

// Issue subcommand
program
.command('issue')
.description('Send GitHub issue notification to Feishu')
.requiredOption('-u, --url <url>', 'GitHub issue URL')
.requiredOption('-n, --number <number>', 'Issue number')
.requiredOption('-t, --title <title>', 'Issue title')
.requiredOption('-m, --summary <summary>', 'Issue summary')
.option('-a, --author <author>', 'Issue author', 'Unknown')
.option('-l, --labels <labels>', 'Issue labels, comma-separated')
.action(async (options: IssueOptions) => {
try {
await handleIssueCommand(options)
} catch (error) {
console.error('Error:', error instanceof Error ? error.message : error)
process.exit(1)
}
})

program.parse()
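The helpers above are only wired to the CLI in this script. A minimal sketch of reusing them programmatically (the `notifyReleasePublished` wrapper is hypothetical and assumes it lives in this same module, since `createSimpleCard` and `sendToFeishu` are not exported by this diff):

```typescript
// Hypothetical wrapper around the helpers defined above.
// Assumes FEISHU_WEBHOOK_URL and FEISHU_WEBHOOK_SECRET are set, as getCredentials() requires.
async function notifyReleasePublished(version: string): Promise<void> {
  const { webhookUrl, secret } = getCredentials()
  const card = createSimpleCard(
    `Release ${version} published`,
    `**Version:** ${version}\nSee the release notes for details.`,
    'blue' // 'blue' is one of the header templates already used by createIssueCard
  )
  await sendToFeishu(webhookUrl, secret, card)
}
```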
@ -57,7 +57,7 @@ function generateLanguagesFileContent(languages: Record<string, LanguageData>):
*
* ⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️
* THIS FILE IS AUTOMATICALLY GENERATED BY A SCRIPT. DO NOT EDIT IT MANUALLY!
* Run \`yarn update:languages\` to update this file.
* Run \`pnpm update:languages\` to update this file.
* ⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️
*
*/
@ -81,7 +81,7 @@ export const languages: Record<string, LanguageData> = ${languagesObjectString};
async function format(filePath: string): Promise<void> {
console.log('🎨 Formatting file with Biome...')
try {
await execAsync(`yarn biome format --write ${filePath}`)
await execAsync(`pnpm biome format --write ${filePath}`)
console.log('✅ Biome formatting complete.')
} catch (e: any) {
console.error('❌ Biome formatting failed:', e.stdout || e.stderr)
@ -96,7 +96,7 @@ async function format(filePath: string): Promise<void> {
async function checkTypeScript(filePath: string): Promise<void> {
console.log('🧐 Checking file with TypeScript compiler...')
try {
await execAsync(`yarn tsc --noEmit --skipLibCheck ${filePath}`)
await execAsync(`pnpm tsc --noEmit --skipLibCheck ${filePath}`)
console.log('✅ TypeScript check passed.')
} catch (e: any) {
console.error('❌ TypeScript check failed:', e.stdout || e.stderr)
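For context, a hedged sketch of how the generated languages file is presumably written and then validated with the two helpers above (the wrapper function, output path handling, and `node:fs` import are assumptions, not part of this diff):

```typescript
import fs from 'node:fs'

// Hypothetical driver: write the generated languages file, then run the
// pnpm-based formatting and type-checking helpers shown in this diff.
async function writeAndValidateLanguagesFile(outputPath: string, content: string): Promise<void> {
  fs.writeFileSync(outputPath, content, 'utf8')
  await format(outputPath) // `pnpm biome format --write <file>`
  await checkTypeScript(outputPath) // `pnpm tsc --noEmit --skipLibCheck <file>`
}
```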
@ -1,64 +0,0 @@
const fs = require('fs')
const path = require('path')
const os = require('os')
const zlib = require('zlib')
const tar = require('tar')
const { pipeline } = require('stream/promises')

async function downloadNpmPackage(packageName, url) {
const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'npm-download-'))
const targetDir = path.join('./node_modules/', packageName)
const filename = path.join(tempDir, packageName.replace('/', '-') + '.tgz')
const extractDir = path.join(tempDir, 'extract')

// Skip if directory already exists
if (fs.existsSync(targetDir)) {
console.log(`${targetDir} already exists, skipping download...`)
return
}

try {
console.log(`Downloading ${packageName}...`, url)

// Download file using fetch API
const response = await fetch(url)
if (!response.ok) {
throw new Error(`HTTP error! status: ${response.status}`)
}

const fileStream = fs.createWriteStream(filename)
await pipeline(response.body, fileStream)

console.log(`Extracting ${filename}...`)

// Create extraction directory
fs.mkdirSync(extractDir, { recursive: true })

// Extract tar.gz file using Node.js streams
await pipeline(fs.createReadStream(filename), zlib.createGunzip(), tar.extract({ cwd: extractDir }))

// Remove the downloaded file
fs.rmSync(filename, { force: true })

// Create target directory
fs.mkdirSync(targetDir, { recursive: true })

// Move extracted package contents to target directory
const packageDir = path.join(extractDir, 'package')
if (fs.existsSync(packageDir)) {
fs.cpSync(packageDir, targetDir, { recursive: true })
}
} catch (error) {
console.error(`Error processing ${packageName}: ${error.message}`)
throw error
} finally {
// Clean up temp directory
if (fs.existsSync(tempDir)) {
fs.rmSync(tempDir, { recursive: true, force: true })
}
}
}

module.exports = {
downloadNpmPackage
}
@ -18,7 +18,7 @@ if (!['patch', 'minor', 'major'].includes(versionType)) {
}

// Update the version
exec(`yarn version ${versionType} --immediate`)
exec(`pnpm version ${versionType}`)

// Read the updated package.json to get the new version number
const updatedPackageJson = JSON.parse(fs.readFileSync('package.json', 'utf8'))
240
src/main/__tests__/mcp.test.ts
Normal file
@ -0,0 +1,240 @@
|
||||
import { buildFunctionCallToolName, buildMcpToolName, generateMcpToolFunctionName, toCamelCase } from '@shared/mcp'
|
||||
import { describe, expect, it } from 'vitest'
|
||||
|
||||
describe('toCamelCase', () => {
|
||||
it('should convert hyphenated strings', () => {
|
||||
expect(toCamelCase('my-server')).toBe('myServer')
|
||||
expect(toCamelCase('my-tool-name')).toBe('myToolName')
|
||||
})
|
||||
|
||||
it('should convert underscored strings', () => {
|
||||
expect(toCamelCase('my_server')).toBe('myServer')
|
||||
expect(toCamelCase('search_issues')).toBe('searchIssues')
|
||||
})
|
||||
|
||||
it('should handle mixed delimiters', () => {
|
||||
expect(toCamelCase('my-server_name')).toBe('myServerName')
|
||||
})
|
||||
|
||||
it('should handle leading numbers by prefixing underscore', () => {
|
||||
expect(toCamelCase('123server')).toBe('_123server')
|
||||
})
|
||||
|
||||
it('should handle special characters', () => {
|
||||
expect(toCamelCase('test@server!')).toBe('testServer')
|
||||
expect(toCamelCase('tool#name$')).toBe('toolName')
|
||||
})
|
||||
|
||||
it('should trim whitespace', () => {
|
||||
expect(toCamelCase(' server ')).toBe('server')
|
||||
})
|
||||
|
||||
it('should handle empty string', () => {
|
||||
expect(toCamelCase('')).toBe('')
|
||||
})
|
||||
|
||||
it('should handle uppercase snake case', () => {
|
||||
expect(toCamelCase('MY_SERVER')).toBe('myServer')
|
||||
expect(toCamelCase('SEARCH_ISSUES')).toBe('searchIssues')
|
||||
})
|
||||
|
||||
it('should handle mixed case', () => {
|
||||
expect(toCamelCase('MyServer')).toBe('myserver')
|
||||
expect(toCamelCase('myTOOL')).toBe('mytool')
|
||||
})
|
||||
})
|
||||
|
||||
describe('buildMcpToolName', () => {
|
||||
it('should build basic name with defaults', () => {
|
||||
expect(buildMcpToolName('github', 'search_issues')).toBe('github_searchIssues')
|
||||
})
|
||||
|
||||
it('should handle undefined server name', () => {
|
||||
expect(buildMcpToolName(undefined, 'search_issues')).toBe('searchIssues')
|
||||
})
|
||||
|
||||
it('should apply custom prefix and delimiter', () => {
|
||||
expect(buildMcpToolName('github', 'search', { prefix: 'mcp__', delimiter: '__' })).toBe('mcp__github__search')
|
||||
})
|
||||
|
||||
it('should respect maxLength', () => {
|
||||
const result = buildMcpToolName('veryLongServerName', 'veryLongToolName', { maxLength: 20 })
|
||||
expect(result.length).toBeLessThanOrEqual(20)
|
||||
})
|
||||
|
||||
it('should handle collision with existingNames', () => {
|
||||
const existingNames = new Set(['github_search'])
|
||||
const result = buildMcpToolName('github', 'search', { existingNames })
|
||||
expect(result).toBe('github_search1')
|
||||
expect(existingNames.has('github_search1')).toBe(true)
|
||||
})
|
||||
|
||||
it('should respect maxLength when adding collision suffix', () => {
|
||||
const existingNames = new Set(['a'.repeat(20)])
|
||||
const result = buildMcpToolName('a'.repeat(20), '', { maxLength: 20, existingNames })
|
||||
expect(result.length).toBeLessThanOrEqual(20)
|
||||
expect(existingNames.has(result)).toBe(true)
|
||||
})
|
||||
|
||||
it('should handle multiple collisions with maxLength', () => {
|
||||
const existingNames = new Set(['abcd', 'abc1', 'abc2'])
|
||||
const result = buildMcpToolName('abcd', '', { maxLength: 4, existingNames })
|
||||
expect(result).toBe('abc3')
|
||||
expect(result.length).toBeLessThanOrEqual(4)
|
||||
})
|
||||
})
|
||||
|
||||
describe('generateMcpToolFunctionName', () => {
|
||||
it('should return format serverName_toolName in camelCase', () => {
|
||||
expect(generateMcpToolFunctionName('github', 'search_issues')).toBe('github_searchIssues')
|
||||
})
|
||||
|
||||
it('should handle hyphenated names', () => {
|
||||
expect(generateMcpToolFunctionName('my-server', 'my-tool')).toBe('myServer_myTool')
|
||||
})
|
||||
|
||||
it('should handle undefined server name', () => {
|
||||
expect(generateMcpToolFunctionName(undefined, 'search_issues')).toBe('searchIssues')
|
||||
})
|
||||
|
||||
it('should handle collision detection', () => {
|
||||
const existingNames = new Set<string>()
|
||||
const first = generateMcpToolFunctionName('github', 'search', existingNames)
|
||||
const second = generateMcpToolFunctionName('github', 'search', existingNames)
|
||||
expect(first).toBe('github_search')
|
||||
expect(second).toBe('github_search1')
|
||||
})
|
||||
})
|
||||
|
||||
describe('buildFunctionCallToolName', () => {
|
||||
describe('basic format', () => {
|
||||
it('should return format mcp__{server}__{tool} in camelCase', () => {
|
||||
const result = buildFunctionCallToolName('github', 'search_issues')
|
||||
expect(result).toBe('mcp__github__searchIssues')
|
||||
})
|
||||
|
||||
it('should handle simple server and tool names', () => {
|
||||
expect(buildFunctionCallToolName('fetch', 'get_page')).toBe('mcp__fetch__getPage')
|
||||
expect(buildFunctionCallToolName('database', 'query')).toBe('mcp__database__query')
|
||||
})
|
||||
})
|
||||
|
||||
describe('valid JavaScript identifier', () => {
|
||||
it('should always start with mcp__ prefix (valid JS identifier start)', () => {
|
||||
const result = buildFunctionCallToolName('123server', '456tool')
|
||||
expect(result).toMatch(/^mcp__/)
|
||||
})
|
||||
|
||||
it('should handle hyphenated names with camelCase', () => {
|
||||
const result = buildFunctionCallToolName('my-server', 'my-tool')
|
||||
expect(result).toBe('mcp__myServer__myTool')
|
||||
})
|
||||
|
||||
it('should be a valid JavaScript identifier', () => {
|
||||
const testCases = [
|
||||
['github', 'create_issue'],
|
||||
['my-server', 'fetch-data'],
|
||||
['test@server', 'tool#name'],
|
||||
['server.name', 'tool.action']
|
||||
]
|
||||
|
||||
for (const [server, tool] of testCases) {
|
||||
const result = buildFunctionCallToolName(server, tool)
|
||||
expect(result).toMatch(/^[a-zA-Z_][a-zA-Z0-9_]*$/)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
describe('character sanitization', () => {
|
||||
it('should convert special characters to camelCase boundaries', () => {
|
||||
expect(buildFunctionCallToolName('my-server', 'my-tool-name')).toBe('mcp__myServer__myToolName')
|
||||
expect(buildFunctionCallToolName('test@server!', 'tool#name$')).toBe('mcp__testServer__toolName')
|
||||
expect(buildFunctionCallToolName('server.name', 'tool.action')).toBe('mcp__serverName__toolAction')
|
||||
})
|
||||
|
||||
it('should handle spaces', () => {
|
||||
const result = buildFunctionCallToolName('my server', 'my tool')
|
||||
expect(result).toBe('mcp__myServer__myTool')
|
||||
})
|
||||
})
|
||||
|
||||
describe('length constraints', () => {
|
||||
it('should not exceed 63 characters', () => {
|
||||
const longServerName = 'a'.repeat(50)
|
||||
const longToolName = 'b'.repeat(50)
|
||||
const result = buildFunctionCallToolName(longServerName, longToolName)
|
||||
expect(result.length).toBeLessThanOrEqual(63)
|
||||
})
|
||||
|
||||
it('should not end with underscores after truncation', () => {
|
||||
const longServerName = 'a'.repeat(30)
|
||||
const longToolName = 'b'.repeat(30)
|
||||
const result = buildFunctionCallToolName(longServerName, longToolName)
|
||||
expect(result).not.toMatch(/_+$/)
|
||||
expect(result.length).toBeLessThanOrEqual(63)
|
||||
})
|
||||
})
|
||||
|
||||
describe('edge cases', () => {
|
||||
it('should handle empty server name', () => {
|
||||
const result = buildFunctionCallToolName('', 'tool')
|
||||
expect(result).toBe('mcp__tool')
|
||||
})
|
||||
|
||||
it('should handle empty tool name', () => {
|
||||
const result = buildFunctionCallToolName('server', '')
|
||||
expect(result).toBe('mcp__server__')
|
||||
})
|
||||
|
||||
it('should trim whitespace from names', () => {
|
||||
const result = buildFunctionCallToolName(' server ', ' tool ')
|
||||
expect(result).toBe('mcp__server__tool')
|
||||
})
|
||||
|
||||
it('should handle mixed case by normalizing to lowercase first', () => {
|
||||
const result = buildFunctionCallToolName('MyServer', 'MyTool')
|
||||
expect(result).toBe('mcp__myserver__mytool')
|
||||
})
|
||||
|
||||
it('should handle uppercase snake case', () => {
|
||||
const result = buildFunctionCallToolName('MY_SERVER', 'SEARCH_ISSUES')
|
||||
expect(result).toBe('mcp__myServer__searchIssues')
|
||||
})
|
||||
})
|
||||
|
||||
describe('deterministic output', () => {
|
||||
it('should produce consistent results for same input', () => {
|
||||
const result1 = buildFunctionCallToolName('github', 'search_repos')
|
||||
const result2 = buildFunctionCallToolName('github', 'search_repos')
|
||||
expect(result1).toBe(result2)
|
||||
})
|
||||
|
||||
it('should produce different results for different inputs', () => {
|
||||
const result1 = buildFunctionCallToolName('server1', 'tool')
|
||||
const result2 = buildFunctionCallToolName('server2', 'tool')
|
||||
expect(result1).not.toBe(result2)
|
||||
})
|
||||
})
|
||||
|
||||
describe('real-world scenarios', () => {
|
||||
it('should handle GitHub MCP server', () => {
|
||||
expect(buildFunctionCallToolName('github', 'create_issue')).toBe('mcp__github__createIssue')
|
||||
expect(buildFunctionCallToolName('github', 'search_repositories')).toBe('mcp__github__searchRepositories')
|
||||
})
|
||||
|
||||
it('should handle filesystem MCP server', () => {
|
||||
expect(buildFunctionCallToolName('filesystem', 'read_file')).toBe('mcp__filesystem__readFile')
|
||||
expect(buildFunctionCallToolName('filesystem', 'write_file')).toBe('mcp__filesystem__writeFile')
|
||||
})
|
||||
|
||||
it('should handle hyphenated server names (common in npm packages)', () => {
|
||||
expect(buildFunctionCallToolName('cherry-fetch', 'get_page')).toBe('mcp__cherryFetch__getPage')
|
||||
expect(buildFunctionCallToolName('mcp-server-github', 'search')).toBe('mcp__mcpServerGithub__search')
|
||||
})
|
||||
|
||||
it('should handle scoped npm package style names', () => {
|
||||
const result = buildFunctionCallToolName('@anthropic/mcp-server', 'chat')
|
||||
expect(result).toBe('mcp__AnthropicMcpServer__chat')
|
||||
})
|
||||
})
|
||||
})
|
||||
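The implementation of `buildMcpToolName` is not part of this excerpt, but the collision handling that the tests above pin down can be illustrated with a small sketch (an illustration consistent with the expectations, not the actual code in `@shared/mcp`):

```typescript
// Hypothetical helper: resolve a name collision by appending 1, 2, 3, ...
// while keeping the result within maxLength, matching the test expectations above
// (e.g. 'github_search' -> 'github_search1', and 'abcd' with maxLength 4 -> 'abc3').
function resolveCollision(name: string, existingNames: Set<string>, maxLength: number): string {
  if (!existingNames.has(name)) {
    existingNames.add(name)
    return name
  }
  for (let i = 1; ; i++) {
    const suffix = String(i)
    const candidate = name.slice(0, maxLength - suffix.length) + suffix
    if (!existingNames.has(candidate)) {
      existingNames.add(candidate)
      return candidate
    }
  }
}
```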
@ -1,6 +1,10 @@
import { loggerService } from '@logger'
import { MESSAGE_STREAM_TIMEOUT_MS } from '@main/apiServer/config/timeouts'
import { createStreamAbortController, STREAM_TIMEOUT_REASON } from '@main/apiServer/utils/createStreamAbortController'
import {
createStreamAbortController,
STREAM_TIMEOUT_REASON,
type StreamAbortController
} from '@main/apiServer/utils/createStreamAbortController'
import { agentService, sessionMessageService, sessionService } from '@main/services/agents'
import type { Request, Response } from 'express'

@ -26,7 +30,7 @@ const verifyAgentAndSession = async (agentId: string, sessionId: string) => {
}

export const createMessage = async (req: Request, res: Response): Promise<void> => {
let clearAbortTimeout: (() => void) | undefined
let streamController: StreamAbortController | undefined

try {
const { agentId, sessionId } = req.params
@ -45,14 +49,10 @@ export const createMessage = async (req: Request, res: Response): Promise<void>
res.setHeader('Access-Control-Allow-Origin', '*')
res.setHeader('Access-Control-Allow-Headers', 'Cache-Control')

const {
abortController,
registerAbortHandler,
clearAbortTimeout: helperClearAbortTimeout
} = createStreamAbortController({
streamController = createStreamAbortController({
timeoutMs: MESSAGE_STREAM_TIMEOUT_MS
})
clearAbortTimeout = helperClearAbortTimeout
const { abortController, registerAbortHandler, dispose } = streamController
const { stream, completion } = await sessionMessageService.createSessionMessage(
session,
messageData,
@ -64,8 +64,8 @@ export const createMessage = async (req: Request, res: Response): Promise<void>
let responseEnded = false
let streamFinished = false

const cleanupAbortTimeout = () => {
clearAbortTimeout?.()
const cleanup = () => {
dispose()
}

const finalizeResponse = () => {
@ -78,7 +78,7 @@ export const createMessage = async (req: Request, res: Response): Promise<void>
}

responseEnded = true
cleanupAbortTimeout()
cleanup()
try {
// res.write('data: {"type":"finish"}\n\n')
res.write('data: [DONE]\n\n')
@ -108,7 +108,7 @@ export const createMessage = async (req: Request, res: Response): Promise<void>
* - Mark the response as ended to prevent further writes
*/
registerAbortHandler((abortReason) => {
cleanupAbortTimeout()
cleanup()

if (responseEnded) return

@ -189,7 +189,7 @@ export const createMessage = async (req: Request, res: Response): Promise<void>
logger.error('Error writing stream error to SSE', { error: writeError })
}
responseEnded = true
cleanupAbortTimeout()
cleanup()
res.end()
}
}
@ -221,14 +221,14 @@ export const createMessage = async (req: Request, res: Response): Promise<void>
logger.error('Error writing completion error to SSE stream', { error: writeError })
}
responseEnded = true
cleanupAbortTimeout()
cleanup()
res.end()
})
// Clear timeout when response ends
res.on('close', cleanupAbortTimeout)
res.on('finish', cleanupAbortTimeout)
res.on('close', cleanup)
res.on('finish', cleanup)
} catch (error: any) {
clearAbortTimeout?.()
streamController?.dispose()
logger.error('Error in streaming message handler', {
error,
agentId: req.params.agentId,
@ -4,6 +4,7 @@ export interface StreamAbortController {
abortController: AbortController
registerAbortHandler: (handler: StreamAbortHandler) => void
clearAbortTimeout: () => void
dispose: () => void
}

export const STREAM_TIMEOUT_REASON = 'stream timeout'
@ -40,6 +41,15 @@ export const createStreamAbortController = (options: CreateStreamAbortController

signal.addEventListener('abort', handleAbort, { once: true })

let disposed = false

const dispose = () => {
if (disposed) return
disposed = true
clearAbortTimeout()
signal.removeEventListener('abort', handleAbort)
}

const registerAbortHandler = (handler: StreamAbortHandler) => {
abortHandler = handler

@ -59,6 +69,7 @@ export const createStreamAbortController = (options: CreateStreamAbortController
return {
abortController,
registerAbortHandler,
clearAbortTimeout
clearAbortTimeout,
dispose
}
}
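A minimal usage sketch of the extended interface (imports mirror the ones in the controller diff above; the handler body and the `attachStreamLifecycle` wrapper are assumptions, not code from this PR):

```typescript
import { MESSAGE_STREAM_TIMEOUT_MS } from '@main/apiServer/config/timeouts'
import { createStreamAbortController } from '@main/apiServer/utils/createStreamAbortController'
import type { Response } from 'express'

// Wire the abort controller to an SSE response. dispose() is idempotent:
// the first call clears the timeout and removes the abort listener, later
// calls are no-ops, so it is safe to register it for both 'close' and
// 'finish' and to call it again from error paths.
function attachStreamLifecycle(res: Response) {
  const { abortController, registerAbortHandler, dispose } = createStreamAbortController({
    timeoutMs: MESSAGE_STREAM_TIMEOUT_MS
  })

  registerAbortHandler(() => {
    if (!res.writableEnded) res.end()
  })

  res.on('close', dispose)
  res.on('finish', dispose)

  return { signal: abortController.signal, dispose }
}
```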
@ -37,7 +37,7 @@ import { versionService } from './services/VersionService'
import { windowService } from './services/WindowService'
import { initWebviewHotkeys } from './services/WebviewService'
import { runAsyncFunction } from './utils'
import { ovmsManager } from './services/OvmsManager'
import { isOvmsSupported } from './services/OvmsManager'

const logger = loggerService.withContext('MainEntry')

@ -75,6 +75,15 @@ if (isLinux && process.env.XDG_SESSION_TYPE === 'wayland') {
app.commandLine.appendSwitch('enable-features', 'GlobalShortcutsPortal')
}

/**
* Set window class and name for X11
* This ensures the system tray and window manager identify the app correctly
*/
if (isLinux) {
app.commandLine.appendSwitch('class', 'cherry-studio')
app.commandLine.appendSwitch('name', 'cherry-studio')
}

// DocumentPolicyIncludeJSCallStacksInCrashReports: Enable features for unresponsive renderer js call stacks
// EarlyEstablishGpuChannel,EstablishGpuChannelAsync: Enable features for early establish gpu channel
// speed up the startup time
@ -158,7 +167,7 @@ if (!app.requestSingleInstanceLock()) {

registerShortcuts(mainWindow)

registerIpc(mainWindow, app)
await registerIpc(mainWindow, app)
localTransferService.startDiscovery({ resetList: true })

replaceDevtoolsFont(mainWindow)
@ -248,7 +257,14 @@ if (!app.requestSingleInstanceLock()) {

app.on('will-quit', async () => {
// Simple resource cleanup that does not block the quit flow
await ovmsManager.stopOvms()
if (isOvmsSupported) {
const { ovmsManager } = await import('./services/OvmsManager')
if (ovmsManager) {
await ovmsManager.stopOvms()
} else {
logger.warn('Unexpected behavior: undefined ovmsManager, but OVMS should be supported.')
}
}

try {
await mcpService.cleanup()
@ -59,7 +59,7 @@ import NotificationService from './services/NotificationService'
import * as NutstoreService from './services/NutstoreService'
import ObsidianVaultService from './services/ObsidianVaultService'
import { ocrService } from './services/ocr/OcrService'
import { ovmsManager } from './services/OvmsManager'
import { isOvmsSupported } from './services/OvmsManager'
import powerMonitorService from './services/PowerMonitorService'
import { proxyManager } from './services/ProxyManager'
import { pythonService } from './services/PythonService'
@ -97,6 +97,7 @@ import {
untildify
} from './utils/file'
import { updateAppDataConfig } from './utils/init'
import { getCpuName, getDeviceType, getHostname } from './utils/system'
import { compress, decompress } from './utils/zip'

const logger = loggerService.withContext('IPC')
@ -120,7 +121,7 @@ function extractPluginError(error: unknown): PluginError | null {
return null
}

export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
export async function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
const appUpdater = new AppUpdater()
const notificationService = new NotificationService()

@ -498,9 +499,9 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
ipcMain.handle(IpcChannel.Zip_Decompress, (_, text: Buffer) => decompress(text))

// system
ipcMain.handle(IpcChannel.System_GetDeviceType, () => (isMac ? 'mac' : isWin ? 'windows' : 'linux'))
ipcMain.handle(IpcChannel.System_GetHostname, () => require('os').hostname())
ipcMain.handle(IpcChannel.System_GetCpuName, () => require('os').cpus()[0].model)
ipcMain.handle(IpcChannel.System_GetDeviceType, getDeviceType)
ipcMain.handle(IpcChannel.System_GetHostname, getHostname)
ipcMain.handle(IpcChannel.System_GetCpuName, getCpuName)
ipcMain.handle(IpcChannel.System_CheckGitBash, () => {
if (!isWin) {
return true // Non-Windows systems don't need Git Bash
@ -974,15 +975,36 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
ipcMain.handle(IpcChannel.OCR_ListProviders, () => ocrService.listProviderIds())

// OVMS
ipcMain.handle(IpcChannel.Ovms_AddModel, (_, modelName: string, modelId: string, modelSource: string, task: string) =>
ovmsManager.addModel(modelName, modelId, modelSource, task)
)
ipcMain.handle(IpcChannel.Ovms_StopAddModel, () => ovmsManager.stopAddModel())
ipcMain.handle(IpcChannel.Ovms_GetModels, () => ovmsManager.getModels())
ipcMain.handle(IpcChannel.Ovms_IsRunning, () => ovmsManager.initializeOvms())
ipcMain.handle(IpcChannel.Ovms_GetStatus, () => ovmsManager.getOvmsStatus())
ipcMain.handle(IpcChannel.Ovms_RunOVMS, () => ovmsManager.runOvms())
ipcMain.handle(IpcChannel.Ovms_StopOVMS, () => ovmsManager.stopOvms())
ipcMain.handle(IpcChannel.Ovms_IsSupported, () => isOvmsSupported)
if (isOvmsSupported) {
const { ovmsManager } = await import('./services/OvmsManager')
if (ovmsManager) {
ipcMain.handle(
IpcChannel.Ovms_AddModel,
(_, modelName: string, modelId: string, modelSource: string, task: string) =>
ovmsManager.addModel(modelName, modelId, modelSource, task)
)
ipcMain.handle(IpcChannel.Ovms_StopAddModel, () => ovmsManager.stopAddModel())
ipcMain.handle(IpcChannel.Ovms_GetModels, () => ovmsManager.getModels())
ipcMain.handle(IpcChannel.Ovms_IsRunning, () => ovmsManager.initializeOvms())
ipcMain.handle(IpcChannel.Ovms_GetStatus, () => ovmsManager.getOvmsStatus())
ipcMain.handle(IpcChannel.Ovms_RunOVMS, () => ovmsManager.runOvms())
ipcMain.handle(IpcChannel.Ovms_StopOVMS, () => ovmsManager.stopOvms())
} else {
logger.error('Unexpected behavior: undefined ovmsManager, but OVMS should be supported.')
}
} else {
const fallback = () => {
throw new Error('OVMS is only supported on Windows with intel CPU.')
}
ipcMain.handle(IpcChannel.Ovms_AddModel, fallback)
ipcMain.handle(IpcChannel.Ovms_StopAddModel, fallback)
ipcMain.handle(IpcChannel.Ovms_GetModels, fallback)
ipcMain.handle(IpcChannel.Ovms_IsRunning, fallback)
ipcMain.handle(IpcChannel.Ovms_GetStatus, fallback)
ipcMain.handle(IpcChannel.Ovms_RunOVMS, fallback)
ipcMain.handle(IpcChannel.Ovms_StopOVMS, fallback)
}

// CherryAI
ipcMain.handle(IpcChannel.Cherryai_GetSignature, (_, params) => generateSignature(params))
@ -1039,12 +1061,18 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
} catch (error) {
const pluginError = extractPluginError(error)
if (pluginError) {
logger.error('Failed to list installed plugins', { agentId, error: pluginError })
logger.error('Failed to list installed plugins', {
agentId,
error: pluginError
})
return { success: false, error: pluginError }
}

const err = normalizeError(error)
logger.error('Failed to list installed plugins', { agentId, error: err })
logger.error('Failed to list installed plugins', {
agentId,
error: err
})
return {
success: false,
error: {
@ -1,5 +1,14 @@
|
||||
import { describe, expect, it, vi } from 'vitest'
|
||||
|
||||
vi.mock('node:fs', () => ({
|
||||
default: {
|
||||
existsSync: vi.fn(() => false),
|
||||
mkdirSync: vi.fn()
|
||||
},
|
||||
existsSync: vi.fn(() => false),
|
||||
mkdirSync: vi.fn()
|
||||
}))
|
||||
|
||||
vi.mock('electron', () => {
|
||||
const sendCommand = vi.fn(async (command: string, params?: { expression?: string }) => {
|
||||
if (command === 'Runtime.evaluate') {
|
||||
@ -21,24 +30,31 @@ vi.mock('electron', () => {
|
||||
sendCommand
|
||||
}
|
||||
|
||||
const webContents = {
|
||||
const createWebContents = () => ({
|
||||
debugger: debuggerObj,
|
||||
setUserAgent: vi.fn(),
|
||||
getURL: vi.fn(() => 'https://example.com/'),
|
||||
getTitle: vi.fn(async () => 'Example Title'),
|
||||
loadURL: vi.fn(async () => {}),
|
||||
once: vi.fn(),
|
||||
removeListener: vi.fn(),
|
||||
on: vi.fn()
|
||||
}
|
||||
|
||||
const loadURL = vi.fn(async () => {})
|
||||
on: vi.fn(),
|
||||
isDestroyed: vi.fn(() => false),
|
||||
canGoBack: vi.fn(() => false),
|
||||
canGoForward: vi.fn(() => false),
|
||||
goBack: vi.fn(),
|
||||
goForward: vi.fn(),
|
||||
reload: vi.fn(),
|
||||
executeJavaScript: vi.fn(async () => null),
|
||||
setWindowOpenHandler: vi.fn()
|
||||
})
|
||||
|
||||
const windows: any[] = []
|
||||
const views: any[] = []
|
||||
|
||||
class MockBrowserWindow {
|
||||
private destroyed = false
|
||||
public webContents = webContents
|
||||
public loadURL = loadURL
|
||||
public webContents = createWebContents()
|
||||
public isDestroyed = vi.fn(() => this.destroyed)
|
||||
public close = vi.fn(() => {
|
||||
this.destroyed = true
|
||||
@ -47,31 +63,58 @@ vi.mock('electron', () => {
|
||||
this.destroyed = true
|
||||
})
|
||||
public on = vi.fn()
|
||||
public setBrowserView = vi.fn()
|
||||
public addBrowserView = vi.fn()
|
||||
public removeBrowserView = vi.fn()
|
||||
public getContentSize = vi.fn(() => [1200, 800])
|
||||
public show = vi.fn()
|
||||
|
||||
constructor() {
|
||||
windows.push(this)
|
||||
}
|
||||
}
|
||||
|
||||
class MockBrowserView {
|
||||
public webContents = createWebContents()
|
||||
public setBounds = vi.fn()
|
||||
public setAutoResize = vi.fn()
|
||||
public destroy = vi.fn()
|
||||
|
||||
constructor() {
|
||||
views.push(this)
|
||||
}
|
||||
}
|
||||
|
||||
const app = {
|
||||
isReady: vi.fn(() => true),
|
||||
whenReady: vi.fn(async () => {}),
|
||||
on: vi.fn()
|
||||
on: vi.fn(),
|
||||
getPath: vi.fn((key: string) => {
|
||||
if (key === 'userData') return '/mock/userData'
|
||||
if (key === 'temp') return '/tmp'
|
||||
return '/mock/unknown'
|
||||
}),
|
||||
getAppPath: vi.fn(() => '/mock/app'),
|
||||
setPath: vi.fn()
|
||||
}
|
||||
|
||||
const nativeTheme = {
|
||||
on: vi.fn(),
|
||||
shouldUseDarkColors: false
|
||||
}
|
||||
|
||||
return {
|
||||
BrowserWindow: MockBrowserWindow as any,
|
||||
BrowserView: MockBrowserView as any,
|
||||
app,
|
||||
nativeTheme,
|
||||
__mockDebugger: debuggerObj,
|
||||
__mockSendCommand: sendCommand,
|
||||
__mockLoadURL: loadURL,
|
||||
__mockWindows: windows
|
||||
__mockWindows: windows,
|
||||
__mockViews: views
|
||||
}
|
||||
})
|
||||
|
||||
import * as electron from 'electron'
|
||||
const { __mockWindows } = electron as typeof electron & { __mockWindows: any[] }
|
||||
|
||||
import { CdpBrowserController } from '../browser'
|
||||
|
||||
describe('CdpBrowserController', () => {
|
||||
@ -81,54 +124,249 @@ describe('CdpBrowserController', () => {
|
||||
expect(result).toBe('ok')
|
||||
})
|
||||
|
||||
it('opens a URL (hidden) and returns current page info', async () => {
|
||||
it('opens a URL in normal mode and returns current page info', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
const result = await controller.open('https://foo.bar/', 5000, false)
|
||||
expect(result.currentUrl).toBe('https://example.com/')
|
||||
expect(result.title).toBe('Example Title')
|
||||
})
|
||||
|
||||
it('opens a URL (visible) when show=true', async () => {
|
||||
it('opens a URL in private mode', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
const result = await controller.open('https://foo.bar/', 5000, true, 'session-a')
|
||||
const result = await controller.open('https://foo.bar/', 5000, true)
|
||||
expect(result.currentUrl).toBe('https://example.com/')
|
||||
expect(result.title).toBe('Example Title')
|
||||
})
|
||||
|
||||
it('reuses session for execute and supports multiline', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
await controller.open('https://foo.bar/', 5000, false, 'session-b')
|
||||
const result = await controller.execute('const a=1; const b=2; a+b;', 5000, 'session-b')
|
||||
await controller.open('https://foo.bar/', 5000, false)
|
||||
const result = await controller.execute('const a=1; const b=2; a+b;', 5000, false)
|
||||
expect(result).toBe('ok')
|
||||
})
|
||||
|
||||
it('evicts least recently used session when exceeding maxSessions', async () => {
|
||||
const controller = new CdpBrowserController({ maxSessions: 2, idleTimeoutMs: 1000 * 60 })
|
||||
await controller.open('https://foo.bar/', 5000, false, 's1')
|
||||
await controller.open('https://foo.bar/', 5000, false, 's2')
|
||||
await controller.open('https://foo.bar/', 5000, false, 's3')
|
||||
const destroyedCount = __mockWindows.filter(
|
||||
(w: any) => w.destroy.mock.calls.length > 0 || w.close.mock.calls.length > 0
|
||||
).length
|
||||
expect(destroyedCount).toBeGreaterThanOrEqual(1)
|
||||
it('normal and private modes are isolated', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
await controller.open('https://foo.bar/', 5000, false)
|
||||
await controller.open('https://foo.bar/', 5000, true)
|
||||
const normalResult = await controller.execute('1+1', 5000, false)
|
||||
const privateResult = await controller.execute('1+1', 5000, true)
|
||||
expect(normalResult).toBe('ok')
|
||||
expect(privateResult).toBe('ok')
|
||||
})
|
||||
|
||||
it('fetches URL and returns html format', async () => {
|
||||
it('fetches URL and returns html format with tabId', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
const result = await controller.fetch('https://example.com/', 'html')
|
||||
expect(result).toBe('<html><body><h1>Test</h1><p>Content</p></body></html>')
|
||||
expect(result.tabId).toBeDefined()
|
||||
expect(result.content).toBe('<html><body><h1>Test</h1><p>Content</p></body></html>')
|
||||
})
|
||||
|
||||
it('fetches URL and returns txt format', async () => {
|
||||
it('fetches URL and returns txt format with tabId', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
const result = await controller.fetch('https://example.com/', 'txt')
|
||||
expect(result).toBe('Test\nContent')
|
||||
expect(result.tabId).toBeDefined()
|
||||
expect(result.content).toBe('Test\nContent')
|
||||
})
|
||||
|
||||
it('fetches URL and returns markdown format (default)', async () => {
|
||||
it('fetches URL and returns markdown format (default) with tabId', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
const result = await controller.fetch('https://example.com/')
|
||||
expect(typeof result).toBe('string')
|
||||
expect(result).toContain('Test')
|
||||
expect(result.tabId).toBeDefined()
|
||||
expect(typeof result.content).toBe('string')
|
||||
expect(result.content).toContain('Test')
|
||||
})
|
||||
|
||||
it('fetches URL in private mode with tabId', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
const result = await controller.fetch('https://example.com/', 'html', 10000, true)
|
||||
expect(result.tabId).toBeDefined()
|
||||
expect(result.content).toBe('<html><body><h1>Test</h1><p>Content</p></body></html>')
|
||||
})
|
||||
|
||||
describe('Multi-tab support', () => {
|
||||
it('creates new tab with newTab parameter', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
const result1 = await controller.open('https://site1.com/', 5000, false, true)
|
||||
const result2 = await controller.open('https://site2.com/', 5000, false, true)
|
||||
|
||||
expect(result1.tabId).toBeDefined()
|
||||
expect(result2.tabId).toBeDefined()
|
||||
expect(result1.tabId).not.toBe(result2.tabId)
|
||||
})
|
||||
|
||||
it('reuses same tab without newTab parameter', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
const result1 = await controller.open('https://site1.com/', 5000, false)
|
||||
const result2 = await controller.open('https://site2.com/', 5000, false)
|
||||
|
||||
expect(result1.tabId).toBe(result2.tabId)
|
||||
})
|
||||
|
||||
it('fetches in new tab with newTab parameter', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
await controller.open('https://example.com/', 5000, false)
|
||||
const tabs = await controller.listTabs(false)
|
||||
const initialTabCount = tabs.length
|
||||
|
||||
await controller.fetch('https://other.com/', 'html', 10000, false, true)
|
||||
const tabsAfter = await controller.listTabs(false)
|
||||
|
||||
expect(tabsAfter.length).toBe(initialTabCount + 1)
|
||||
})
|
||||
})
|
||||
|
||||
describe('Tab management', () => {
|
||||
it('lists tabs in a window', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
await controller.open('https://example.com/', 5000, false)
|
||||
|
||||
const tabs = await controller.listTabs(false)
|
||||
expect(tabs.length).toBeGreaterThan(0)
|
||||
expect(tabs[0].tabId).toBeDefined()
|
||||
})
|
||||
|
||||
it('lists tabs separately for normal and private modes', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
await controller.open('https://example.com/', 5000, false)
|
||||
await controller.open('https://example.com/', 5000, true)
|
||||
|
||||
const normalTabs = await controller.listTabs(false)
|
||||
const privateTabs = await controller.listTabs(true)
|
||||
|
||||
expect(normalTabs.length).toBe(1)
|
||||
expect(privateTabs.length).toBe(1)
|
||||
expect(normalTabs[0].tabId).not.toBe(privateTabs[0].tabId)
|
||||
})
|
||||
|
||||
it('closes specific tab', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
const result1 = await controller.open('https://site1.com/', 5000, false, true)
|
||||
await controller.open('https://site2.com/', 5000, false, true)
|
||||
|
||||
const tabsBefore = await controller.listTabs(false)
|
||||
expect(tabsBefore.length).toBe(2)
|
||||
|
||||
await controller.closeTab(false, result1.tabId)
|
||||
|
||||
const tabsAfter = await controller.listTabs(false)
|
||||
expect(tabsAfter.length).toBe(1)
|
||||
expect(tabsAfter.find((t) => t.tabId === result1.tabId)).toBeUndefined()
|
||||
})
|
||||
|
||||
it('switches active tab', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
const result1 = await controller.open('https://site1.com/', 5000, false, true)
|
||||
const result2 = await controller.open('https://site2.com/', 5000, false, true)
|
||||
|
||||
await controller.switchTab(false, result1.tabId)
|
||||
await controller.switchTab(false, result2.tabId)
|
||||
})
|
||||
|
||||
it('throws error when switching to non-existent tab', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
await controller.open('https://example.com/', 5000, false)
|
||||
|
||||
await expect(controller.switchTab(false, 'non-existent-tab')).rejects.toThrow('Tab non-existent-tab not found')
|
||||
})
|
||||
})
|
||||
|
||||
describe('Reset behavior', () => {
|
||||
it('resets specific tab only', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
const result1 = await controller.open('https://site1.com/', 5000, false, true)
|
||||
await controller.open('https://site2.com/', 5000, false, true)
|
||||
|
||||
await controller.reset(false, result1.tabId)
|
||||
|
||||
const tabs = await controller.listTabs(false)
|
||||
expect(tabs.length).toBe(1)
|
||||
})
|
||||
|
||||
it('resets specific window only', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
await controller.open('https://example.com/', 5000, false)
|
||||
await controller.open('https://example.com/', 5000, true)
|
||||
|
||||
await controller.reset(false)
|
||||
|
||||
const normalTabs = await controller.listTabs(false)
|
||||
const privateTabs = await controller.listTabs(true)
|
||||
|
||||
expect(normalTabs.length).toBe(0)
|
||||
expect(privateTabs.length).toBe(1)
|
||||
})
|
||||
|
||||
it('resets all windows', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
await controller.open('https://example.com/', 5000, false)
|
||||
await controller.open('https://example.com/', 5000, true)
|
||||
|
||||
await controller.reset()
|
||||
|
||||
const normalTabs = await controller.listTabs(false)
|
||||
const privateTabs = await controller.listTabs(true)
|
||||
|
||||
expect(normalTabs.length).toBe(0)
|
||||
expect(privateTabs.length).toBe(0)
|
||||
})
|
||||
})
|
||||
|
||||
describe('showWindow parameter', () => {
|
||||
it('passes showWindow parameter through open', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
const result = await controller.open('https://example.com/', 5000, false, false, true)
|
||||
expect(result.currentUrl).toBe('https://example.com/')
|
||||
expect(result.tabId).toBeDefined()
|
||||
})
|
||||
|
||||
it('passes showWindow parameter through fetch', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
const result = await controller.fetch('https://example.com/', 'html', 10000, false, false, true)
|
||||
expect(result.tabId).toBeDefined()
|
||||
expect(result.content).toBe('<html><body><h1>Test</h1><p>Content</p></body></html>')
|
||||
})
|
||||
|
||||
it('passes showWindow parameter through createTab', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
const { tabId, view } = await controller.createTab(false, true)
|
||||
expect(tabId).toBeDefined()
|
||||
expect(view).toBeDefined()
|
||||
})
|
||||
|
||||
it('shows existing window when showWindow=true on subsequent calls', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
// First call creates window
|
||||
await controller.open('https://example.com/', 5000, false, false, false)
|
||||
// Second call with showWindow=true should show existing window
|
||||
const result = await controller.open('https://example.com/', 5000, false, false, true)
|
||||
expect(result.currentUrl).toBe('https://example.com/')
|
||||
})
|
||||
})
|
||||
|
||||
describe('Window limits and eviction', () => {
|
||||
it('respects maxWindows limit', async () => {
|
||||
const controller = new CdpBrowserController({ maxWindows: 1 })
|
||||
await controller.open('https://example.com/', 5000, false)
|
||||
await controller.open('https://example.com/', 5000, true)
|
||||
|
||||
const normalTabs = await controller.listTabs(false)
|
||||
const privateTabs = await controller.listTabs(true)
|
||||
|
||||
expect(privateTabs.length).toBe(1)
|
||||
expect(normalTabs.length).toBe(0)
|
||||
})
|
||||
|
||||
it('cleans up idle windows on next access', async () => {
|
||||
const controller = new CdpBrowserController({ idleTimeoutMs: 1 })
|
||||
await controller.open('https://example.com/', 5000, false)
|
||||
|
||||
await new Promise((r) => setTimeout(r, 10))
|
||||
|
||||
await controller.open('https://example.com/', 5000, true)
|
||||
|
||||
const normalTabs = await controller.listTabs(false)
|
||||
expect(normalTabs.length).toBe(0)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
177
src/main/mcpServers/browser/README.md
Normal file
@ -0,0 +1,177 @@
# Browser MCP Server

A Model Context Protocol (MCP) server for controlling browser windows via Chrome DevTools Protocol (CDP).

## Features

### ✨ User Data Persistence
- **Normal mode (default)**: Cookies, localStorage, and sessionStorage persist across browser restarts
- **Private mode**: Ephemeral browsing - no data persists (like incognito mode)

### 🔄 Window Management
- Two browsing modes: normal (persistent) and private (ephemeral)
- Lazy idle timeout cleanup (cleaned on next window access)
- Maximum window limits to prevent resource exhaustion

> **Note**: Normal mode uses a global `persist:default` partition shared by all clients. This means login sessions and stored data are accessible to any code using the MCP server.

## Architecture

### How It Works
```
Normal Mode (BrowserWindow)
├─ Persistent Storage (partition: persist:default) ← Global, shared across all clients
└─ Tabs (BrowserView) ← created via newTab or automatically

Private Mode (BrowserWindow)
├─ Ephemeral Storage (partition: private) ← No disk persistence
└─ Tabs (BrowserView) ← created via newTab or automatically
```

- **One Window Per Mode**: Normal and private modes each have their own window
- **Multi-Tab Support**: Use `newTab: true` for parallel URL requests
- **Storage Isolation**: Normal and private modes have completely separate storage

## Available Tools

### `open`
Open a URL in a browser window. Optionally return page content.
```json
{
  "url": "https://example.com",
  "format": "markdown",
  "timeout": 10000,
  "privateMode": false,
  "newTab": false,
  "showWindow": false
}
```
- `format`: If set (`html`, `txt`, `markdown`, `json`), returns page content in that format along with tabId. If not set, just opens the page and returns navigation info.
- `newTab`: Set to `true` to open in a new tab (required for parallel requests)
- `showWindow`: Set to `true` to display the browser window (useful for debugging)
- Returns (without format): `{ currentUrl, title, tabId }`
- Returns (with format): `{ tabId, content }` where content is in the specified format
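A hedged sketch of how a caller might branch on the two return shapes listed above (the `OpenResult` union type below is inferred from this description and is not exported by the server):

```typescript
type OpenResult =
  | { currentUrl: string; title: string; tabId: string } // no `format` requested
  | { tabId: string; content: string } // `format` requested

function describeOpenResult(result: OpenResult): string {
  // `content` is only present when a format was requested
  if ('content' in result) {
    return `tab ${result.tabId}: fetched ${result.content.length} characters`
  }
  return `tab ${result.tabId}: opened ${result.currentUrl} (${result.title})`
}
```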
### `execute`
Execute JavaScript code in the page context.
```json
{
  "code": "document.title",
  "timeout": 5000,
  "privateMode": false,
  "tabId": "optional-tab-id"
}
```
- `tabId`: Target a specific tab (from `open` response)

### `reset`
Reset browser windows and tabs.
```json
{
  "privateMode": false,
  "tabId": "optional-tab-id"
}
```
- Omit all parameters to close all windows
- Set `privateMode` to close a specific window
- Set both `privateMode` and `tabId` to close a specific tab only

## Usage Examples

### Basic Navigation
```typescript
// Open a URL in normal mode (data persists)
await controller.open('https://example.com')
```

### Fetch Page Content
```typescript
// Open URL and get content as markdown
await open({ url: 'https://example.com', format: 'markdown' })

// Open URL and get raw HTML
await open({ url: 'https://example.com', format: 'html' })
```

### Multi-Tab / Parallel Requests
```typescript
// Open multiple URLs in parallel using newTab
const [page1, page2] = await Promise.all([
  controller.open('https://site1.com', 10000, false, true), // newTab: true
  controller.open('https://site2.com', 10000, false, true) // newTab: true
])

// Execute on specific tab
await controller.execute('document.title', 5000, false, page1.tabId)

// Close specific tab when done
await controller.reset(false, page1.tabId)
```

### Private Browsing
```typescript
// Open a URL in private mode (no data persistence)
await controller.open('https://example.com', 10000, true)

// Cookies and localStorage won't persist after reset
```

### Data Persistence (Normal Mode)
```typescript
// Set data
await controller.open('https://example.com', 10000, false)
await controller.execute('localStorage.setItem("key", "value")', 5000, false)

// Close window
await controller.reset(false)

// Reopen - data persists!
await controller.open('https://example.com', 10000, false)
const value = await controller.execute('localStorage.getItem("key")', 5000, false)
// Returns: "value"
```

### No Persistence (Private Mode)
```typescript
// Set data in private mode
await controller.open('https://example.com', 10000, true)
await controller.execute('localStorage.setItem("key", "value")', 5000, true)

// Close private window
await controller.reset(true)

// Reopen - data is gone!
await controller.open('https://example.com', 10000, true)
const value = await controller.execute('localStorage.getItem("key")', 5000, true)
// Returns: null
```

## Configuration

```typescript
const controller = new CdpBrowserController({
  maxWindows: 5, // Maximum concurrent windows
  idleTimeoutMs: 5 * 60 * 1000 // 5 minutes idle timeout (lazy cleanup)
})
```

> **Note on Idle Timeout**: Idle windows are cleaned up lazily when the next window is created or accessed, not on a background timer.

## Best Practices

1. **Use Normal Mode for Authentication**: When you need to stay logged in across sessions
2. **Use Private Mode for Sensitive Operations**: When you don't want data to persist
3. **Use `newTab: true` for Parallel Requests**: Avoid race conditions when fetching multiple URLs
4. **Resource Cleanup**: Call `reset()` when done, or `reset(privateMode, tabId)` to close specific tabs
5. **Error Handling**: All tool handlers return error responses on failure
6. **Timeout Configuration**: Adjust timeouts based on page complexity

## Technical Details

- **CDP Version**: 1.3
- **User Agent**: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:145.0) Gecko/20100101 Firefox/145.0
- **Storage**:
  - Normal mode: `persist:default` (disk-persisted, global)
  - Private mode: `private` (memory only)
- **Window Size**: 1200x800 (default)
- **Visibility**: Windows hidden by default (use `showWindow: true` to display)
3
src/main/mcpServers/browser/constants.ts
Normal file
@ -0,0 +1,3 @@
export const TAB_BAR_HEIGHT = 92 // Height for Chrome-style tab bar (42px) + address bar (50px)
export const SESSION_KEY_DEFAULT = 'default'
export const SESSION_KEY_PRIVATE = 'private'
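`TAB_BAR_HEIGHT` is presumably used to offset a tab's page view below the tab strip and address bar. A hedged sketch of that layout math (the `layoutTabView` helper below is an illustration, not code from this PR):

```typescript
import { BrowserView, BrowserWindow } from 'electron'
import { TAB_BAR_HEIGHT } from './constants'

// Hypothetical helper: size a tab's BrowserView so page content starts below
// the 42px tab strip + 50px address bar accounted for by TAB_BAR_HEIGHT.
function layoutTabView(window: BrowserWindow, view: BrowserView): void {
  const [width, height] = window.getContentSize()
  view.setBounds({ x: 0, y: TAB_BAR_HEIGHT, width, height: height - TAB_BAR_HEIGHT })
}
```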
File diff suppressed because it is too large
567
src/main/mcpServers/browser/tabbar-html.ts
Normal file
@ -0,0 +1,567 @@
|
||||
export const TAB_BAR_HTML = `<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<style>
|
||||
* { margin: 0; padding: 0; box-sizing: border-box; }
|
||||
html, body {
|
||||
height: 100%;
|
||||
overflow: hidden;
|
||||
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
|
||||
font-size: 12px;
|
||||
user-select: none;
|
||||
}
|
||||
|
||||
/* Light theme (default) */
|
||||
:root {
|
||||
--bg-tabrow: #dee1e6;
|
||||
--bg-toolbar: #fff;
|
||||
--bg-tab-hover: rgba(0,0,0,0.04);
|
||||
--bg-tab-active: #fff;
|
||||
--bg-url: #f1f3f4;
|
||||
--bg-url-focus: #fff;
|
||||
--bg-btn-hover: rgba(0,0,0,0.08);
|
||||
--bg-favicon: #9aa0a6;
|
||||
--color-text: #5f6368;
|
||||
--color-text-active: #202124;
|
||||
--color-separator: #c4c7cc;
|
||||
--shadow-url-focus: 0 1px 6px rgba(32,33,36,0.28);
|
||||
--window-close-hover: #e81123;
|
||||
}
|
||||
|
||||
/* Dark theme */
|
||||
body.theme-dark {
|
||||
--bg-tabrow: #202124;
|
||||
--bg-toolbar: #292a2d;
|
||||
--bg-tab-hover: rgba(255,255,255,0.06);
|
||||
--bg-tab-active: #292a2d;
|
||||
--bg-url: #35363a;
|
||||
--bg-url-focus: #202124;
|
||||
--bg-btn-hover: rgba(255,255,255,0.1);
|
||||
--bg-favicon: #5f6368;
|
||||
--color-text: #9aa0a6;
|
||||
--color-text-active: #e8eaed;
|
||||
--color-separator: #3c3d41;
|
||||
--shadow-url-focus: 0 1px 6px rgba(0,0,0,0.5);
|
||||
--window-close-hover: #e81123;
|
||||
}
|
||||
|
||||
body {
|
||||
background: var(--bg-tabrow);
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
position: relative;
|
||||
}
|
||||
body.platform-mac { --traffic-light-width: 70px; --window-controls-width: 0px; }
|
||||
body.platform-win, body.platform-linux { --traffic-light-width: 0px; --window-controls-width: 138px; }
|
||||
|
||||
/* Chrome-style tab row */
|
||||
#tab-row {
|
||||
display: flex;
|
||||
align-items: flex-end;
|
||||
padding: 8px 8px 0 8px;
|
||||
padding-left: calc(8px + var(--traffic-light-width, 0px));
|
||||
padding-right: calc(8px + var(--window-controls-width, 0px));
|
||||
height: 42px;
|
||||
flex-shrink: 0;
|
||||
-webkit-app-region: drag;
|
||||
background: var(--bg-tabrow);
|
||||
position: relative;
|
||||
z-index: 1;
|
||||
}
|
||||
|
||||
#tabs-container {
|
||||
display: flex;
|
||||
align-items: flex-end;
|
||||
height: 34px;
|
||||
flex: 1;
|
||||
min-width: 0;
|
||||
overflow: hidden;
|
||||
}
|
||||
|
||||
/* New tab button - inside tabs container, right after last tab */
|
||||
#new-tab-btn {
|
||||
width: 28px;
|
||||
height: 28px;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
border-radius: 50%;
|
||||
cursor: pointer;
|
||||
margin-left: 4px;
|
||||
margin-bottom: 3px;
|
||||
-webkit-app-region: no-drag;
|
||||
flex-shrink: 0;
|
||||
}
|
||||
#new-tab-btn:hover { background: var(--bg-btn-hover); }
|
||||
#new-tab-btn svg { width: 18px; height: 18px; fill: var(--color-text); }
|
||||
|
||||
/* Chrome-style tabs - shrink instead of scroll */
|
||||
.tab {
|
||||
position: relative;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
height: 34px;
|
||||
min-width: 36px;
|
||||
max-width: 240px;
|
||||
flex: 1 1 240px;
|
||||
padding: 0 6px;
|
||||
background: transparent;
|
||||
cursor: pointer;
|
||||
-webkit-app-region: no-drag;
|
||||
border-radius: 8px 8px 0 0;
|
||||
transition: background 0.1s;
|
||||
}
|
||||
/* When tab is narrow, hide title, show favicon by default, show close on hover */
|
||||
.tab.narrow .tab-title { display: none; }
|
||||
.tab.narrow { justify-content: center; padding: 0; }
|
||||
.tab.narrow .tab-favicon { margin-right: 0; }
|
||||
.tab.narrow .tab-close { position: absolute; margin-left: 0; }
|
||||
/* On narrow tab hover, hide favicon and show close button */
|
||||
.tab.narrow:hover .tab-favicon { display: none; }
|
||||
.tab.narrow:hover .tab-close { opacity: 1; }
|
||||
/* Separator line using pseudo-element */
|
||||
.tab::after {
|
||||
content: '';
|
||||
position: absolute;
|
||||
right: 0;
|
||||
top: 8px;
|
||||
bottom: 8px;
|
||||
width: 1px;
|
||||
background: var(--color-separator);
|
||||
pointer-events: none;
|
||||
}
|
||||
/* Hide separator for last tab */
|
||||
.tab:last-of-type::after { display: none; }
|
||||
/* Hide separator when tab is hovered (right side) */
|
||||
.tab:hover::after { display: none; }
|
||||
/* Hide separator on tab before hovered tab (left side of hovered) - managed by JS .before-hover class */
|
||||
.tab.before-hover::after { display: none; }
|
||||
/* Hide separator for active tab and its neighbors */
|
||||
.tab.active::after { display: none; }
|
||||
/* Hide separator on tab before active (left side of active) - managed by JS .before-active class */
|
||||
.tab.before-active::after { display: none; }
|
||||
|
||||
.tab:hover { background: var(--bg-tab-hover); }
|
||||
.tab.active {
|
||||
background: var(--bg-tab-active);
|
||||
z-index: 1;
|
||||
}
|
||||
|
||||
/* Tab favicon placeholder */
|
||||
.tab-favicon {
|
||||
width: 16px;
|
||||
height: 16px;
|
||||
margin-right: 8px;
|
||||
border-radius: 2px;
|
||||
background: var(--bg-favicon);
|
||||
flex-shrink: 0;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
}
|
||||
.tab-favicon svg { width: 12px; height: 12px; fill: #fff; }
|
||||
body.theme-dark .tab-favicon svg { fill: #9aa0a6; }
|
||||
|
||||
.tab-title {
|
||||
flex: 1;
|
||||
min-width: 0;
|
||||
overflow: hidden;
|
||||
text-overflow: ellipsis;
|
||||
white-space: nowrap;
|
||||
color: var(--color-text);
|
||||
font-size: 12px;
|
||||
font-weight: 400;
|
||||
}
|
||||
.tab.active .tab-title { color: var(--color-text-active); }
|
||||
|
||||
.tab-close {
|
||||
width: 24px;
|
||||
height: 24px;
|
||||
border-radius: 50%;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
margin-left: 4px;
|
||||
opacity: 0;
|
||||
transition: opacity 0.1s, background 0.1s;
|
||||
flex-shrink: 0;
|
||||
}
|
||||
.tab:hover .tab-close { opacity: 1; }
|
||||
.tab-close:hover { background: var(--bg-btn-hover); }
|
||||
.tab-close svg { width: 16px; height: 16px; fill: var(--color-text); }
|
||||
.tab-close:hover svg { fill: var(--color-text-active); }
|
||||
|
||||
/* Chrome-style address bar */
|
||||
#address-bar {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
padding: 6px 16px 8px 8px;
|
||||
gap: 4px;
|
||||
background: var(--bg-toolbar);
|
||||
-webkit-app-region: drag;
|
||||
}
|
||||
.nav-btn {
|
||||
width: 32px;
|
||||
height: 32px;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
border-radius: 50%;
|
||||
cursor: pointer;
|
||||
background: transparent;
|
||||
border: none;
|
||||
flex-shrink: 0;
|
||||
-webkit-app-region: no-drag;
|
||||
}
|
||||
.nav-btn:hover { background: var(--bg-btn-hover); }
|
||||
.nav-btn:disabled { opacity: 0.3; cursor: default; }
|
||||
.nav-btn:disabled:hover { background: transparent; }
|
||||
.nav-btn svg { width: 20px; height: 20px; fill: var(--color-text); }
|
||||
|
||||
#url-container {
|
||||
flex: 1;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
background: var(--bg-url);
|
||||
border-radius: 24px;
|
||||
padding: 0 16px;
|
||||
height: 36px;
|
||||
-webkit-app-region: no-drag;
|
||||
transition: background 0.2s, box-shadow 0.2s;
|
||||
}
|
||||
#url-container:focus-within {
|
||||
background: var(--bg-url-focus);
|
||||
box-shadow: var(--shadow-url-focus);
|
||||
}
|
||||
#url-input {
|
||||
flex: 1;
|
||||
background: transparent;
|
||||
border: none;
|
||||
outline: none;
|
||||
color: var(--color-text-active);
|
||||
font-size: 14px;
|
||||
font-family: inherit;
|
||||
}
|
||||
#url-input::placeholder { color: var(--color-text); }
|
||||
#url-input::-webkit-input-placeholder { color: var(--color-text); }
|
||||
|
||||
/* Window controls for Windows/Linux - use inline-flex inside tab-row instead of fixed position */
|
||||
#window-controls {
|
||||
display: none;
|
||||
height: 42px;
|
||||
margin-left: auto;
|
||||
margin-right: calc(-8px - var(--window-controls-width, 0px));
|
||||
margin-top: -8px;
|
||||
-webkit-app-region: no-drag;
|
||||
}
|
||||
body.platform-win #window-controls,
|
||||
body.platform-linux #window-controls { display: flex; }
|
||||
.window-control-btn {
|
||||
width: 46px;
|
||||
height: 42px;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
background: transparent;
|
||||
border: none;
|
||||
cursor: pointer;
|
||||
transition: background 0.1s;
|
||||
-webkit-app-region: no-drag;
|
||||
}
|
||||
.window-control-btn:hover { background: var(--bg-btn-hover); }
|
||||
.window-control-btn.close:hover { background: var(--window-close-hover); }
|
||||
.window-control-btn svg { width: 10px; height: 10px; color: var(--color-text); fill: var(--color-text); stroke: var(--color-text); }
|
||||
.window-control-btn:hover svg { color: var(--color-text-active); fill: var(--color-text-active); stroke: var(--color-text-active); }
|
||||
.window-control-btn.close:hover svg { color: #fff; fill: #fff; stroke: #fff; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div id="tab-row">
|
||||
<div id="tabs-container">
|
||||
<div id="new-tab-btn" title="New tab">
|
||||
<svg viewBox="0 0 24 24"><path d="M19 13h-6v6h-2v-6H5v-2h6V5h2v6h6v2z"/></svg>
|
||||
</div>
|
||||
</div>
|
||||
<!-- Window controls for Windows/Linux - inside tab-row to avoid drag region issues -->
|
||||
<div id="window-controls">
|
||||
<button class="window-control-btn" id="minimize-btn" title="Minimize">
|
||||
<svg viewBox="0 0 10 1"><rect width="10" height="1"/></svg>
|
||||
</button>
|
||||
<button class="window-control-btn" id="maximize-btn" title="Maximize">
|
||||
<svg viewBox="0 0 10 10"><rect x="0.5" y="0.5" width="9" height="9" fill="none" stroke="currentColor" stroke-width="1.2"/></svg>
|
||||
</button>
|
||||
<button class="window-control-btn close" id="close-btn" title="Close">
|
||||
<svg viewBox="0 0 10 10"><path d="M0 0L10 10M10 0L0 10" stroke="currentColor" stroke-width="1.2"/></svg>
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
<div id="address-bar">
|
||||
<button class="nav-btn" id="back-btn" title="Back" disabled>
|
||||
<svg viewBox="0 0 24 24"><path d="M20 11H7.83l5.59-5.59L12 4l-8 8 8 8 1.41-1.41L7.83 13H20v-2z"/></svg>
|
||||
</button>
|
||||
<button class="nav-btn" id="forward-btn" title="Forward" disabled>
|
||||
<svg viewBox="0 0 24 24"><path d="M12 4l-1.41 1.41L16.17 11H4v2h12.17l-5.58 5.59L12 20l8-8z"/></svg>
|
||||
</button>
|
||||
<button class="nav-btn" id="refresh-btn" title="Refresh">
|
||||
<svg viewBox="0 0 24 24"><path d="M17.65 6.35A7.958 7.958 0 0012 4c-4.42 0-7.99 3.58-7.99 8s3.57 8 7.99 8c3.73 0 6.84-2.55 7.73-6h-2.08A5.99 5.99 0 0112 18c-3.31 0-6-2.69-6-6s2.69-6 6-6c1.66 0 3.14.69 4.22 1.78L13 11h7V4l-2.35 2.35z"/></svg>
|
||||
</button>
|
||||
<div id="url-container">
|
||||
<input type="text" id="url-input" placeholder="Search or enter URL" spellcheck="false" />
|
||||
</div>
|
||||
</div>
|
||||
<script>
|
||||
const tabsContainer = document.getElementById('tabs-container');
|
||||
const urlInput = document.getElementById('url-input');
|
||||
const backBtn = document.getElementById('back-btn');
|
||||
const forwardBtn = document.getElementById('forward-btn');
|
||||
const refreshBtn = document.getElementById('refresh-btn');
|
||||
|
||||
window.currentUrl = '';
|
||||
window.canGoBack = false;
|
||||
window.canGoForward = false;
|
||||
|
||||
// Helper function to update before-active class for separator hiding
|
||||
function updateBeforeActiveClass() {
|
||||
var tabs = tabsContainer.querySelectorAll('.tab');
|
||||
tabs.forEach(function(tab, index) {
|
||||
tab.classList.remove('before-active');
|
||||
if (index < tabs.length - 1 && tabs[index + 1].classList.contains('active')) {
|
||||
tab.classList.add('before-active');
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Helper function to update narrow class based on tab width
|
||||
function updateNarrowClass() {
|
||||
var tabs = tabsContainer.querySelectorAll('.tab');
|
||||
tabs.forEach(function(tab) {
|
||||
if (tab.offsetWidth < 72) {
|
||||
tab.classList.add('narrow');
|
||||
} else {
|
||||
tab.classList.remove('narrow');
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
var newTabBtnHtml = '<div id="new-tab-btn" title="New tab"><svg viewBox="0 0 24 24"><path d="M19 13h-6v6h-2v-6H5v-2h6V5h2v6h6v2z"/></svg></div>';
|
||||
|
||||
// Track if we're in "closing mode" where tab widths should be fixed
|
||||
var closingModeTimeout = null;
|
||||
var isInClosingMode = false;
|
||||
|
||||
function enterClosingMode() {
|
||||
isInClosingMode = true;
|
||||
// Clear any existing timeout
|
||||
if (closingModeTimeout) {
|
||||
clearTimeout(closingModeTimeout);
|
||||
}
|
||||
// Set timeout to exit closing mode after 1 second of no activity
|
||||
closingModeTimeout = setTimeout(function() {
|
||||
exitClosingMode();
|
||||
}, 1000);
|
||||
}
|
||||
|
||||
function exitClosingMode() {
|
||||
isInClosingMode = false;
|
||||
if (closingModeTimeout) {
|
||||
clearTimeout(closingModeTimeout);
|
||||
closingModeTimeout = null;
|
||||
}
|
||||
// Remove fixed widths from tabs
|
||||
var tabs = tabsContainer.querySelectorAll('.tab');
|
||||
tabs.forEach(function(tab) {
|
||||
tab.style.flex = '';
|
||||
tab.style.width = '';
|
||||
});
|
||||
}
|
||||
|
||||
// Exit closing mode when mouse leaves the tab row
|
||||
document.getElementById('tab-row').addEventListener('mouseleave', function() {
|
||||
if (isInClosingMode) {
|
||||
exitClosingMode();
|
||||
}
|
||||
});
|
||||
|
||||
window.updateTabs = function(tabs, activeUrl, canGoBack, canGoForward) {
|
||||
// Capture current tab widths before update if in closing mode
|
||||
var previousWidths = {};
|
||||
if (isInClosingMode) {
|
||||
var existingTabs = tabsContainer.querySelectorAll('.tab');
|
||||
existingTabs.forEach(function(tab) {
|
||||
previousWidths[tab.dataset.id] = tab.offsetWidth;
|
||||
});
|
||||
}
|
||||
|
||||
if (!tabs || tabs.length === 0) {
|
||||
// Window will be closed by main process when last tab is closed
|
||||
// Just clear the UI in case this is called before window closes
|
||||
tabsContainer.innerHTML = newTabBtnHtml;
|
||||
urlInput.value = '';
|
||||
document.getElementById('new-tab-btn').addEventListener('click', function() {
|
||||
sendAction({ type: 'new' });
|
||||
});
|
||||
return;
|
||||
}
|
||||
tabsContainer.innerHTML = tabs.map(function(tab) {
|
||||
var cls = 'tab' + (tab.isActive ? ' active' : '');
|
||||
var title = (tab.title || 'New Tab').replace(/&/g,'&amp;').replace(/</g,'&lt;').replace(/>/g,'&gt;').replace(/"/g,'&quot;');
|
||||
var url = (tab.url || '').replace(/&/g,'&amp;').replace(/</g,'&lt;').replace(/>/g,'&gt;').replace(/"/g,'&quot;');
|
||||
return '<div class="' + cls + '" data-id="' + tab.id + '" title="' + url + '">' +
|
||||
'<div class="tab-favicon"><svg viewBox="0 0 24 24"><path d="M12 2C6.48 2 2 6.48 2 12s4.48 10 10 10 10-4.48 10-10S17.52 2 12 2zm-1 17.93c-3.95-.49-7-3.85-7-7.93 0-.62.08-1.21.21-1.79L9 15v1c0 1.1.9 2 2 2v1.93zm6.9-2.54c-.26-.81-1-1.39-1.9-1.39h-1v-3c0-.55-.45-1-1-1H8v-2h2c.55 0 1-.45 1-1V7h2c1.1 0 2-.9 2-2v-.41c2.93 1.19 5 4.06 5 7.41 0 2.08-.8 3.97-2.1 5.39z"/></svg></div>' +
|
||||
'<span class="tab-title">' + title + '</span>' +
|
||||
'<div class="tab-close" data-id="' + tab.id + '">' +
|
||||
'<svg viewBox="0 0 24 24"><path d="M19 6.41L17.59 5 12 10.59 6.41 5 5 6.41 10.59 12 5 17.59 6.41 19 12 13.41 17.59 19 19 17.59 13.41 12z"/></svg>' +
|
||||
'</div>' +
|
||||
'</div>';
|
||||
}).join('') + newTabBtnHtml;
|
||||
|
||||
// Re-attach event listener for new tab button
|
||||
document.getElementById('new-tab-btn').addEventListener('click', function() {
|
||||
sendAction({ type: 'new' });
|
||||
});
|
||||
|
||||
// If in closing mode, fix the widths of remaining tabs
|
||||
if (isInClosingMode) {
|
||||
var newTabs = tabsContainer.querySelectorAll('.tab');
|
||||
newTabs.forEach(function(tab) {
|
||||
var prevWidth = previousWidths[tab.dataset.id];
|
||||
if (prevWidth) {
|
||||
tab.style.flex = '0 0 ' + prevWidth + 'px';
|
||||
tab.style.width = prevWidth + 'px';
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Update before-active class for proper separator hiding
|
||||
updateBeforeActiveClass();
|
||||
// Update narrow class based on tab width
|
||||
updateNarrowClass();
|
||||
|
||||
if (activeUrl !== undefined) {
|
||||
window.currentUrl = activeUrl || '';
|
||||
if (document.activeElement !== urlInput) {
|
||||
urlInput.value = window.currentUrl;
|
||||
}
|
||||
}
|
||||
|
||||
if (canGoBack !== undefined) {
|
||||
window.canGoBack = canGoBack;
|
||||
backBtn.disabled = !canGoBack;
|
||||
}
|
||||
if (canGoForward !== undefined) {
|
||||
window.canGoForward = canGoForward;
|
||||
forwardBtn.disabled = !canGoForward;
|
||||
}
|
||||
};
|
||||
|
||||
function sendAction(action) {
|
||||
window.postMessage({ channel: 'tabbar-action', payload: action }, '*');
|
||||
}
|
||||
|
||||
tabsContainer.addEventListener('click', function(e) {
|
||||
var closeBtn = e.target.closest('.tab-close');
|
||||
if (closeBtn) {
|
||||
e.stopPropagation();
|
||||
enterClosingMode();
|
||||
sendAction({ type: 'close', tabId: closeBtn.dataset.id });
|
||||
return;
|
||||
}
|
||||
var tab = e.target.closest('.tab');
|
||||
if (tab) {
|
||||
sendAction({ type: 'switch', tabId: tab.dataset.id });
|
||||
}
|
||||
});
|
||||
|
||||
tabsContainer.addEventListener('auxclick', function(e) {
|
||||
if (e.button === 1) {
|
||||
var tab = e.target.closest('.tab');
|
||||
if (tab) {
|
||||
enterClosingMode();
|
||||
sendAction({ type: 'close', tabId: tab.dataset.id });
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Handle hover state for separator hiding (left side of hovered tab)
|
||||
tabsContainer.addEventListener('mouseover', function(e) {
|
||||
var tab = e.target.closest('.tab');
|
||||
// Clear all before-hover classes first
|
||||
tabsContainer.querySelectorAll('.before-hover').forEach(function(t) {
|
||||
t.classList.remove('before-hover');
|
||||
});
|
||||
if (tab) {
|
||||
var prev = tab.previousElementSibling;
|
||||
if (prev && prev.classList.contains('tab')) {
|
||||
prev.classList.add('before-hover');
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
tabsContainer.addEventListener('mouseleave', function() {
|
||||
tabsContainer.querySelectorAll('.before-hover').forEach(function(t) {
|
||||
t.classList.remove('before-hover');
|
||||
});
|
||||
});
|
||||
|
||||
urlInput.addEventListener('keydown', function(e) {
|
||||
if (e.key === 'Enter') {
|
||||
var url = urlInput.value.trim();
|
||||
if (url) {
|
||||
sendAction({ type: 'navigate', url: url });
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
urlInput.addEventListener('focus', function() {
|
||||
urlInput.select();
|
||||
});
|
||||
|
||||
backBtn.addEventListener('click', function() {
|
||||
if (window.canGoBack) {
|
||||
sendAction({ type: 'back' });
|
||||
}
|
||||
});
|
||||
|
||||
forwardBtn.addEventListener('click', function() {
|
||||
if (window.canGoForward) {
|
||||
sendAction({ type: 'forward' });
|
||||
}
|
||||
});
|
||||
|
||||
refreshBtn.addEventListener('click', function() {
|
||||
sendAction({ type: 'refresh' });
|
||||
});
|
||||
|
||||
// Window controls for Windows/Linux
|
||||
document.getElementById('minimize-btn').addEventListener('click', function() {
|
||||
sendAction({ type: 'window-minimize' });
|
||||
});
|
||||
document.getElementById('maximize-btn').addEventListener('click', function() {
|
||||
sendAction({ type: 'window-maximize' });
|
||||
});
|
||||
document.getElementById('close-btn').addEventListener('click', function() {
|
||||
sendAction({ type: 'window-close' });
|
||||
});
|
||||
|
||||
// Platform initialization - called from main process
|
||||
window.initPlatform = function(platform) {
|
||||
document.body.classList.add('platform-' + platform);
|
||||
};
|
||||
|
||||
// Theme initialization - called from main process
|
||||
window.setTheme = function(isDark) {
|
||||
if (isDark) {
|
||||
document.body.classList.add('theme-dark');
|
||||
} else {
|
||||
document.body.classList.remove('theme-dark');
|
||||
}
|
||||
};
|
||||
|
||||
// Update narrow class on window resize
|
||||
window.addEventListener('resize', function() {
|
||||
updateNarrowClass();
|
||||
});
|
||||
</script>
|
||||
</body>
|
||||
</html>`
|
||||
@@ -1,36 +1,39 @@
|
||||
import * as z from 'zod'
|
||||
|
||||
import type { CdpBrowserController } from '../controller'
|
||||
import { logger } from '../types'
|
||||
import { errorResponse, successResponse } from './utils'
|
||||
|
||||
export const ExecuteSchema = z.object({
|
||||
code: z
|
||||
.string()
|
||||
.describe(
|
||||
'JavaScript evaluated via Chrome DevTools Runtime.evaluate. Keep it short; prefer one-line with semicolons for multiple statements.'
|
||||
),
|
||||
timeout: z.number().default(5000).describe('Timeout in milliseconds for code execution (default: 5000ms)'),
|
||||
sessionId: z.string().optional().describe('Session identifier to target a specific page (default: default)')
|
||||
code: z.string().describe('JavaScript code to run in page context'),
|
||||
timeout: z.number().default(5000).describe('Execution timeout in ms (default: 5000)'),
|
||||
privateMode: z.boolean().optional().describe('Target private session (default: false)'),
|
||||
tabId: z.string().optional().describe('Target specific tab by ID')
|
||||
})
|
||||
|
||||
export const executeToolDefinition = {
|
||||
name: 'execute',
|
||||
description:
|
||||
'Run JavaScript in the current page via Runtime.evaluate. Prefer short, single-line snippets; use semicolons for multiple statements.',
|
||||
'Run JavaScript in the currently open page. Use after open to: click elements, fill forms, extract content (document.body.innerText), or interact with the page. The page must be opened first with open or fetch.',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
code: {
|
||||
type: 'string',
|
||||
description: 'One-line JS to evaluate in page context'
|
||||
description:
|
||||
'JavaScript to evaluate. Examples: document.body.innerText (get text), document.querySelector("button").click() (click), document.title (get title)'
|
||||
},
|
||||
timeout: {
|
||||
type: 'number',
|
||||
description: 'Timeout in milliseconds (default 5000)'
|
||||
description: 'Execution timeout in ms (default: 5000)'
|
||||
},
|
||||
sessionId: {
|
||||
privateMode: {
|
||||
type: 'boolean',
|
||||
description: 'Target private session (default: false)'
|
||||
},
|
||||
tabId: {
|
||||
type: 'string',
|
||||
description: 'Session identifier; targets a specific page (default: default)'
|
||||
description: 'Target specific tab by ID (from open response)'
|
||||
}
|
||||
},
|
||||
required: ['code']
|
||||
@@ -38,11 +41,12 @@ export const executeToolDefinition = {
|
||||
}
|
||||
|
||||
export async function handleExecute(controller: CdpBrowserController, args: unknown) {
|
||||
const { code, timeout, sessionId } = ExecuteSchema.parse(args)
|
||||
const { code, timeout, privateMode, tabId } = ExecuteSchema.parse(args)
|
||||
try {
|
||||
const value = await controller.execute(code, timeout, sessionId ?? 'default')
|
||||
const value = await controller.execute(code, timeout, privateMode ?? false, tabId)
|
||||
return successResponse(typeof value === 'string' ? value : JSON.stringify(value))
|
||||
} catch (error) {
|
||||
logger.error('Execute failed', { error, code: code.slice(0, 100), privateMode, tabId })
|
||||
return errorResponse(error as Error)
|
||||
}
|
||||
}
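For reference, the arguments accepted by the new `execute` schema can be illustrated as follows (values are examples only; the `tabId` is hypothetical and would come from a prior `open` call):

```ts
// Illustrative arguments for the execute tool; not part of the diff above.
const executeArgs = {
  code: 'document.title', // short, single-expression snippets work best
  timeout: 5000,          // default per the schema
  privateMode: false,     // target the normal (non-incognito) window
  tabId: 'tab-1'          // hypothetical id returned by a previous open call
}
```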
|
||||
|
||||
@@ -1,49 +0,0 @@
|
||||
import * as z from 'zod'
|
||||
|
||||
import type { CdpBrowserController } from '../controller'
|
||||
import { errorResponse, successResponse } from './utils'
|
||||
|
||||
export const FetchSchema = z.object({
|
||||
url: z.url().describe('URL to fetch'),
|
||||
format: z.enum(['html', 'txt', 'markdown', 'json']).default('markdown').describe('Output format (default: markdown)'),
|
||||
timeout: z.number().optional().describe('Timeout in milliseconds for navigation (default: 10000)'),
|
||||
sessionId: z.string().optional().describe('Session identifier (default: default)')
|
||||
})
|
||||
|
||||
export const fetchToolDefinition = {
|
||||
name: 'fetch',
|
||||
description: 'Fetch a URL using the browser and return content in specified format (html, txt, markdown, json)',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
url: {
|
||||
type: 'string',
|
||||
description: 'URL to fetch'
|
||||
},
|
||||
format: {
|
||||
type: 'string',
|
||||
enum: ['html', 'txt', 'markdown', 'json'],
|
||||
description: 'Output format (default: markdown)'
|
||||
},
|
||||
timeout: {
|
||||
type: 'number',
|
||||
description: 'Navigation timeout in milliseconds (default: 10000)'
|
||||
},
|
||||
sessionId: {
|
||||
type: 'string',
|
||||
description: 'Session identifier (default: default)'
|
||||
}
|
||||
},
|
||||
required: ['url']
|
||||
}
|
||||
}
|
||||
|
||||
export async function handleFetch(controller: CdpBrowserController, args: unknown) {
|
||||
const { url, format, timeout, sessionId } = FetchSchema.parse(args)
|
||||
try {
|
||||
const content = await controller.fetch(url, format, timeout ?? 10000, sessionId ?? 'default')
|
||||
return successResponse(typeof content === 'string' ? content : JSON.stringify(content))
|
||||
} catch (error) {
|
||||
return errorResponse(error as Error)
|
||||
}
|
||||
}
|
||||
@@ -1,15 +1,13 @@
|
||||
export { ExecuteSchema, executeToolDefinition, handleExecute } from './execute'
|
||||
export { FetchSchema, fetchToolDefinition, handleFetch } from './fetch'
|
||||
export { handleOpen, OpenSchema, openToolDefinition } from './open'
|
||||
export { handleReset, resetToolDefinition } from './reset'
|
||||
|
||||
import type { CdpBrowserController } from '../controller'
|
||||
import { executeToolDefinition, handleExecute } from './execute'
|
||||
import { fetchToolDefinition, handleFetch } from './fetch'
|
||||
import { handleOpen, openToolDefinition } from './open'
|
||||
import { handleReset, resetToolDefinition } from './reset'
|
||||
|
||||
export const toolDefinitions = [openToolDefinition, executeToolDefinition, resetToolDefinition, fetchToolDefinition]
|
||||
export const toolDefinitions = [openToolDefinition, executeToolDefinition, resetToolDefinition]
|
||||
|
||||
export const toolHandlers: Record<
|
||||
string,
|
||||
@@ -20,6 +18,5 @@ export const toolHandlers: Record<
|
||||
> = {
|
||||
open: handleOpen,
|
||||
execute: handleExecute,
|
||||
reset: handleReset,
|
||||
fetch: handleFetch
|
||||
reset: handleReset
|
||||
}
|
||||
|
||||
@@ -1,39 +1,52 @@
|
||||
import * as z from 'zod'
|
||||
|
||||
import type { CdpBrowserController } from '../controller'
|
||||
import { successResponse } from './utils'
|
||||
import { logger } from '../types'
|
||||
import { errorResponse, successResponse } from './utils'
|
||||
|
||||
export const OpenSchema = z.object({
|
||||
url: z.url().describe('URL to open in the controlled Electron window'),
|
||||
timeout: z.number().optional().describe('Timeout in milliseconds for navigation (default: 10000)'),
|
||||
show: z.boolean().optional().describe('Whether to show the browser window (default: false)'),
|
||||
sessionId: z
|
||||
.string()
|
||||
url: z.url().describe('URL to navigate to'),
|
||||
format: z
|
||||
.enum(['html', 'txt', 'markdown', 'json'])
|
||||
.optional()
|
||||
.describe('Session identifier; separate sessions keep separate pages (default: default)')
|
||||
.describe('If set, return page content in this format. If not set, just open the page and return tabId.'),
|
||||
timeout: z.number().optional().describe('Navigation timeout in ms (default: 10000)'),
|
||||
privateMode: z.boolean().optional().describe('Use incognito mode, no data persisted (default: false)'),
|
||||
newTab: z.boolean().optional().describe('Open in new tab, required for parallel requests (default: false)'),
|
||||
showWindow: z.boolean().optional().default(true).describe('Show browser window (default: true)')
|
||||
})
|
||||
|
||||
export const openToolDefinition = {
|
||||
name: 'open',
|
||||
description: 'Open a URL in a hidden Electron window controlled via Chrome DevTools Protocol',
|
||||
description:
|
||||
'Navigate to a URL in a browser window. If format is specified, returns { tabId, content } with page content in that format. Otherwise, returns { currentUrl, title, tabId } for subsequent operations with execute tool. Set newTab=true when opening multiple URLs in parallel.',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
url: {
|
||||
type: 'string',
|
||||
description: 'URL to load'
|
||||
description: 'URL to navigate to'
|
||||
},
|
||||
format: {
|
||||
type: 'string',
|
||||
enum: ['html', 'txt', 'markdown', 'json'],
|
||||
description: 'If set, return page content in this format. If not set, just open the page and return tabId.'
|
||||
},
|
||||
timeout: {
|
||||
type: 'number',
|
||||
description: 'Navigation timeout in milliseconds (default 10000)'
|
||||
description: 'Navigation timeout in ms (default: 10000)'
|
||||
},
|
||||
show: {
|
||||
privateMode: {
|
||||
type: 'boolean',
|
||||
description: 'Whether to show the browser window (default false)'
|
||||
description: 'Use incognito mode, no data persisted (default: false)'
|
||||
},
|
||||
sessionId: {
|
||||
type: 'string',
|
||||
description: 'Session identifier; separate sessions keep separate pages (default: default)'
|
||||
newTab: {
|
||||
type: 'boolean',
|
||||
description: 'Open in new tab, required for parallel requests (default: false)'
|
||||
},
|
||||
showWindow: {
|
||||
type: 'boolean',
|
||||
description: 'Show browser window (default: true)'
|
||||
}
|
||||
},
|
||||
required: ['url']
|
||||
@@ -41,7 +54,28 @@ export const openToolDefinition = {
|
||||
}
|
||||
|
||||
export async function handleOpen(controller: CdpBrowserController, args: unknown) {
|
||||
const { url, timeout, show, sessionId } = OpenSchema.parse(args)
|
||||
const res = await controller.open(url, timeout ?? 10000, show ?? false, sessionId ?? 'default')
|
||||
return successResponse(JSON.stringify(res))
|
||||
try {
|
||||
const { url, format, timeout, privateMode, newTab, showWindow } = OpenSchema.parse(args)
|
||||
|
||||
if (format) {
|
||||
const { tabId, content } = await controller.fetch(
|
||||
url,
|
||||
format,
|
||||
timeout ?? 10000,
|
||||
privateMode ?? false,
|
||||
newTab ?? false,
|
||||
showWindow
|
||||
)
|
||||
return successResponse(JSON.stringify({ tabId, content }))
|
||||
} else {
|
||||
const res = await controller.open(url, timeout ?? 10000, privateMode ?? false, newTab ?? false, showWindow)
|
||||
return successResponse(JSON.stringify(res))
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error('Open failed', {
|
||||
error,
|
||||
url: args && typeof args === 'object' && 'url' in args ? args.url : undefined
|
||||
})
|
||||
return errorResponse(error instanceof Error ? error : String(error))
|
||||
}
|
||||
}
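An illustrative `open` call matching the schema above (values are examples; the response shape follows the tool description):

```ts
// With format set, open behaves like a fetch and returns { tabId, content };
// without format it returns { currentUrl, title, tabId } for follow-up execute calls.
const openArgs = {
  url: 'https://example.com',
  format: 'markdown',   // omit to just open the page and get a tabId back
  timeout: 10000,
  privateMode: false,
  newTab: true,         // required when opening multiple URLs in parallel
  showWindow: false
}
```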
|
||||
|
||||
@@ -1,34 +1,43 @@
|
||||
import * as z from 'zod'
|
||||
|
||||
import type { CdpBrowserController } from '../controller'
|
||||
import { successResponse } from './utils'
|
||||
import { logger } from '../types'
|
||||
import { errorResponse, successResponse } from './utils'
|
||||
|
||||
/** Zod schema for validating reset tool arguments */
|
||||
export const ResetSchema = z.object({
|
||||
sessionId: z.string().optional().describe('Session identifier to reset; omit to reset all sessions')
|
||||
privateMode: z.boolean().optional().describe('true=private window, false=normal window, omit=all windows'),
|
||||
tabId: z.string().optional().describe('Close specific tab only (requires privateMode)')
|
||||
})
|
||||
|
||||
/** MCP tool definition for the reset tool */
|
||||
export const resetToolDefinition = {
|
||||
name: 'reset',
|
||||
description: 'Reset the controlled window and detach debugger',
|
||||
description:
|
||||
'Close browser windows and clear state. Call when done browsing to free resources. Omit all parameters to close everything.',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
sessionId: {
|
||||
privateMode: {
|
||||
type: 'boolean',
|
||||
description: 'true=reset private window only, false=reset normal window only, omit=reset all'
|
||||
},
|
||||
tabId: {
|
||||
type: 'string',
|
||||
description: 'Session identifier to reset; omit to reset all sessions'
|
||||
description: 'Close specific tab only (requires privateMode to be set)'
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Handler for the reset MCP tool.
|
||||
* Closes browser window(s) and detaches debugger for the specified session or all sessions.
|
||||
*/
|
||||
export async function handleReset(controller: CdpBrowserController, args: unknown) {
|
||||
const { sessionId } = ResetSchema.parse(args)
|
||||
await controller.reset(sessionId)
|
||||
return successResponse('reset')
|
||||
try {
|
||||
const { privateMode, tabId } = ResetSchema.parse(args)
|
||||
await controller.reset(privateMode, tabId)
|
||||
return successResponse('reset')
|
||||
} catch (error) {
|
||||
logger.error('Reset failed', {
|
||||
error,
|
||||
privateMode: args && typeof args === 'object' && 'privateMode' in args ? args.privateMode : undefined
|
||||
})
|
||||
return errorResponse(error instanceof Error ? error : String(error))
|
||||
}
|
||||
}
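Illustrative `reset` arguments, per the schema above (the tab id is hypothetical):

```ts
// Close a single tab in the private window; calling reset with no
// arguments closes every window and clears all state.
const resetArgs = { privateMode: true, tabId: 'tab-1' }
```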
|
||||
|
||||
@@ -5,9 +5,10 @@ export function successResponse(text: string) {
|
||||
}
|
||||
}
|
||||
|
||||
export function errorResponse(error: Error) {
|
||||
export function errorResponse(error: Error | string) {
|
||||
const message = error instanceof Error ? error.message : error
|
||||
return {
|
||||
content: [{ type: 'text', text: error.message }],
|
||||
content: [{ type: 'text', text: message }],
|
||||
isError: true
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,24 @@
|
||||
import { loggerService } from '@logger'
|
||||
import type { BrowserView, BrowserWindow } from 'electron'
|
||||
|
||||
export const logger = loggerService.withContext('MCPBrowserCDP')
|
||||
export const userAgent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:145.0) Gecko/20100101 Firefox/145.0'
|
||||
export const userAgent =
|
||||
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
|
||||
|
||||
export interface TabInfo {
|
||||
id: string
|
||||
view: BrowserView
|
||||
url: string
|
||||
title: string
|
||||
lastActive: number
|
||||
}
|
||||
|
||||
export interface WindowInfo {
|
||||
windowKey: string
|
||||
privateMode: boolean
|
||||
window: BrowserWindow
|
||||
tabs: Map<string, TabInfo>
|
||||
activeTabId: string | null
|
||||
lastActive: number
|
||||
tabBarView?: BrowserView
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ import DiDiMcpServer from './didi-mcp'
|
||||
import DifyKnowledgeServer from './dify-knowledge'
|
||||
import FetchServer from './fetch'
|
||||
import FileSystemServer from './filesystem'
|
||||
import HubServer from './hub'
|
||||
import MemoryServer from './memory'
|
||||
import PythonServer from './python'
|
||||
import ThinkingServer from './sequentialthinking'
|
||||
@@ -52,6 +53,9 @@ export function createInMemoryMCPServer(
|
||||
case BuiltinMCPServerNames.browser: {
|
||||
return new BrowserServer().server
|
||||
}
|
||||
case BuiltinMCPServerNames.hub: {
|
||||
return new HubServer().server
|
||||
}
|
||||
default:
|
||||
throw new Error(`Unknown in-memory MCP server: ${name}`)
|
||||
}
|
||||
|
||||
src/main/mcpServers/hub/README.md (new file, 213 lines)
@@ -0,0 +1,213 @@
|
||||
# Hub MCP Server
|
||||
|
||||
A built-in MCP server that aggregates all active MCP servers in Cherry Studio and exposes them through `search` and `exec` tools.
|
||||
|
||||
## Overview
|
||||
|
||||
The Hub server enables LLMs to discover and call tools from all active MCP servers without needing to know the specific server names or tool signatures upfront.
|
||||
|
||||
## Auto Mode Integration
|
||||
|
||||
The Hub server is the core component of Cherry Studio's **Auto MCP Mode**. When an assistant is set to Auto mode:
|
||||
|
||||
1. **Automatic Injection**: The Hub server is automatically injected as the only MCP server for the assistant
|
||||
2. **System Prompt**: A specialized system prompt (`HUB_MODE_SYSTEM_PROMPT`) is appended to guide the LLM on how to use the `search` and `exec` tools
|
||||
3. **Dynamic Discovery**: The LLM can discover and use any tools from all active MCP servers without manual configuration
|
||||
|
||||
### MCP Modes
|
||||
|
||||
Cherry Studio supports three MCP modes per assistant:
|
||||
|
||||
| Mode | Description | Tools Available |
|
||||
|------|-------------|-----------------|
|
||||
| **Disabled** | No MCP tools | None |
|
||||
| **Auto** | Hub server only | `search`, `exec` |
|
||||
| **Manual** | User selects servers | Selected server tools |
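For orientation, the mode type and the Auto-mode server selection can be sketched roughly like this (a sketch only; the real definitions live in the files listed under Relevant Code below, and the names and signatures here are assumptions):

```ts
// Rough sketch, not the actual implementation.
type McpMode = 'disabled' | 'auto' | 'manual'

function selectServersForAssistant(mode: McpMode, selected: MCPServer[], hub: MCPServer): MCPServer[] {
  switch (mode) {
    case 'disabled':
      return [] // no MCP tools at all
    case 'auto':
      return [hub] // Hub is injected as the only server
    case 'manual':
      return selected // user-selected servers
  }
}
```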
|
||||
|
||||
### How Auto Mode Works
|
||||
|
||||
```
|
||||
User Message
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────┐
|
||||
│ Assistant (mcpMode: 'auto') │
|
||||
│ │
|
||||
│ System Prompt + HUB_MODE_SYSTEM_PROMPT │
|
||||
│ Tools: [hub.search, hub.exec] │
|
||||
└─────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────┐
|
||||
│ LLM decides to use MCP tools │
|
||||
│ │
|
||||
│ 1. search({ query: "github,repo" }) │
|
||||
│ 2. exec({ code: "await searchRepos()" })│
|
||||
└─────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────┐
|
||||
│ Hub Server │
|
||||
│ │
|
||||
│ Aggregates all active MCP servers │
|
||||
│ Routes tool calls to appropriate server │
|
||||
└─────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
### Relevant Code
|
||||
|
||||
- **Type Definition**: `src/renderer/src/types/index.ts` - `McpMode` type and `getEffectiveMcpMode()`
|
||||
- **Hub Server Constant**: `src/renderer/src/store/mcp.ts` - `hubMCPServer`
|
||||
- **Server Selection**: `src/renderer/src/services/ApiService.ts` - `getMcpServersForAssistant()`
|
||||
- **System Prompt**: `src/renderer/src/config/prompts.ts` - `HUB_MODE_SYSTEM_PROMPT`
|
||||
- **Prompt Injection**: `src/renderer/src/aiCore/prepareParams/parameterBuilder.ts`
|
||||
|
||||
## Tools
|
||||
|
||||
### `search`
|
||||
|
||||
Search for available MCP tools by keywords.
|
||||
|
||||
**Parameters:**
|
||||
| Name | Type | Required | Description |
|
||||
|------|------|----------|-------------|
|
||||
| `query` | string | Yes | Search keywords, comma-separated for OR matching |
|
||||
| `limit` | number | No | Maximum results to return (default: 10, max: 50) |
|
||||
|
||||
**Example:**
|
||||
```json
|
||||
{
|
||||
"query": "browser,chrome",
|
||||
"limit": 5
|
||||
}
|
||||
```
|
||||
|
||||
**Returns:** JavaScript function declarations with JSDoc comments that can be used in the `exec` tool.
|
||||
|
||||
```javascript
|
||||
// Found 2 tool(s):
|
||||
|
||||
/**
|
||||
* Launch a browser instance
|
||||
*
|
||||
* @param {{ browser?: "chromium" | "firefox" | "webkit", headless?: boolean }} params
|
||||
* @returns {Promise<unknown>}
|
||||
*/
|
||||
async function launchBrowser(params) {
|
||||
return await __callTool("browser__launch_browser", params);
|
||||
}
|
||||
```
|
||||
|
||||
### `exec`
|
||||
|
||||
Execute JavaScript code that calls MCP tools.
|
||||
|
||||
**Parameters:**
|
||||
| Name | Type | Required | Description |
|
||||
|------|------|----------|-------------|
|
||||
| `code` | string | Yes | JavaScript code to execute |
|
||||
|
||||
**Built-in Helpers:**
|
||||
- `parallel(...promises)` - Run multiple tool calls concurrently (Promise.all)
|
||||
- `settle(...promises)` - Run multiple tool calls and get all results (Promise.allSettled)
|
||||
- `console.log/warn/error/info/debug` - Captured in output logs
|
||||
|
||||
**Example:**
|
||||
```javascript
|
||||
// Call a single tool
|
||||
const result = await searchRepos({ query: "react" });
|
||||
return result;
|
||||
|
||||
// Call multiple tools in parallel
|
||||
const [users, repos] = await parallel(
|
||||
getUsers({ limit: 10 }),
|
||||
searchRepos({ query: "typescript" })
|
||||
);
|
||||
return { users, repos };
|
||||
```
|
||||
|
||||
**Returns:**
|
||||
```json
|
||||
{
|
||||
"result": { "users": [...], "repos": [...] },
|
||||
"logs": ["[log] Processing..."],
|
||||
"error": null
|
||||
}
|
||||
```
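The `settle` helper listed above is not demonstrated; a brief example using the same hypothetical tool functions:

```ts
// settle is Promise.allSettled over tool calls: failures don't abort the batch.
const [users, repos] = await settle(
  getUsers({ limit: 10 }),
  searchRepos({ query: "typescript" })
);
console.log(users.status); // "fulfilled" or "rejected"
return repos.status === "fulfilled" ? repos.value : { error: repos.reason };
```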
|
||||
|
||||
## Usage Flow
|
||||
|
||||
1. **Search** for tools using keywords:
|
||||
```
|
||||
search({ query: "github,repository" })
|
||||
```
|
||||
|
||||
2. **Review** the returned function signatures and JSDoc
|
||||
|
||||
3. **Execute** code using the discovered tools:
|
||||
```
|
||||
exec({ code: 'return await searchRepos({ query: "react" })' })
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
The Hub server is a built-in server identified as `@cherry/hub`.
|
||||
|
||||
### Using Auto Mode (Recommended)
|
||||
|
||||
The easiest way to use the Hub server is through Auto mode:
|
||||
|
||||
1. Click the **MCP Tools** button (hammer icon) in the input bar
|
||||
2. Select **Auto** mode
|
||||
3. The Hub server is automatically enabled for the assistant
|
||||
|
||||
### Manual Configuration
|
||||
|
||||
Alternatively, you can enable the Hub server manually:
|
||||
|
||||
1. Go to **Settings** → **MCP Servers**
|
||||
2. Find **Hub** in the built-in servers list
|
||||
3. Toggle it on
|
||||
4. In the assistant's MCP settings, select the Hub server
|
||||
|
||||
## Caching
|
||||
|
||||
- Tool definitions are cached for **10 minutes**
|
||||
- Cache is automatically refreshed when expired
|
||||
- Cache is invalidated when MCP servers connect/disconnect
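
A minimal sketch of the TTL behavior described above (the actual logic lives in `tool-registry.ts`; the class and method names here are assumptions):

```ts
// Minimal TTL cache sketch, not the real ToolRegistry.
const TTL_MS = 10 * 60 * 1000 // 10 minutes

class CachedToolList<T> {
  private value: T | null = null
  private fetchedAt = 0

  async get(load: () => Promise<T>): Promise<T> {
    const expired = Date.now() - this.fetchedAt > TTL_MS
    if (this.value === null || expired) {
      this.value = await load() // refresh on first use or after expiry
      this.fetchedAt = Date.now()
    }
    return this.value
  }

  invalidate(): void {
    // called when MCP servers connect or disconnect
    this.value = null
    this.fetchedAt = 0
  }
}
```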
|
||||
|
||||
## Limitations
|
||||
|
||||
- Code execution has a **60-second timeout**
|
||||
- Console logs are limited to **1000 entries**
|
||||
- Search results are limited to **50 tools** maximum
|
||||
- The Hub server excludes itself from the aggregated server list
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
LLM
|
||||
│
|
||||
▼
|
||||
HubServer
|
||||
├── search → ToolRegistry → SearchIndex
|
||||
└── exec → Runtime → callMcpTool()
|
||||
│
|
||||
▼
|
||||
MCPService.callTool()
|
||||
│
|
||||
▼
|
||||
External MCP Servers
|
||||
```
|
||||
|
||||
## Files
|
||||
|
||||
| File | Description |
|
||||
|------|-------------|
|
||||
| `index.ts` | Main HubServer class |
|
||||
| `types.ts` | TypeScript type definitions |
|
||||
| `generator.ts` | Converts MCP tools to JS functions with JSDoc |
|
||||
| `tool-registry.ts` | In-memory tool cache with TTL |
|
||||
| `search.ts` | Keyword-based tool search |
|
||||
| `runtime.ts` | JavaScript code execution engine |
|
||||
| `mcp-bridge.ts` | Bridge to Cherry Studio's MCPService |
|
||||
src/main/mcpServers/hub/__tests__/generator.test.ts (new file, 119 lines)
@@ -0,0 +1,119 @@
|
||||
import { describe, expect, it } from 'vitest'
|
||||
|
||||
import { generateToolFunction, generateToolsCode } from '../generator'
|
||||
import type { GeneratedTool } from '../types'
|
||||
|
||||
describe('generator', () => {
|
||||
describe('generateToolFunction', () => {
|
||||
it('generates a simple tool function', () => {
|
||||
const tool = {
|
||||
id: 'test-id',
|
||||
name: 'search_repos',
|
||||
description: 'Search for GitHub repositories',
|
||||
serverId: 'github',
|
||||
serverName: 'github-server',
|
||||
inputSchema: {
|
||||
type: 'object' as const,
|
||||
properties: {
|
||||
query: { type: 'string', description: 'Search query' },
|
||||
limit: { type: 'number', description: 'Max results' }
|
||||
},
|
||||
required: ['query']
|
||||
},
|
||||
type: 'mcp' as const
|
||||
}
|
||||
|
||||
const existingNames = new Set<string>()
|
||||
const callTool = async () => ({ success: true })
|
||||
|
||||
const result = generateToolFunction(tool, existingNames, callTool)
|
||||
|
||||
expect(result.functionName).toBe('githubServer_searchRepos')
|
||||
expect(result.jsCode).toContain('async function githubServer_searchRepos')
|
||||
expect(result.jsCode).toContain('Search for GitHub repositories')
|
||||
expect(result.jsCode).toContain('__callTool')
|
||||
})
|
||||
|
||||
it('handles unique function names', () => {
|
||||
const tool = {
|
||||
id: 'test-id',
|
||||
name: 'search',
|
||||
serverId: 'server1',
|
||||
serverName: 'server1',
|
||||
inputSchema: { type: 'object' as const, properties: {} },
|
||||
type: 'mcp' as const
|
||||
}
|
||||
|
||||
const existingNames = new Set<string>(['server1_search'])
|
||||
const callTool = async () => ({})
|
||||
|
||||
const result = generateToolFunction(tool, existingNames, callTool)
|
||||
|
||||
expect(result.functionName).toBe('server1_search1')
|
||||
})
|
||||
|
||||
it('handles enum types in schema', () => {
|
||||
const tool = {
|
||||
id: 'test-id',
|
||||
name: 'launch_browser',
|
||||
serverId: 'browser',
|
||||
serverName: 'browser',
|
||||
inputSchema: {
|
||||
type: 'object' as const,
|
||||
properties: {
|
||||
browser: {
|
||||
type: 'string',
|
||||
enum: ['chromium', 'firefox', 'webkit']
|
||||
}
|
||||
}
|
||||
},
|
||||
type: 'mcp' as const
|
||||
}
|
||||
|
||||
const existingNames = new Set<string>()
|
||||
const callTool = async () => ({})
|
||||
|
||||
const result = generateToolFunction(tool, existingNames, callTool)
|
||||
|
||||
expect(result.jsCode).toContain('"chromium" | "firefox" | "webkit"')
|
||||
})
|
||||
})
|
||||
|
||||
describe('generateToolsCode', () => {
|
||||
it('generates code for multiple tools', () => {
|
||||
const tools: GeneratedTool[] = [
|
||||
{
|
||||
serverId: 's1',
|
||||
serverName: 'server1',
|
||||
toolName: 'tool1',
|
||||
functionName: 'server1_tool1',
|
||||
jsCode: 'async function server1_tool1() {}',
|
||||
fn: async () => ({}),
|
||||
signature: '{}',
|
||||
returns: 'unknown'
|
||||
},
|
||||
{
|
||||
serverId: 's2',
|
||||
serverName: 'server2',
|
||||
toolName: 'tool2',
|
||||
functionName: 'server2_tool2',
|
||||
jsCode: 'async function server2_tool2() {}',
|
||||
fn: async () => ({}),
|
||||
signature: '{}',
|
||||
returns: 'unknown'
|
||||
}
|
||||
]
|
||||
|
||||
const result = generateToolsCode(tools)
|
||||
|
||||
expect(result).toContain('2 tool(s)')
|
||||
expect(result).toContain('async function server1_tool1')
|
||||
expect(result).toContain('async function server2_tool2')
|
||||
})
|
||||
|
||||
it('returns message for empty tools', () => {
|
||||
const result = generateToolsCode([])
|
||||
expect(result).toBe('// No tools available')
|
||||
})
|
||||
})
|
||||
})
|
||||
src/main/mcpServers/hub/__tests__/hub.test.ts (new file, 229 lines)
@@ -0,0 +1,229 @@
|
||||
import type { MCPTool } from '@types'
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
|
||||
import { HubServer } from '../index'
|
||||
|
||||
const mockTools: MCPTool[] = [
|
||||
{
|
||||
id: 'github__search_repos',
|
||||
name: 'search_repos',
|
||||
description: 'Search for GitHub repositories',
|
||||
serverId: 'github',
|
||||
serverName: 'GitHub',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
query: { type: 'string', description: 'Search query' },
|
||||
limit: { type: 'number', description: 'Max results' }
|
||||
},
|
||||
required: ['query']
|
||||
},
|
||||
type: 'mcp'
|
||||
},
|
||||
{
|
||||
id: 'github__get_user',
|
||||
name: 'get_user',
|
||||
description: 'Get GitHub user profile',
|
||||
serverId: 'github',
|
||||
serverName: 'GitHub',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
username: { type: 'string', description: 'GitHub username' }
|
||||
},
|
||||
required: ['username']
|
||||
},
|
||||
type: 'mcp'
|
||||
},
|
||||
{
|
||||
id: 'database__query',
|
||||
name: 'query',
|
||||
description: 'Execute a database query',
|
||||
serverId: 'database',
|
||||
serverName: 'Database',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
sql: { type: 'string', description: 'SQL query to execute' }
|
||||
},
|
||||
required: ['sql']
|
||||
},
|
||||
type: 'mcp'
|
||||
}
|
||||
]
|
||||
|
||||
vi.mock('@main/services/MCPService', () => ({
|
||||
default: {
|
||||
listAllActiveServerTools: vi.fn(async () => mockTools),
|
||||
callToolById: vi.fn(async (toolId: string, args: unknown) => {
|
||||
if (toolId === 'github__search_repos') {
|
||||
return {
|
||||
content: [{ type: 'text', text: JSON.stringify({ repos: ['repo1', 'repo2'], query: args }) }]
|
||||
}
|
||||
}
|
||||
if (toolId === 'github__get_user') {
|
||||
return {
|
||||
content: [{ type: 'text', text: JSON.stringify({ username: (args as any).username, id: 123 }) }]
|
||||
}
|
||||
}
|
||||
if (toolId === 'database__query') {
|
||||
return {
|
||||
content: [{ type: 'text', text: JSON.stringify({ rows: [{ id: 1 }, { id: 2 }] }) }]
|
||||
}
|
||||
}
|
||||
return { content: [{ type: 'text', text: '{}' }] }
|
||||
}),
|
||||
abortTool: vi.fn(async () => true)
|
||||
}
|
||||
}))
|
||||
|
||||
import mcpService from '@main/services/MCPService'
|
||||
|
||||
describe('HubServer Integration', () => {
|
||||
let hubServer: HubServer
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks()
|
||||
hubServer = new HubServer()
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
vi.clearAllMocks()
|
||||
})
|
||||
|
||||
describe('full search → exec flow', () => {
|
||||
it('searches for tools and executes them', async () => {
|
||||
const searchResult = await (hubServer as any).handleSearch({ query: 'github,repos' })
|
||||
|
||||
expect(searchResult.content).toBeDefined()
|
||||
const searchText = JSON.parse(searchResult.content[0].text)
|
||||
expect(searchText.total).toBeGreaterThan(0)
|
||||
expect(searchText.tools).toContain('github_searchRepos')
|
||||
|
||||
const execResult = await (hubServer as any).handleExec({
|
||||
code: 'return await github_searchRepos({ query: "test" })'
|
||||
})
|
||||
|
||||
expect(execResult.content).toBeDefined()
|
||||
const execOutput = JSON.parse(execResult.content[0].text)
|
||||
expect(execOutput.result).toEqual({ repos: ['repo1', 'repo2'], query: { query: 'test' } })
|
||||
})
|
||||
|
||||
it('handles multiple tool calls in parallel', async () => {
|
||||
await (hubServer as any).handleSearch({ query: 'github' })
|
||||
|
||||
const execResult = await (hubServer as any).handleExec({
|
||||
code: `
|
||||
const results = await parallel(
|
||||
github_searchRepos({ query: "react" }),
|
||||
github_getUser({ username: "octocat" })
|
||||
);
|
||||
return results
|
||||
`
|
||||
})
|
||||
|
||||
const execOutput = JSON.parse(execResult.content[0].text)
|
||||
expect(execOutput.result).toHaveLength(2)
|
||||
expect(execOutput.result[0]).toEqual({ repos: ['repo1', 'repo2'], query: { query: 'react' } })
|
||||
expect(execOutput.result[1]).toEqual({ username: 'octocat', id: 123 })
|
||||
})
|
||||
|
||||
it('searches across multiple servers', async () => {
|
||||
const searchResult = await (hubServer as any).handleSearch({ query: 'query' })
|
||||
|
||||
const searchText = JSON.parse(searchResult.content[0].text)
|
||||
expect(searchText.tools).toContain('database_query')
|
||||
})
|
||||
})
|
||||
|
||||
describe('tools caching', () => {
|
||||
it('uses cached tools within TTL', async () => {
|
||||
await (hubServer as any).handleSearch({ query: 'github' })
|
||||
const firstCallCount = vi.mocked(mcpService.listAllActiveServerTools).mock.calls.length
|
||||
|
||||
await (hubServer as any).handleSearch({ query: 'github' })
|
||||
const secondCallCount = vi.mocked(mcpService.listAllActiveServerTools).mock.calls.length
|
||||
|
||||
expect(secondCallCount).toBe(firstCallCount) // Should use cache
|
||||
})
|
||||
|
||||
it('refreshes tools after cache invalidation', async () => {
|
||||
await (hubServer as any).handleSearch({ query: 'github' })
|
||||
const firstCallCount = vi.mocked(mcpService.listAllActiveServerTools).mock.calls.length
|
||||
|
||||
hubServer.invalidateCache()
|
||||
|
||||
await (hubServer as any).handleSearch({ query: 'github' })
|
||||
const secondCallCount = vi.mocked(mcpService.listAllActiveServerTools).mock.calls.length
|
||||
|
||||
expect(secondCallCount).toBe(firstCallCount + 1)
|
||||
})
|
||||
})
|
||||
|
||||
describe('error handling', () => {
|
||||
it('throws error for invalid search query', async () => {
|
||||
await expect((hubServer as any).handleSearch({})).rejects.toThrow('query parameter is required')
|
||||
})
|
||||
|
||||
it('throws error for invalid exec code', async () => {
|
||||
await expect((hubServer as any).handleExec({})).rejects.toThrow('code parameter is required')
|
||||
})
|
||||
|
||||
it('handles runtime errors in exec', async () => {
|
||||
const execResult = await (hubServer as any).handleExec({
|
||||
code: 'throw new Error("test error")'
|
||||
})
|
||||
|
||||
const execOutput = JSON.parse(execResult.content[0].text)
|
||||
expect(execOutput.error).toBe('test error')
|
||||
expect(execOutput.isError).toBe(true)
|
||||
})
|
||||
})
|
||||
|
||||
describe('exec timeouts', () => {
|
||||
afterEach(() => {
|
||||
vi.useRealTimers()
|
||||
})
|
||||
|
||||
it('aborts in-flight tool calls and returns logs on timeout', async () => {
|
||||
vi.useFakeTimers()
|
||||
|
||||
let toolCallStarted: (() => void) | null = null
|
||||
const toolCallStartedPromise = new Promise<void>((resolve) => {
|
||||
toolCallStarted = resolve
|
||||
})
|
||||
|
||||
vi.mocked(mcpService.callToolById).mockImplementationOnce(async () => {
|
||||
toolCallStarted?.()
|
||||
return await new Promise(() => {})
|
||||
})
|
||||
|
||||
const execPromise = (hubServer as any).handleExec({
|
||||
code: `
|
||||
console.log("starting");
|
||||
return await github_searchRepos({ query: "hang" });
|
||||
`
|
||||
})
|
||||
|
||||
await toolCallStartedPromise
|
||||
await vi.advanceTimersByTimeAsync(60000)
|
||||
await vi.runAllTimersAsync()
|
||||
|
||||
const execResult = await execPromise
|
||||
const execOutput = JSON.parse(execResult.content[0].text)
|
||||
|
||||
expect(execOutput.error).toBe('Execution timed out after 60000ms')
|
||||
expect(execOutput.result).toBeUndefined()
|
||||
expect(execOutput.isError).toBe(true)
|
||||
expect(execOutput.logs).toContain('[log] starting')
|
||||
expect(vi.mocked(mcpService.abortTool)).toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
|
||||
describe('server instance', () => {
|
||||
it('creates a valid MCP server instance', () => {
|
||||
expect(hubServer.server).toBeDefined()
|
||||
expect(hubServer.server.setRequestHandler).toBeDefined()
|
||||
})
|
||||
})
|
||||
})
|
||||
src/main/mcpServers/hub/__tests__/runtime.test.ts (new file, 159 lines)
@@ -0,0 +1,159 @@
|
||||
import { describe, expect, it, vi } from 'vitest'
|
||||
|
||||
import { Runtime } from '../runtime'
|
||||
import type { GeneratedTool } from '../types'
|
||||
|
||||
vi.mock('../mcp-bridge', () => ({
|
||||
callMcpTool: vi.fn(async (toolId: string, params: unknown) => {
|
||||
if (toolId === 'server__failing_tool') {
|
||||
throw new Error('Tool failed')
|
||||
}
|
||||
return { toolId, params, success: true }
|
||||
})
|
||||
}))
|
||||
|
||||
const createMockTool = (partial: Partial<GeneratedTool>): GeneratedTool => ({
|
||||
serverId: 'server1',
|
||||
serverName: 'server1',
|
||||
toolName: 'tool',
|
||||
functionName: 'server1_mockTool',
|
||||
jsCode: 'async function server1_mockTool() {}',
|
||||
fn: async (params) => ({ result: params }),
|
||||
signature: '{}',
|
||||
returns: 'unknown',
|
||||
...partial
|
||||
})
|
||||
|
||||
describe('Runtime', () => {
|
||||
describe('execute', () => {
|
||||
it('executes simple code and returns result', async () => {
|
||||
const runtime = new Runtime()
|
||||
const tools: GeneratedTool[] = []
|
||||
|
||||
const result = await runtime.execute('return 1 + 1', tools)
|
||||
|
||||
expect(result.result).toBe(2)
|
||||
expect(result.error).toBeUndefined()
|
||||
})
|
||||
|
||||
it('executes async code', async () => {
|
||||
const runtime = new Runtime()
|
||||
const tools: GeneratedTool[] = []
|
||||
|
||||
const result = await runtime.execute('return await Promise.resolve(42)', tools)
|
||||
|
||||
expect(result.result).toBe(42)
|
||||
})
|
||||
|
||||
it('calls tool functions', async () => {
|
||||
const runtime = new Runtime()
|
||||
const tools = [
|
||||
createMockTool({
|
||||
functionName: 'searchRepos',
|
||||
fn: async (params) => ({ repos: ['repo1', 'repo2'], query: params })
|
||||
})
|
||||
]
|
||||
|
||||
const result = await runtime.execute('return await searchRepos({ query: "test" })', tools)
|
||||
|
||||
expect(result.result).toEqual({ toolId: 'searchRepos', params: { query: 'test' }, success: true })
|
||||
})
|
||||
|
||||
it('captures console logs', async () => {
|
||||
const runtime = new Runtime()
|
||||
const tools: GeneratedTool[] = []
|
||||
|
||||
const result = await runtime.execute(
|
||||
`
|
||||
console.log("hello");
|
||||
console.warn("warning");
|
||||
return "done"
|
||||
`,
|
||||
tools
|
||||
)
|
||||
|
||||
expect(result.result).toBe('done')
|
||||
expect(result.logs).toContain('[log] hello')
|
||||
expect(result.logs).toContain('[warn] warning')
|
||||
})
|
||||
|
||||
it('handles errors gracefully', async () => {
|
||||
const runtime = new Runtime()
|
||||
const tools: GeneratedTool[] = []
|
||||
|
||||
const result = await runtime.execute('throw new Error("test error")', tools)
|
||||
|
||||
expect(result.result).toBeUndefined()
|
||||
expect(result.error).toBe('test error')
|
||||
expect(result.isError).toBe(true)
|
||||
})
|
||||
|
||||
it('supports parallel helper', async () => {
|
||||
const runtime = new Runtime()
|
||||
const tools: GeneratedTool[] = []
|
||||
|
||||
const result = await runtime.execute(
|
||||
`
|
||||
const results = await parallel(
|
||||
Promise.resolve(1),
|
||||
Promise.resolve(2),
|
||||
Promise.resolve(3)
|
||||
);
|
||||
return results
|
||||
`,
|
||||
tools
|
||||
)
|
||||
|
||||
expect(result.result).toEqual([1, 2, 3])
|
||||
})
|
||||
|
||||
it('supports settle helper', async () => {
|
||||
const runtime = new Runtime()
|
||||
const tools: GeneratedTool[] = []
|
||||
|
||||
const result = await runtime.execute(
|
||||
`
|
||||
const results = await settle(
|
||||
Promise.resolve(1),
|
||||
Promise.reject(new Error("fail"))
|
||||
);
|
||||
return results.map(r => r.status)
|
||||
`,
|
||||
tools
|
||||
)
|
||||
|
||||
expect(result.result).toEqual(['fulfilled', 'rejected'])
|
||||
})
|
||||
|
||||
it('returns last expression when no explicit return', async () => {
|
||||
const runtime = new Runtime()
|
||||
const tools: GeneratedTool[] = []
|
||||
|
||||
const result = await runtime.execute(
|
||||
`
|
||||
const x = 10;
|
||||
const y = 20;
|
||||
return x + y
|
||||
`,
|
||||
tools
|
||||
)
|
||||
|
||||
expect(result.result).toBe(30)
|
||||
})
|
||||
|
||||
it('stops execution when a tool throws', async () => {
|
||||
const runtime = new Runtime()
|
||||
const tools = [
|
||||
createMockTool({
|
||||
functionName: 'server__failing_tool'
|
||||
})
|
||||
]
|
||||
|
||||
const result = await runtime.execute('return await server__failing_tool({})', tools)
|
||||
|
||||
expect(result.result).toBeUndefined()
|
||||
expect(result.error).toBe('Tool failed')
|
||||
expect(result.isError).toBe(true)
|
||||
})
|
||||
})
|
||||
})
|
||||
src/main/mcpServers/hub/__tests__/search.test.ts (new file, 118 lines)
@@ -0,0 +1,118 @@
|
||||
import { describe, expect, it } from 'vitest'

import { searchTools } from '../search'
import type { GeneratedTool } from '../types'

const createMockTool = (partial: Partial<GeneratedTool>): GeneratedTool => {
  const functionName = partial.functionName || 'server1_tool'
  return {
    serverId: 'server1',
    serverName: 'server1',
    toolName: partial.toolName || 'tool',
    functionName,
    jsCode: `async function ${functionName}() {}`,
    fn: async () => ({}),
    signature: '{}',
    returns: 'unknown',
    ...partial
  }
}

describe('search', () => {
  describe('searchTools', () => {
    it('returns all tools when query is empty', () => {
      const tools = [
        createMockTool({ toolName: 'tool1', functionName: 'tool1' }),
        createMockTool({ toolName: 'tool2', functionName: 'tool2' })
      ]

      const result = searchTools(tools, { query: '' })

      expect(result.total).toBe(2)
      expect(result.tools).toContain('tool1')
      expect(result.tools).toContain('tool2')
    })

    it('filters tools by single keyword', () => {
      const tools = [
        createMockTool({ toolName: 'search_repos', functionName: 'searchRepos' }),
        createMockTool({ toolName: 'get_user', functionName: 'getUser' }),
        createMockTool({ toolName: 'search_users', functionName: 'searchUsers' })
      ]

      const result = searchTools(tools, { query: 'search' })

      expect(result.total).toBe(2)
      expect(result.tools).toContain('searchRepos')
      expect(result.tools).toContain('searchUsers')
      expect(result.tools).not.toContain('getUser')
    })

    it('supports OR matching with comma-separated keywords', () => {
      const tools = [
        createMockTool({ toolName: 'browser_open', functionName: 'browserOpen' }),
        createMockTool({ toolName: 'chrome_launch', functionName: 'chromeLaunch' }),
        createMockTool({ toolName: 'file_read', functionName: 'fileRead' })
      ]

      const result = searchTools(tools, { query: 'browser,chrome' })

      expect(result.total).toBe(2)
      expect(result.tools).toContain('browserOpen')
      expect(result.tools).toContain('chromeLaunch')
      expect(result.tools).not.toContain('fileRead')
    })

    it('matches against description', () => {
      const tools = [
        createMockTool({
          toolName: 'launch',
          functionName: 'launch',
          description: 'Launch a browser instance'
        }),
        createMockTool({
          toolName: 'close',
          functionName: 'close',
          description: 'Close a window'
        })
      ]

      const result = searchTools(tools, { query: 'browser' })

      expect(result.total).toBe(1)
      expect(result.tools).toContain('launch')
    })

    it('respects limit parameter', () => {
      const tools = Array.from({ length: 20 }, (_, i) =>
        createMockTool({ toolName: `tool${i}`, functionName: `server1_tool${i}` })
      )

      const result = searchTools(tools, { query: 'tool', limit: 5 })

      expect(result.total).toBe(20)
      const matches = (result.tools.match(/async function server1_tool\d+/g) || []).length
      expect(matches).toBe(5)
    })

    it('is case insensitive', () => {
      const tools = [createMockTool({ toolName: 'SearchRepos', functionName: 'searchRepos' })]

      const result = searchTools(tools, { query: 'SEARCH' })

      expect(result.total).toBe(1)
    })

    it('ranks exact matches higher', () => {
      const tools = [
        createMockTool({ toolName: 'searching', functionName: 'searching' }),
        createMockTool({ toolName: 'search', functionName: 'search' }),
        createMockTool({ toolName: 'search_more', functionName: 'searchMore' })
      ]

      const result = searchTools(tools, { query: 'search', limit: 1 })

      expect(result.tools).toContain('function search(')
    })
  })
})
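
The `searchTools` implementation under test lives in src/main/mcpServers/hub/search.ts, which is part of this change set but not shown in this excerpt. A minimal sketch consistent with the expectations above (comma-separated OR keywords, case-insensitive matching across tool name, function name, description and server name, exact name matches ranked first, `total` counting every match while `tools` concatenates the jsCode of at most `limit` matches, default 10) might look like this; it is an assumption, not the shipped code:

// Illustrative sketch only - not the actual search.ts from this diff.
import type { GeneratedTool, SearchQuery } from './types' // types assumed from this diff

export function searchTools(tools: GeneratedTool[], query: SearchQuery): { total: number; tools: string } {
  const limit = Math.min(query.limit ?? 10, 50)
  const keywords = query.query
    .split(',')
    .map((k) => k.trim().toLowerCase())
    .filter(Boolean)

  // An empty query matches every tool.
  const matches = tools.filter((tool) => {
    if (keywords.length === 0) return true
    const haystack = [tool.toolName, tool.functionName, tool.description ?? '', tool.serverName]
      .join(' ')
      .toLowerCase()
    return keywords.some((k) => haystack.includes(k))
  })

  // Rank exact name matches ahead of partial matches, then cut to the limit.
  const ranked = [...matches].sort((a, b) => {
    const exact = (t: GeneratedTool) =>
      keywords.includes(t.toolName.toLowerCase()) || keywords.includes(t.functionName.toLowerCase()) ? 0 : 1
    return exact(a) - exact(b)
  })

  return {
    total: matches.length,
    tools: ranked
      .slice(0, limit)
      .map((t) => t.jsCode)
      .join('\n\n')
  }
}
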
152 src/main/mcpServers/hub/generator.ts Normal file
@@ -0,0 +1,152 @@
import { generateMcpToolFunctionName } from '@shared/mcp'
import type { MCPTool } from '@types'

import type { GeneratedTool } from './types'

type PropertySchema = Record<string, unknown>
type InputSchema = {
  type?: string
  properties?: Record<string, PropertySchema>
  required?: string[]
}

function schemaTypeToTS(prop: Record<string, unknown>): string {
  const type = prop.type as string | string[] | undefined
  const enumValues = prop.enum as unknown[] | undefined

  if (enumValues && Array.isArray(enumValues)) {
    return enumValues.map((v) => (typeof v === 'string' ? `"${v}"` : String(v))).join(' | ')
  }

  if (Array.isArray(type)) {
    return type.map((t) => primitiveTypeToTS(t)).join(' | ')
  }

  if (type === 'array') {
    const items = prop.items as Record<string, unknown> | undefined
    if (items) {
      return `${schemaTypeToTS(items)}[]`
    }
    return 'unknown[]'
  }

  if (type === 'object') {
    return 'object'
  }

  return primitiveTypeToTS(type)
}

function primitiveTypeToTS(type: string | undefined): string {
  switch (type) {
    case 'string':
      return 'string'
    case 'number':
    case 'integer':
      return 'number'
    case 'boolean':
      return 'boolean'
    case 'null':
      return 'null'
    default:
      return 'unknown'
  }
}

function jsonSchemaToSignature(schema: Record<string, unknown> | undefined): string {
  if (!schema || typeof schema !== 'object') {
    return '{}'
  }

  const properties = schema.properties as Record<string, Record<string, unknown>> | undefined
  if (!properties) {
    return '{}'
  }

  const required = (schema.required as string[]) || []
  const parts: string[] = []

  for (const [key, prop] of Object.entries(properties)) {
    const isRequired = required.includes(key)
    const typeStr = schemaTypeToTS(prop)
    parts.push(`${key}${isRequired ? '' : '?'}: ${typeStr}`)
  }

  return `{ ${parts.join(', ')} }`
}

function generateJSDoc(tool: MCPTool, inputSchema: InputSchema | undefined, returns: string): string {
  const lines: string[] = ['/**']

  if (tool.description) {
    const desc = tool.description.split('\n')[0]
    lines.push(` * ${desc}`)
  }

  const properties = inputSchema?.properties || {}
  const required = inputSchema?.required || []

  if (Object.keys(properties).length > 0) {
    lines.push(` * @param {Object} params`)
    for (const [name, prop] of Object.entries(properties)) {
      const isReq = required.includes(name)
      const type = schemaTypeToTS(prop)
      const paramName = isReq ? `params.${name}` : `[params.${name}]`
      const desc = (prop.description as string)?.split('\n')[0] || ''
      lines.push(` * @param {${type}} ${paramName} ${desc}`)
    }
  }

  lines.push(` * @returns {Promise<${returns}>}`)
  lines.push(` */`)

  return lines.join('\n')
}

export function generateToolFunction(
  tool: MCPTool,
  existingNames: Set<string>,
  callToolFn: (functionName: string, params: unknown) => Promise<unknown>
): GeneratedTool {
  const functionName = generateMcpToolFunctionName(tool.serverName, tool.name, existingNames)

  const inputSchema = tool.inputSchema as InputSchema | undefined
  const outputSchema = tool.outputSchema as Record<string, unknown> | undefined

  const signature = jsonSchemaToSignature(inputSchema)
  const returns = outputSchema ? jsonSchemaToSignature(outputSchema) : 'unknown'

  const jsDoc = generateJSDoc(tool, inputSchema, returns)

  const jsCode = `${jsDoc}
async function ${functionName}(params) {
  return await __callTool("${functionName}", params);
}`

  const fn = async (params: unknown): Promise<unknown> => {
    return await callToolFn(functionName, params)
  }

  return {
    serverId: tool.serverId,
    serverName: tool.serverName,
    toolName: tool.name,
    functionName,
    jsCode,
    fn,
    signature,
    returns,
    description: tool.description
  }
}

export function generateToolsCode(tools: GeneratedTool[]): string {
  if (tools.length === 0) {
    return '// No tools available'
  }

  const header = `// ${tools.length} tool(s). ALWAYS use: const r = await ToolName({...}); return r;`
  const code = tools.map((t) => t.jsCode).join('\n\n')

  return header + '\n\n' + code
}
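
For orientation, here is what `generateToolFunction` would produce for a made-up tool; the server, tool name and schema below are invented for illustration, and the exact wrapper name depends on generateMcpToolFunctionName:

// Illustrative usage only - the tool below is hypothetical.
import { generateToolFunction, generateToolsCode } from './generator'
import type { MCPTool } from '@types'

const fakeTool = {
  serverId: 'srv-1',
  serverName: 'github',
  name: 'get_issue',
  description: 'Fetch a single issue',
  inputSchema: {
    type: 'object',
    properties: {
      id: { type: 'string', description: 'Issue id' },
      verbose: { type: 'boolean' }
    },
    required: ['id']
  }
} as unknown as MCPTool // only the fields the generator reads are filled in

const generated = generateToolFunction(fakeTool, new Set(), async () => ({}))
// generated.signature === '{ id: string, verbose?: boolean }'
// generated.jsCode is a JSDoc'd `async function <name>(params)` that forwards to __callTool("<name>", params)
console.log(generateToolsCode([generated]))
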
184 src/main/mcpServers/hub/index.ts Normal file
@@ -0,0 +1,184 @@
import { loggerService } from '@logger'
import { CacheService } from '@main/services/CacheService'
import { Server } from '@modelcontextprotocol/sdk/server/index.js'
import { CallToolRequestSchema, ErrorCode, ListToolsRequestSchema, McpError } from '@modelcontextprotocol/sdk/types.js'

import { generateToolFunction } from './generator'
import { callMcpTool, clearToolMap, listAllTools, syncToolMapFromGeneratedTools } from './mcp-bridge'
import { Runtime } from './runtime'
import { searchTools } from './search'
import type { ExecInput, GeneratedTool, SearchQuery } from './types'

const logger = loggerService.withContext('MCPServer:Hub')
const TOOLS_CACHE_KEY = 'hub:tools'
const TOOLS_CACHE_TTL = 60 * 1000 // 1 minute

/**
 * Hub MCP Server - A meta-server that aggregates all active MCP servers.
 *
 * This server is NOT included in builtinMCPServers because:
 * 1. It aggregates tools from all other MCP servers, not a standalone tool provider
 * 2. It's designed for LLM "code mode" - enabling AI to discover and call tools programmatically
 * 3. It should be auto-enabled when code mode features are used, not manually installed by users
 *
 * The server exposes two tools:
 * - `search`: Find available tools by keywords, returns JS function signatures
 * - `exec`: Execute JavaScript code that calls discovered tools
 */
export class HubServer {
  public server: Server
  private runtime: Runtime

  constructor() {
    this.runtime = new Runtime()

    this.server = new Server(
      {
        name: 'hub-server',
        version: '1.0.0'
      },
      {
        capabilities: {
          tools: {}
        }
      }
    )

    this.setupRequestHandlers()
  }

  private setupRequestHandlers(): void {
    this.server.setRequestHandler(ListToolsRequestSchema, async () => {
      return {
        tools: [
          {
            name: 'search',
            description:
              'Search for available MCP tools by keywords. Use this FIRST to discover tools. Returns JavaScript async function declarations with JSDoc showing exact function names, parameters, and return types for use in `exec`.',
            inputSchema: {
              type: 'object',
              properties: {
                query: {
                  type: 'string',
                  description:
                    'Comma-separated search keywords. A tool matches if ANY keyword appears in its name, description, or server name. Example: "chrome,browser,tab" matches tools related to Chrome OR browser OR tabs.'
                },
                limit: {
                  type: 'number',
                  description: 'Maximum number of tools to return (default: 10, max: 50)'
                }
              },
              required: ['query']
            }
          },
          {
            name: 'exec',
            description:
              'Execute JavaScript that calls MCP tools discovered via `search`. IMPORTANT: You MUST explicitly `return` the final value, or the result will be `undefined`.',
            inputSchema: {
              type: 'object',
              properties: {
                code: {
                  type: 'string',
                  description:
                    'JavaScript code to execute. The code runs inside an async context, so use `await` directly. Do NOT wrap your code in `(async () => { ... })()` - this causes double-wrapping and returns undefined. All discovered tools are async functions (call as `await ToolName(params)`). Helpers: `parallel(...promises)`, `settle(...promises)`, `console.*`. You MUST `return` the final value. Examples: `const r = await Tool({ id: "1" }); return r` or `return await Tool({ x: 1 })`'
                }
              },
              required: ['code']
            }
          }
        ]
      }
    })

    this.server.setRequestHandler(CallToolRequestSchema, async (request) => {
      const { name, arguments: args } = request.params

      if (!args) {
        throw new McpError(ErrorCode.InvalidParams, 'No arguments provided')
      }

      try {
        switch (name) {
          case 'search':
            return await this.handleSearch(args as unknown as SearchQuery)
          case 'exec':
            return await this.handleExec(args as unknown as ExecInput)
          default:
            throw new McpError(ErrorCode.MethodNotFound, `Unknown tool: ${name}`)
        }
      } catch (error) {
        if (error instanceof McpError) {
          throw error
        }
        logger.error(`Error executing tool ${name}:`, error as Error)
        throw new McpError(
          ErrorCode.InternalError,
          `Error executing tool ${name}: ${error instanceof Error ? error.message : String(error)}`
        )
      }
    })
  }

  private async fetchTools(): Promise<GeneratedTool[]> {
    const cached = CacheService.get<GeneratedTool[]>(TOOLS_CACHE_KEY)
    if (cached) {
      logger.debug('Returning cached tools')
      syncToolMapFromGeneratedTools(cached)
      return cached
    }

    logger.debug('Fetching fresh tools')
    const allTools = await listAllTools()
    const existingNames = new Set<string>()
    const tools = allTools.map((tool) => generateToolFunction(tool, existingNames, callMcpTool))
    CacheService.set(TOOLS_CACHE_KEY, tools, TOOLS_CACHE_TTL)
    syncToolMapFromGeneratedTools(tools)
    return tools
  }

  invalidateCache(): void {
    CacheService.remove(TOOLS_CACHE_KEY)
    clearToolMap()
    logger.debug('Tools cache invalidated')
  }

  private async handleSearch(query: SearchQuery) {
    if (!query.query || typeof query.query !== 'string') {
      throw new McpError(ErrorCode.InvalidParams, 'query parameter is required and must be a string')
    }

    const tools = await this.fetchTools()
    const result = searchTools(tools, query)

    return {
      content: [
        {
          type: 'text',
          text: JSON.stringify(result, null, 2)
        }
      ]
    }
  }

  private async handleExec(input: ExecInput) {
    if (!input.code || typeof input.code !== 'string') {
      throw new McpError(ErrorCode.InvalidParams, 'code parameter is required and must be a string')
    }

    const tools = await this.fetchTools()
    const result = await this.runtime.execute(input.code, tools)

    return {
      content: [
        {
          type: 'text',
          text: JSON.stringify(result, null, 2)
        }
      ],
      isError: result.isError
    }
  }
}

export default HubServer
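
Putting the two tools together, a client call to `exec` carries plain JavaScript in its `code` argument. A hypothetical payload, assuming `search` previously returned a function named github__get_issue (the name and fields are invented for illustration):

// Illustrative sketch only - the tool function and its result fields are made up.
const execArgs = {
  code: `
    const issue = await github__get_issue({ id: "123" })
    console.log("fetched issue", issue.title)   // console output is collected into the exec logs
    return { title: issue.title }               // MUST return, or the result is undefined
  `
}
// A client would then send: { name: 'exec', arguments: execArgs }
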
96 src/main/mcpServers/hub/mcp-bridge.ts Normal file
@@ -0,0 +1,96 @@
/**
 * Bridge module for Hub server to access MCPService.
 * Re-exports the methods needed by tool-registry and runtime.
 */
import mcpService from '@main/services/MCPService'
import { generateMcpToolFunctionName } from '@shared/mcp'
import type { MCPCallToolResponse, MCPTool, MCPToolResultContent } from '@types'

import type { GeneratedTool } from './types'

export const listAllTools = () => mcpService.listAllActiveServerTools()

const toolFunctionNameToIdMap = new Map<string, { serverId: string; toolName: string }>()

export async function refreshToolMap(): Promise<void> {
  const tools = await listAllTools()
  syncToolMapFromTools(tools)
}

export function syncToolMapFromTools(tools: MCPTool[]): void {
  toolFunctionNameToIdMap.clear()
  const existingNames = new Set<string>()
  for (const tool of tools) {
    const functionName = generateMcpToolFunctionName(tool.serverName, tool.name, existingNames)
    toolFunctionNameToIdMap.set(functionName, { serverId: tool.serverId, toolName: tool.name })
  }
}

export function syncToolMapFromGeneratedTools(tools: GeneratedTool[]): void {
  toolFunctionNameToIdMap.clear()
  for (const tool of tools) {
    toolFunctionNameToIdMap.set(tool.functionName, { serverId: tool.serverId, toolName: tool.toolName })
  }
}

export function clearToolMap(): void {
  toolFunctionNameToIdMap.clear()
}

export const callMcpTool = async (functionName: string, params: unknown, callId?: string): Promise<unknown> => {
  const toolInfo = toolFunctionNameToIdMap.get(functionName)
  if (!toolInfo) {
    await refreshToolMap()
    const retryToolInfo = toolFunctionNameToIdMap.get(functionName)
    if (!retryToolInfo) {
      throw new Error(`Tool not found: ${functionName}`)
    }
    const toolId = `${retryToolInfo.serverId}__${retryToolInfo.toolName}`
    const result = await mcpService.callToolById(toolId, params, callId)
    throwIfToolError(result)
    return extractToolResult(result)
  }
  const toolId = `${toolInfo.serverId}__${toolInfo.toolName}`
  const result = await mcpService.callToolById(toolId, params, callId)
  throwIfToolError(result)
  return extractToolResult(result)
}

export const abortMcpTool = async (callId: string): Promise<boolean> => {
  return mcpService.abortTool(null as unknown as Electron.IpcMainInvokeEvent, callId)
}

function extractToolResult(result: MCPCallToolResponse): unknown {
  if (!result.content || result.content.length === 0) {
    return null
  }

  const textContent = result.content.find((c) => c.type === 'text')
  if (textContent?.text) {
    try {
      return JSON.parse(textContent.text)
    } catch {
      return textContent.text
    }
  }

  return result.content
}

function throwIfToolError(result: MCPCallToolResponse): void {
  if (!result.isError) {
    return
  }

  const textContent = extractTextContent(result.content)
  throw new Error(textContent ?? 'Tool execution failed')
}

function extractTextContent(content: MCPToolResultContent[] | undefined): string | undefined {
  if (!content || content.length === 0) {
    return undefined
  }

  const textBlock = content.find((item) => item.type === 'text' && item.text)
  return textBlock?.text
}
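
The map kept by this bridge goes from generated function names to serverId/toolName pairs. A hypothetical round trip, using made-up identifiers and only the GeneratedTool fields the bridge actually reads:

// Illustrative sketch only - identifiers are invented.
import { callMcpTool, syncToolMapFromGeneratedTools } from './mcp-bridge'
import type { GeneratedTool } from './types'

const registered: GeneratedTool[] = [
  {
    serverId: 'srv-1',
    serverName: 'github',
    toolName: 'get_issue',
    functionName: 'github__get_issue',
    jsCode: '',
    fn: async () => ({}),
    signature: '{}',
    returns: 'unknown'
  }
]

syncToolMapFromGeneratedTools(registered)

// Forwards to mcpService.callToolById('srv-1__get_issue', { id: '123' });
// text content is JSON.parse'd when possible, and isError responses are rethrown as Errors.
void callMcpTool('github__get_issue', { id: '123' })
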
170 src/main/mcpServers/hub/runtime.ts Normal file
@@ -0,0 +1,170 @@
import crypto from 'node:crypto'
import { Worker } from 'node:worker_threads'

import { loggerService } from '@logger'

import { abortMcpTool, callMcpTool } from './mcp-bridge'
import type {
  ExecOutput,
  GeneratedTool,
  HubWorkerCallToolMessage,
  HubWorkerExecMessage,
  HubWorkerMessage,
  HubWorkerResultMessage
} from './types'
import { hubWorkerSource } from './worker'

const logger = loggerService.withContext('MCPServer:Hub:Runtime')

const MAX_LOGS = 1000
const EXECUTION_TIMEOUT = 60000

export class Runtime {
  async execute(code: string, tools: GeneratedTool[]): Promise<ExecOutput> {
    return await new Promise<ExecOutput>((resolve) => {
      const logs: string[] = []
      const activeCallIds = new Map<string, string>()
      let finished = false
      let timedOut = false
      let timeoutId: NodeJS.Timeout | null = null

      const worker = new Worker(hubWorkerSource, { eval: true })

      const addLog = (entry: string) => {
        if (logs.length >= MAX_LOGS) {
          return
        }
        logs.push(entry)
      }

      const finalize = async (output: ExecOutput, terminateWorker = true) => {
        if (finished) {
          return
        }
        finished = true
        if (timeoutId) {
          clearTimeout(timeoutId)
        }
        worker.removeAllListeners()
        if (terminateWorker) {
          try {
            await worker.terminate()
          } catch (error) {
            logger.warn('Failed to terminate exec worker', error as Error)
          }
        }
        resolve(output)
      }

      const abortActiveTools = async () => {
        const callIds = Array.from(activeCallIds.values())
        activeCallIds.clear()
        if (callIds.length === 0) {
          return
        }
        await Promise.allSettled(callIds.map((callId) => abortMcpTool(callId)))
      }

      const handleToolCall = async (message: HubWorkerCallToolMessage) => {
        if (finished || timedOut) {
          return
        }
        const callId = crypto.randomUUID()
        activeCallIds.set(message.requestId, callId)

        try {
          const result = await callMcpTool(message.functionName, message.params, callId)
          if (finished || timedOut) {
            return
          }
          worker.postMessage({ type: 'toolResult', requestId: message.requestId, result })
        } catch (error) {
          if (finished || timedOut) {
            return
          }
          const errorMessage = error instanceof Error ? error.message : String(error)
          worker.postMessage({ type: 'toolError', requestId: message.requestId, error: errorMessage })
        } finally {
          activeCallIds.delete(message.requestId)
        }
      }

      const handleResult = (message: HubWorkerResultMessage) => {
        const resolvedLogs = message.logs && message.logs.length > 0 ? message.logs : logs
        void finalize({
          result: message.result,
          logs: resolvedLogs.length > 0 ? resolvedLogs : undefined
        })
      }

      const handleError = (errorMessage: string, messageLogs?: string[], terminateWorker = true) => {
        const resolvedLogs = messageLogs && messageLogs.length > 0 ? messageLogs : logs
        void finalize(
          {
            result: undefined,
            logs: resolvedLogs.length > 0 ? resolvedLogs : undefined,
            error: errorMessage,
            isError: true
          },
          terminateWorker
        )
      }

      const handleMessage = (message: HubWorkerMessage) => {
        if (!message || typeof message !== 'object') {
          return
        }
        switch (message.type) {
          case 'log':
            addLog(message.entry)
            break
          case 'callTool':
            void handleToolCall(message)
            break
          case 'result':
            handleResult(message)
            break
          case 'error':
            handleError(message.error, message.logs)
            break
          default:
            break
        }
      }

      timeoutId = setTimeout(() => {
        timedOut = true
        void (async () => {
          await abortActiveTools()
          try {
            await worker.terminate()
          } catch (error) {
            logger.warn('Failed to terminate exec worker after timeout', error as Error)
          }
          handleError(`Execution timed out after ${EXECUTION_TIMEOUT}ms`, undefined, false)
        })()
      }, EXECUTION_TIMEOUT)

      worker.on('message', handleMessage)
      worker.on('error', (error) => {
        logger.error('Worker execution error', error)
        handleError(error instanceof Error ? error.message : String(error))
      })
      worker.on('exit', (code) => {
        if (finished || timedOut) {
          return
        }
        const message = code === 0 ? 'Exec worker exited unexpectedly' : `Exec worker exited with code ${code}`
        logger.error(message)
        handleError(message, undefined, false)
      })

      const execMessage: HubWorkerExecMessage = {
        type: 'exec',
        code,
        tools: tools.map((tool) => ({ functionName: tool.functionName }))
      }
      worker.postMessage(execMessage)
    })
  }
}
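
The worker source (./worker) and the message types (./types) are added elsewhere in this diff but not shown in this excerpt. Inferred from how runtime.ts uses them above, the message shapes are plausibly the following sketch; field names are reconstructed from usage, not confirmed against the actual types file:

// Illustrative sketch only - shapes inferred from runtime.ts, not the shipped types.ts.
type HubWorkerExecMessage = { type: 'exec'; code: string; tools: { functionName: string }[] }
type HubWorkerCallToolMessage = { type: 'callTool'; requestId: string; functionName: string; params: unknown }
type HubWorkerLogMessage = { type: 'log'; entry: string }
type HubWorkerResultMessage = { type: 'result'; result: unknown; logs?: string[] }
type HubWorkerErrorMessage = { type: 'error'; error: string; logs?: string[] }
type HubWorkerMessage = HubWorkerCallToolMessage | HubWorkerLogMessage | HubWorkerResultMessage | HubWorkerErrorMessage

// Host -> worker replies to a callTool request:
type HubWorkerToolResultReply = { type: 'toolResult'; requestId: string; result: unknown }
type HubWorkerToolErrorReply = { type: 'toolError'; requestId: string; error: string }

// Result shape returned by Runtime.execute:
type ExecOutput = { result: unknown; logs?: string[]; error?: string; isError?: boolean }
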
Some files were not shown because too many files have changed in this diff.