fix(gateway): streamline gateway function

This commit is contained in:
suyao 2026-01-07 21:15:31 +08:00
parent 7211efb2b7
commit 9607ac0798
No known key found for this signature in database
11 changed files with 281 additions and 260 deletions

View File

@ -5,219 +5,25 @@
* - Model group routing: /{groupName}/v1/... routes use the group's configured model
* - Endpoint access control based on enabledEndpoints configuration
* - Model injection for simplified external app integration
*
* For assistant mode, this middleware only sets model = "assistant:{assistantId}"
* The actual assistant config resolution and parameter overrides happen in ProxyStreamService.
*/
import { buildFunctionCallToolName } from '@main/utils/mcp'
import type { MCPTool } from '@types'
import type { NextFunction, Request, Response } from 'express'
import { loggerService } from '../../services/LoggerService'
import { reduxService } from '../../services/ReduxService'
import { config } from '../config'
import { mcpApiService } from '../services/mcp'
import { getMCPServersFromRedux } from '../utils/mcp'
const logger = loggerService.withContext('GatewayMiddleware')
// Shape of an assistant entry as stored in the Redux assistants slice.
// Only the fields the gateway needs are modeled here; the full assistant
// object may carry more properties.
type AssistantConfig = {
  id: string
  name: string
  // System prompt injected into proxied requests (field depends on wire format).
  prompt?: string
  // Explicitly selected model; callers fall back to defaultModel when absent.
  model?: { id: string; provider: string }
  defaultModel?: { id: string; provider: string }
  // Per-parameter overrides; each value applies only when its enable flag is set.
  settings?: {
    streamOutput?: boolean
    enableTemperature?: boolean
    temperature?: number
    enableTopP?: boolean
    topP?: number
    enableMaxTokens?: boolean
    maxTokens?: number
  }
  // MCP servers whose tools are exposed to this assistant.
  mcpServers?: Array<{ id: string }>
  // Optional allow-list of namespaced tool names; when present, tools not on
  // the list are filtered out.
  allowed_tools?: string[]
}
// Normalized, format-agnostic MCP tool description; converted to each API's
// wire shape by the mapToolsFor* helpers.
type ToolDefinition = {
  name: string
  description?: string
  inputSchema: MCPTool['inputSchema']
}
/**
 * Map a request path to the API wire format it implies, or null when the
 * path is not one of the supported chat endpoints.
 */
const getEndpointFormat = (endpoint: string): 'openai' | 'anthropic' | 'responses' | null => {
  const routes: Array<['openai' | 'anthropic' | 'responses', string]> = [
    ['openai', '/v1/chat/completions'],
    ['anthropic', '/v1/messages'],
    ['responses', '/v1/responses']
  ]
  for (const [format, prefix] of routes) {
    if (endpoint.startsWith(prefix)) {
      return format
    }
  }
  return null
}
/**
 * Derive the "provider:modelId" identifier for an assistant, preferring the
 * explicitly configured model over the default. Returns null when neither
 * carries both a provider and an id.
 */
const buildAssistantModelId = (assistant: AssistantConfig): string | null => {
  const selected = assistant.model ?? assistant.defaultModel
  if (selected?.provider && selected?.id) {
    return `${selected.provider}:${selected.id}`
  }
  return null
}
/**
 * Replace the request's system prompt with the assistant's prompt, using the
 * field appropriate to each wire format:
 *  - openai:    strip caller system/developer messages, prepend a system message
 *  - responses: set `instructions`
 *  - anthropic: set `system`
 * Note: for the non-openai formats the prompt is written even when empty, so
 * the assistant preset always overrides a caller-supplied value.
 */
const applyAssistantMessageOverrides = (
  body: Record<string, any>,
  assistant: AssistantConfig,
  format: 'openai' | 'anthropic' | 'responses'
): Record<string, any> => {
  const result = { ...body }
  const prompt = assistant.prompt ?? ''
  switch (format) {
    case 'openai': {
      const existing = Array.isArray(result.messages) ? result.messages : []
      const withoutSystem = existing.filter(
        (message) => message?.role !== 'system' && message?.role !== 'developer'
      )
      if (prompt.trim().length > 0) {
        withoutSystem.unshift({ role: 'system', content: prompt })
      }
      result.messages = withoutSystem
      break
    }
    case 'responses':
      result.instructions = prompt
      break
    default:
      result.system = prompt
  }
  return result
}
/**
 * Overwrite sampling/streaming parameters on the request body from the
 * assistant's settings. Disabled settings actively remove any caller-supplied
 * value so the preset always wins. `tool_choice` is always dropped because
 * tool selection is controlled by the assistant's MCP configuration.
 */
const applyAssistantParameterOverrides = (
  body: Record<string, any>,
  assistant: AssistantConfig,
  format: 'openai' | 'anthropic' | 'responses'
): Record<string, any> => {
  const result = { ...body }
  const settings = assistant.settings ?? {}

  if (typeof settings.streamOutput === 'boolean') {
    result.stream = settings.streamOutput
  }

  if (settings.enableTemperature && typeof settings.temperature === 'number') {
    result.temperature = settings.temperature
  } else {
    delete result.temperature
  }

  if (settings.enableTopP && typeof settings.topP === 'number') {
    result.top_p = settings.topP
  } else {
    delete result.top_p
  }

  // The Responses API names the token limit differently; clear both fields,
  // then set only the one matching the target format (when enabled).
  const maxTokens =
    settings.enableMaxTokens && typeof settings.maxTokens === 'number' ? settings.maxTokens : undefined
  delete result.max_tokens
  delete result.max_output_tokens
  if (maxTokens !== undefined) {
    if (format === 'responses') {
      result.max_output_tokens = maxTokens
    } else {
      result.max_tokens = maxTokens
    }
  }

  delete result.tool_choice
  return result
}
/** Convert tool definitions to the OpenAI Chat Completions `tools` shape. */
const mapToolsForOpenAI = (tools: ToolDefinition[]) => {
  return tools.map(({ name, description, inputSchema }) => ({
    type: 'function',
    function: {
      name,
      description: description || '',
      parameters: inputSchema
    }
  }))
}
/** Convert tool definitions to the OpenAI Responses API `tools` shape (flat). */
const mapToolsForResponses = (tools: ToolDefinition[]) => {
  return tools.map(({ name, description, inputSchema }) => ({
    type: 'function',
    name,
    description: description || '',
    parameters: inputSchema
  }))
}
/** Convert tool definitions to the Anthropic Messages API `tools` shape. */
const mapToolsForAnthropic = (tools: ToolDefinition[]) => {
  return tools.map(({ name, description, inputSchema }) => ({
    name,
    description: description || '',
    input_schema: inputSchema
  }))
}
/**
 * Collect the MCP tool definitions exposed to an assistant.
 *
 * Resolves the assistant's configured MCP servers against the Redux store,
 * skips inactive/unknown servers and per-server disabled tools, and — when
 * the assistant declares `allowed_tools` — keeps only tools on that allow
 * list. Tool names are namespaced via buildFunctionCallToolName so they are
 * unique across servers.
 *
 * Fix: server info is now fetched concurrently with Promise.all instead of
 * one `await` per loop iteration, so N servers cost one round-trip of wall
 * time instead of N sequential ones. The returned tool order still follows
 * the assistant's configured server order.
 */
const buildAssistantTools = async (assistant: AssistantConfig): Promise<ToolDefinition[]> => {
  const serverIds = assistant.mcpServers?.map((server) => server.id).filter(Boolean) ?? []
  if (serverIds.length === 0) {
    return []
  }
  const allowedTools = Array.isArray(assistant.allowed_tools) ? new Set(assistant.allowed_tools) : null
  const servers = await getMCPServersFromRedux()
  // Resolve ids to active server entries first so we only fetch info for
  // servers that can actually contribute tools.
  const activeServers = serverIds
    .map((serverId) => servers.find((entry) => entry.id === serverId))
    .filter((server): server is NonNullable<typeof server> => Boolean(server?.isActive))
  const infos = await Promise.all(activeServers.map((server) => mcpApiService.getServerInfo(server.id)))
  const tools: ToolDefinition[] = []
  activeServers.forEach((server, index) => {
    const info = infos[index]
    if (!info?.tools || !Array.isArray(info.tools)) {
      return
    }
    for (const tool of info.tools as Array<{
      name: string
      description?: string
      inputSchema?: MCPTool['inputSchema']
    }>) {
      if (!tool?.name || !tool.inputSchema) {
        continue
      }
      if (server.disabledTools?.includes(tool.name)) {
        continue
      }
      const toolName = buildFunctionCallToolName(info.name, tool.name)
      if (allowedTools && !allowedTools.has(toolName)) {
        continue
      }
      tools.push({
        name: toolName,
        description: tool.description,
        inputSchema: tool.inputSchema
      })
    }
  })
  return tools
}
/**
 * Look up an assistant by id in the Redux store.
 * @returns the matching assistant config, or null when none exists.
 */
const resolveAssistantById = async (assistantId: string): Promise<AssistantConfig | null> => {
  const all = (await reduxService.select('state.assistants.assistants')) as AssistantConfig[] | null
  const match = all?.find((candidate) => candidate.id === assistantId)
  return match ?? null
}
/**
* Gateway middleware for model group routing
*
* This middleware:
* 1. Extracts group name from URL path if present
* 2. Looks up the group by matching name directly
* 3. Injects the group's model into the request
* 3. Injects the group's model into the request (or assistant ID for assistant mode)
* 4. Checks if the endpoint is enabled
*/
export const gatewayMiddleware = async (req: Request, res: Response, next: NextFunction): Promise<void> => {
@ -240,69 +46,18 @@ export const gatewayMiddleware = async (req: Request, res: Response, next: NextF
return
}
const endpoint = req.path.startsWith('/') ? req.path : `/${req.path}`
const endpointFormat = getEndpointFormat(endpoint)
if (group.mode === 'assistant' && group.assistantId) {
if (!endpointFormat) {
res.status(400).json({
error: {
type: 'invalid_request_error',
message: `Unsupported endpoint ${endpoint}`
}
})
return
}
const assistant = await resolveAssistantById(group.assistantId)
if (!assistant) {
res.status(400).json({
error: {
type: 'invalid_request_error',
message: `Assistant '${group.assistantId}' not found`
}
})
return
}
const modelId = buildAssistantModelId(assistant)
if (!modelId) {
res.status(400).json({
error: {
type: 'invalid_request_error',
message: `Assistant '${group.assistantId}' is missing a model`
}
})
return
}
let nextBody = {
req.body = {
...req.body,
model: modelId
model: `assistant:${group.assistantId}`
}
nextBody = applyAssistantMessageOverrides(nextBody, assistant, endpointFormat)
nextBody = applyAssistantParameterOverrides(nextBody, assistant, endpointFormat)
const tools = await buildAssistantTools(assistant)
if (endpointFormat === 'openai') {
nextBody.tools = tools.length > 0 ? mapToolsForOpenAI(tools) : undefined
} else if (endpointFormat === 'responses') {
nextBody.tools = tools.length > 0 ? mapToolsForResponses(tools) : undefined
} else {
nextBody.tools = tools.length > 0 ? mapToolsForAnthropic(tools) : undefined
}
req.body = nextBody
logger.debug('Injected assistant preset into request', {
logger.debug('Using assistant mode', {
groupName,
assistantId: assistant.id,
model: modelId,
toolCount: tools.length
assistantId: group.assistantId
})
} else {
// Inject the group's model into the request
// Model mode: inject the group's model into the request
req.body = {
...req.body,
model: `${group.providerId}:${group.modelId}`

View File

@ -12,6 +12,7 @@ import { loggerService } from '@logger'
import { generateSignature as cherryaiGenerateSignature } from '@main/integration/cherryai'
import anthropicService from '@main/services/AnthropicService'
import copilotService from '@main/services/CopilotService'
import mcpService from '@main/services/MCPService'
import { reduxService } from '@main/services/ReduxService'
import {
type AiSdkConfig,
@ -25,6 +26,7 @@ import {
import { COPILOT_DEFAULT_HEADERS } from '@shared/aiCore/constant'
import type { MinimalProvider } from '@shared/types'
import { defaultAppHeaders } from '@shared/utils'
import type { Assistant, MCPTool } from '@types'
import type { Provider } from '@types'
import type { Provider as AiSdkProvider } from 'ai'
import { simulateStreamingMiddleware, stepCountIs, wrapLanguageModel } from 'ai'
@ -44,6 +46,108 @@ initializeSharedProviders({
error: (message, error) => logger.error(message, error)
})
/**
 * Resolve assistant configuration by ID from the Redux store.
 * Returns null when no assistant with the given id exists.
 */
async function resolveAssistantById(assistantId: string): Promise<Assistant | null> {
  const all = (await reduxService.select('state.assistants.assistants')) as Assistant[] | null
  const match = all?.find((candidate) => candidate.id === assistantId)
  return match ?? null
}
/**
 * Look up a provider by its id in the Redux store.
 * Returns null when the provider list is unavailable or has no match.
 */
async function getProviderById(providerId: string): Promise<Provider | null> {
  const providers = (await reduxService.select('state.llm.providers')) as Provider[] | null
  const match = providers?.find((provider) => provider.id === providerId)
  return match ?? null
}
/**
 * Fetch MCP tools for an assistant's configured servers.
 *
 * Servers are queried concurrently with Promise.allSettled, so a single
 * failing server only loses its own tools (logged as a warning) instead of
 * failing the whole request.
 */
async function buildAssistantTools(assistant: Assistant): Promise<MCPTool[]> {
  const servers = assistant.mcpServers ?? []
  if (servers.length === 0) {
    return []
  }
  logger.debug('Fetching MCP tools for assistant', { serverCount: servers.length })
  // NOTE(review): listTools' first argument is satisfied with `null as never`,
  // which suggests it is unused in this call path — confirm against MCPService.
  const settled = await Promise.allSettled(servers.map((server) => mcpService.listTools(null as never, server)))
  const tools: MCPTool[] = []
  settled.forEach((result, index) => {
    if (result.status === 'fulfilled') {
      tools.push(...result.value)
    } else {
      logger.warn('Failed to fetch tools from MCP server', {
        serverId: servers[index].id,
        serverName: servers[index].name,
        error: result.reason
      })
    }
  })
  logger.debug('Built MCP tools for assistant', { toolCount: tools.length })
  return tools
}
/**
 * Apply assistant overrides to request params based on input format.
 *
 * Only format-specific field differences are handled here:
 * - System prompt: system (Anthropic) / instructions (Responses) / messages[0] (OpenAI)
 * - MaxTokens: max_tokens / max_output_tokens
 *
 * AI SDK's MessageConverter handles the rest of the parameter transformations.
 */
function applyAssistantOverrides(params: InputParams, assistant: Assistant, inputFormat: InputFormat): InputParams {
  const result = { ...params } as Record<string, unknown>
  const settings = assistant.settings ?? {}
  const prompt = assistant.prompt?.trim() ?? ''

  // System prompt goes into whichever field the wire format expects.
  if (prompt.length > 0) {
    switch (inputFormat) {
      case 'openai': {
        const existing = Array.isArray(result.messages) ? [...result.messages] : []
        const withoutSystem = existing.filter(
          (message: { role?: string }) => message?.role !== 'system' && message?.role !== 'developer'
        )
        withoutSystem.unshift({ role: 'system', content: prompt })
        result.messages = withoutSystem
        break
      }
      case 'openai-responses':
        result.instructions = prompt
        break
      default:
        result.system = prompt
    }
  }

  // Streaming preference from the assistant settings wins over the caller's.
  if (typeof settings.streamOutput === 'boolean') {
    result.stream = settings.streamOutput
  }

  // Sampling parameters are only overridden when explicitly enabled.
  if (settings.enableTemperature && typeof settings.temperature === 'number') {
    result.temperature = settings.temperature
  }
  if (settings.enableTopP && typeof settings.topP === 'number') {
    result.top_p = settings.topP
  }

  // Token limit uses a format-specific field name.
  if (settings.enableMaxTokens && typeof settings.maxTokens === 'number') {
    const field = inputFormat === 'openai-responses' ? 'max_output_tokens' : 'max_tokens'
    result[field] = settings.maxTokens
  }

  return result as InputParams
}
/**
* Middleware type alias
*/
@ -59,8 +163,8 @@ type InputParams = InputParamsMap[InputFormat]
*/
export interface MessageConfig {
response: Response
provider: Provider
modelId: string
provider?: Provider
modelId?: string
params: InputParams
inputFormat?: InputFormat
outputFormat?: OutputFormat
@ -297,9 +401,6 @@ async function executeStream(config: ExecuteStreamConfig): Promise<{
export async function processMessage(config: MessageConfig): Promise<void> {
const {
response,
provider,
modelId,
params,
inputFormat = 'anthropic',
outputFormat = 'anthropic',
onError,
@ -308,6 +409,83 @@ export async function processMessage(config: MessageConfig): Promise<void> {
plugins = []
} = config
let { provider, modelId, params } = config
// Check if model starts with "assistant:" prefix - auto-detect assistant mode
const modelFromParams = 'model' in params ? (params as { model?: string }).model : undefined
const isAssistantMode = modelFromParams?.startsWith('assistant:')
// Handle assistant mode: resolve assistant config and apply overrides
if (isAssistantMode) {
const assistantId = modelFromParams!.slice('assistant:'.length)
const assistant = await resolveAssistantById(assistantId)
if (!assistant) {
throw new Error(`Assistant '${assistantId}' not found`)
}
// Get model from assistant config
const assistantModel = assistant.model ?? assistant.defaultModel
if (!assistantModel?.provider || !assistantModel?.id) {
throw new Error(`Assistant '${assistantId}' is missing a model configuration`)
}
// Resolve provider
const resolvedProvider = await getProviderById(assistantModel.provider)
if (!resolvedProvider) {
throw new Error(`Provider '${assistantModel.provider}' not found for assistant '${assistantId}'`)
}
provider = resolvedProvider
modelId = assistantModel.id
// Apply assistant overrides to params
params = applyAssistantOverrides(params, assistant, inputFormat)
// Build and inject MCP tools
const assistantTools = await buildAssistantTools(assistant)
if (assistantTools.length > 0) {
// Inject tools based on input format
const paramsWithTools = { ...params } as Record<string, unknown>
if (inputFormat === 'openai') {
paramsWithTools.tools = assistantTools.map((tool) => ({
type: 'function',
function: {
name: tool.name,
description: tool.description || '',
parameters: tool.inputSchema
}
}))
} else if (inputFormat === 'openai-responses') {
paramsWithTools.tools = assistantTools.map((tool) => ({
type: 'function',
name: tool.name,
description: tool.description || '',
parameters: tool.inputSchema
}))
} else {
// anthropic
paramsWithTools.tools = assistantTools.map((tool) => ({
name: tool.name,
description: tool.description || '',
input_schema: tool.inputSchema
}))
}
params = paramsWithTools as InputParams
}
logger.debug('Resolved assistant config', {
assistantId,
providerId: provider.id,
modelId,
toolCount: assistantTools.length
})
}
if (!provider || !modelId) {
throw new Error('Provider and modelId are required when not using assistant mode')
}
const isStreaming = 'stream' in params && params.stream === true
logger.info(`Starting ${isStreaming ? 'streaming' : 'non-streaming'} message`, {
@ -317,7 +495,8 @@ export async function processMessage(config: MessageConfig): Promise<void> {
inputFormat,
outputFormat,
middlewareCount: middlewares.length,
pluginCount: plugins.length
pluginCount: plugins.length,
assistantMode: isAssistantMode
})
// Create abort controller with timeout

View File

@ -353,6 +353,13 @@
"description": "[to be translated]:Create model groups with unique URLs for different provider/model combinations",
"empty": "[to be translated]:No model groups configured. Click 'Add Group' to create one.",
"label": "[to be translated]:Model Groups",
"mode": {
"assistant": "[to be translated]:Assistant Preset",
"assistantHint": "[to be translated]:Assistant preset overrides request parameters.",
"assistantPlaceholder": "[to be translated]:Select assistant",
"label": "[to be translated]:Mode",
"model": "[to be translated]:Direct Model"
},
"namePlaceholder": "[to be translated]:Group name"
},
"networkAccess": {

View File

@ -353,6 +353,13 @@
"description": "[to be translated]:Create model groups with unique URLs for different provider/model combinations",
"empty": "[to be translated]:No model groups configured. Click 'Add Group' to create one.",
"label": "[to be translated]:Model Groups",
"mode": {
"assistant": "[to be translated]:Assistant Preset",
"assistantHint": "[to be translated]:Assistant preset overrides request parameters.",
"assistantPlaceholder": "[to be translated]:Select assistant",
"label": "[to be translated]:Mode",
"model": "[to be translated]:Direct Model"
},
"namePlaceholder": "[to be translated]:Group name"
},
"networkAccess": {
@ -1350,6 +1357,7 @@
"backup": {
"file_format": "Backup-Dateiformat fehlerhaft"
},
"base64DataTruncated": "[to be translated]:Base64 image data truncated, size",
"boundary": {
"default": {
"devtools": "Debug-Panel öffnen",
@ -1430,6 +1438,8 @@
"text": "Text",
"toolInput": "Tool-Eingabe",
"toolName": "Tool-Name",
"truncated": "[to be translated]:Data truncated, original size",
"truncatedBadge": "[to be translated]:Truncated",
"unknown": "Unbekannter Fehler",
"usage": "Nutzung",
"user_message_not_found": "Ursprüngliche Benutzernachricht nicht gefunden",

View File

@ -353,6 +353,13 @@
"description": "[to be translated]:Create model groups with unique URLs for different provider/model combinations",
"empty": "[to be translated]:No model groups configured. Click 'Add Group' to create one.",
"label": "[to be translated]:Model Groups",
"mode": {
"assistant": "[to be translated]:Assistant Preset",
"assistantHint": "[to be translated]:Assistant preset overrides request parameters.",
"assistantPlaceholder": "[to be translated]:Select assistant",
"label": "[to be translated]:Mode",
"model": "[to be translated]:Direct Model"
},
"namePlaceholder": "[to be translated]:Group name"
},
"networkAccess": {
@ -1350,6 +1357,7 @@
"backup": {
"file_format": "Λάθος μορφή αρχείου που επιστρέφεται"
},
"base64DataTruncated": "[to be translated]:Base64 image data truncated, size",
"boundary": {
"default": {
"devtools": "Άνοιγμα πίνακα αποσφαλμάτωσης",
@ -1430,6 +1438,8 @@
"text": "κείμενο",
"toolInput": "εισαγωγή εργαλείου",
"toolName": "Όνομα εργαλείου",
"truncated": "[to be translated]:Data truncated, original size",
"truncatedBadge": "[to be translated]:Truncated",
"unknown": "Άγνωστο σφάλμα",
"usage": "δοσολογία",
"user_message_not_found": "Αδυναμία εύρεσης της αρχικής μηνύματος χρήστη",

View File

@ -353,6 +353,13 @@
"description": "[to be translated]:Create model groups with unique URLs for different provider/model combinations",
"empty": "[to be translated]:No model groups configured. Click 'Add Group' to create one.",
"label": "[to be translated]:Model Groups",
"mode": {
"assistant": "[to be translated]:Assistant Preset",
"assistantHint": "[to be translated]:Assistant preset overrides request parameters.",
"assistantPlaceholder": "[to be translated]:Select assistant",
"label": "[to be translated]:Mode",
"model": "[to be translated]:Direct Model"
},
"namePlaceholder": "[to be translated]:Group name"
},
"networkAccess": {
@ -1350,6 +1357,7 @@
"backup": {
"file_format": "Formato de archivo de copia de seguridad incorrecto"
},
"base64DataTruncated": "[to be translated]:Base64 image data truncated, size",
"boundary": {
"default": {
"devtools": "Abrir el panel de depuración",
@ -1430,6 +1438,8 @@
"text": "Texto",
"toolInput": "Herramienta de entrada",
"toolName": "Nombre de la herramienta",
"truncated": "[to be translated]:Data truncated, original size",
"truncatedBadge": "[to be translated]:Truncated",
"unknown": "Error desconocido",
"usage": "Cantidad de uso",
"user_message_not_found": "No se pudo encontrar el mensaje original del usuario",

View File

@ -353,6 +353,13 @@
"description": "[to be translated]:Create model groups with unique URLs for different provider/model combinations",
"empty": "[to be translated]:No model groups configured. Click 'Add Group' to create one.",
"label": "[to be translated]:Model Groups",
"mode": {
"assistant": "[to be translated]:Assistant Preset",
"assistantHint": "[to be translated]:Assistant preset overrides request parameters.",
"assistantPlaceholder": "[to be translated]:Select assistant",
"label": "[to be translated]:Mode",
"model": "[to be translated]:Direct Model"
},
"namePlaceholder": "[to be translated]:Group name"
},
"networkAccess": {
@ -1350,6 +1357,7 @@
"backup": {
"file_format": "Le format du fichier de sauvegarde est incorrect"
},
"base64DataTruncated": "[to be translated]:Base64 image data truncated, size",
"boundary": {
"default": {
"devtools": "Ouvrir le panneau de débogage",
@ -1430,6 +1438,8 @@
"text": "texte",
"toolInput": "entrée de l'outil",
"toolName": "Nom de l'outil",
"truncated": "[to be translated]:Data truncated, original size",
"truncatedBadge": "[to be translated]:Truncated",
"unknown": "Erreur inconnue",
"usage": "Quantité",
"user_message_not_found": "Impossible de trouver le message d'utilisateur original",

View File

@ -353,6 +353,13 @@
"description": "[to be translated]:Create model groups with unique URLs for different provider/model combinations",
"empty": "[to be translated]:No model groups configured. Click 'Add Group' to create one.",
"label": "[to be translated]:Model Groups",
"mode": {
"assistant": "[to be translated]:Assistant Preset",
"assistantHint": "[to be translated]:Assistant preset overrides request parameters.",
"assistantPlaceholder": "[to be translated]:Select assistant",
"label": "[to be translated]:Mode",
"model": "[to be translated]:Direct Model"
},
"namePlaceholder": "[to be translated]:Group name"
},
"networkAccess": {
@ -1350,6 +1357,7 @@
"backup": {
"file_format": "バックアップファイルの形式エラー"
},
"base64DataTruncated": "[to be translated]:Base64 image data truncated, size",
"boundary": {
"default": {
"devtools": "デバッグパネルを開く",
@ -1430,6 +1438,8 @@
"text": "テキスト",
"toolInput": "ツール入力",
"toolName": "ツール名",
"truncated": "[to be translated]:Data truncated, original size",
"truncatedBadge": "[to be translated]:Truncated",
"unknown": "不明なエラー",
"usage": "用量",
"user_message_not_found": "元のユーザーメッセージを見つけることができませんでした",

View File

@ -353,6 +353,13 @@
"description": "[to be translated]:Create model groups with unique URLs for different provider/model combinations",
"empty": "[to be translated]:No model groups configured. Click 'Add Group' to create one.",
"label": "[to be translated]:Model Groups",
"mode": {
"assistant": "[to be translated]:Assistant Preset",
"assistantHint": "[to be translated]:Assistant preset overrides request parameters.",
"assistantPlaceholder": "[to be translated]:Select assistant",
"label": "[to be translated]:Mode",
"model": "[to be translated]:Direct Model"
},
"namePlaceholder": "[to be translated]:Group name"
},
"networkAccess": {
@ -1350,6 +1357,7 @@
"backup": {
"file_format": "Formato do arquivo de backup está incorreto"
},
"base64DataTruncated": "[to be translated]:Base64 image data truncated, size",
"boundary": {
"default": {
"devtools": "Abrir o painel de depuração",
@ -1430,6 +1438,8 @@
"text": "texto",
"toolInput": "ferramenta de entrada",
"toolName": "Nome da ferramenta",
"truncated": "[to be translated]:Data truncated, original size",
"truncatedBadge": "[to be translated]:Truncated",
"unknown": "Erro desconhecido",
"usage": "dosagem",
"user_message_not_found": "Não foi possível encontrar a mensagem original do usuário",

View File

@ -353,6 +353,13 @@
"description": "[to be translated]:Create model groups with unique URLs for different provider/model combinations",
"empty": "[to be translated]:No model groups configured. Click 'Add Group' to create one.",
"label": "[to be translated]:Model Groups",
"mode": {
"assistant": "[to be translated]:Assistant Preset",
"assistantHint": "[to be translated]:Assistant preset overrides request parameters.",
"assistantPlaceholder": "[to be translated]:Select assistant",
"label": "[to be translated]:Mode",
"model": "[to be translated]:Direct Model"
},
"namePlaceholder": "[to be translated]:Group name"
},
"networkAccess": {
@ -1350,6 +1357,7 @@
"backup": {
"file_format": "Eroare format fișier backup"
},
"base64DataTruncated": "[to be translated]:Base64 image data truncated, size",
"boundary": {
"default": {
"devtools": "Deschide panoul de depanare",
@ -1430,6 +1438,8 @@
"text": "Text",
"toolInput": "Intrare instrument",
"toolName": "Nume instrument",
"truncated": "[to be translated]:Data truncated, original size",
"truncatedBadge": "[to be translated]:Truncated",
"unknown": "Eroare necunoscută",
"usage": "Utilizare",
"user_message_not_found": "Nu se poate găsi mesajul original al utilizatorului pentru a retrimite",

View File

@ -353,6 +353,13 @@
"description": "[to be translated]:Create model groups with unique URLs for different provider/model combinations",
"empty": "[to be translated]:No model groups configured. Click 'Add Group' to create one.",
"label": "[to be translated]:Model Groups",
"mode": {
"assistant": "[to be translated]:Assistant Preset",
"assistantHint": "[to be translated]:Assistant preset overrides request parameters.",
"assistantPlaceholder": "[to be translated]:Select assistant",
"label": "[to be translated]:Mode",
"model": "[to be translated]:Direct Model"
},
"namePlaceholder": "[to be translated]:Group name"
},
"networkAccess": {
@ -1350,6 +1357,7 @@
"backup": {
"file_format": "Ошибка формата файла резервной копии"
},
"base64DataTruncated": "[to be translated]:Base64 image data truncated, size",
"boundary": {
"default": {
"devtools": "Открыть панель отладки",
@ -1430,6 +1438,8 @@
"text": "текст",
"toolInput": "ввод инструмента",
"toolName": "имя инструмента",
"truncated": "[to be translated]:Data truncated, original size",
"truncatedBadge": "[to be translated]:Truncated",
"unknown": "Неизвестная ошибка",
"usage": "Дозировка",
"user_message_not_found": "Не удалось найти исходное сообщение пользователя",