refactor: remove obsolete Claude Code mapping functions and integrate new utility functions for finish reason mapping and usage conversion

This commit is contained in:
suyao 2025-12-29 23:38:54 +08:00
parent 5bb19b3e30
commit fd12792d6b
No known key found for this signature in database
8 changed files with 242 additions and 138 deletions

View File

@ -1,34 +0,0 @@
// ported from https://github.com/ben-vargas/ai-sdk-provider-claude-code/blob/main/src/map-claude-code-finish-reason.ts#L22
import type { LanguageModelV2FinishReason } from '@ai-sdk/provider'
/**
 * Translates a Claude Code SDK result subtype into the AI SDK finish reason.
 *
 * @param subtype - The result subtype reported by the Claude Code SDK
 * @returns The matching AI SDK finish reason
 *
 * @example
 * ```typescript
 * const finishReason = mapClaudeCodeFinishReason('error_max_turns');
 * // Returns: 'length'
 * ```
 *
 * @remarks
 * Mappings:
 * - 'success' -> 'stop' (normal completion)
 * - 'error_max_turns' -> 'length' (hit turn limit)
 * - 'error_during_execution' -> 'error' (execution error)
 * - anything else (including undefined) -> 'stop' (treated as normal completion)
 */
export function mapClaudeCodeFinishReason(subtype?: string): LanguageModelV2FinishReason {
  if (subtype === 'error_max_turns') {
    return 'length'
  }
  if (subtype === 'error_during_execution') {
    return 'error'
  }
  // 'success', undefined and any unknown subtype all count as normal completion
  return 'stop'
}

View File

@ -21,13 +21,12 @@
*/
import type { SDKMessage } from '@anthropic-ai/claude-agent-sdk'
import type { BetaStopReason } from '@anthropic-ai/sdk/resources/beta/messages/messages.mjs'
import { loggerService } from '@logger'
import type { FinishReason, LanguageModelUsage, ProviderMetadata, TextStreamPart } from 'ai'
import type { LanguageModelUsage, ProviderMetadata, TextStreamPart } from 'ai'
import { v4 as uuidv4 } from 'uuid'
import { ClaudeStreamState } from './claude-stream-state'
import { mapClaudeCodeFinishReason } from './map-claude-code-finish-reason'
import { convertClaudeCodeUsage, mapClaudeCodeFinishReason, mapClaudeCodeStopReason } from './utils'
const logger = loggerService.withContext('ClaudeCodeTransform')
@ -47,23 +46,19 @@ type ToolResultContent = {
is_error?: boolean
}
/**
 * Maps Anthropic stop reasons to the AiSDK equivalents so higher level
 * consumers can treat completion states uniformly across providers.
 *
 * NOTE(review): 'pause_turn' has no direct AiSDK equivalent here and is
 * surfaced as 'unknown' rather than a normal stop.
 */
const finishReasonMapping: Record<BetaStopReason, FinishReason> = {
end_turn: 'stop', // model completed its turn normally
max_tokens: 'length', // generation truncated by the token limit
stop_sequence: 'stop', // a configured stop sequence matched
tool_use: 'tool-calls', // model stopped to request tool invocations
pause_turn: 'unknown', // no AiSDK equivalent; surfaced as unknown
refusal: 'content-filter' // model refused to produce the content
}
const emptyUsage: LanguageModelUsage = {
inputTokens: 0,
outputTokens: 0,
totalTokens: 0
totalTokens: 0,
inputTokenDetails: {
cacheReadTokens: 0,
cacheWriteTokens: 0,
noCacheTokens: 0
},
outputTokenDetails: {
textTokens: 0,
reasoningTokens: 0
}
}
/**
@ -292,8 +287,7 @@ function finalizeNonStreamingStep(
state: ClaudeStreamState,
chunks: AgentStreamPart[]
): AgentStreamPart[] {
const usage = calculateUsageFromMessage(message)
const finishReason = inferFinishReason(message.message.stop_reason)
const finishReason = mapClaudeCodeStopReason(message.message.stop_reason)
chunks.push({
type: 'finish-step',
response: {
@ -301,8 +295,9 @@ function finalizeNonStreamingStep(
timestamp: new Date(),
modelId: message.message.model ?? ''
},
usage: usage ?? emptyUsage,
usage: convertClaudeCodeUsage(message.message.usage),
finishReason,
rawFinishReason: message.message.stop_reason ?? undefined,
providerMetadata: sdkMessageToProviderMetadata(message)
})
state.resetStep()
@ -478,10 +473,8 @@ function handleStreamEvent(
}
case 'message_delta': {
const finishReason = event.delta.stop_reason
? mapStopReason(event.delta.stop_reason as BetaStopReason)
: undefined
const usage = convertUsage(event.usage)
const finishReason = mapClaudeCodeStopReason(event.delta.stop_reason)
const usage = convertClaudeCodeUsage(event.usage)
state.setPendingUsage(usage, finishReason)
break
}
@ -499,6 +492,7 @@ function handleStreamEvent(
modelId: ''
},
usage: pending.usage ?? emptyUsage,
rawFinishReason: pending.finishReason ?? 'stop',
finishReason: pending.finishReason ?? 'stop',
providerMetadata
})
@ -682,20 +676,12 @@ function handleSystemMessage(message: Extract<SDKMessage, { type: 'system' }>):
function handleResultMessage(message: Extract<SDKMessage, { type: 'result' }>): AgentStreamPart[] {
const chunks: AgentStreamPart[] = []
let usage: LanguageModelUsage | undefined
if ('usage' in message) {
usage = {
inputTokens: message.usage.input_tokens ?? 0,
outputTokens: message.usage.output_tokens ?? 0,
totalTokens: (message.usage.input_tokens ?? 0) + (message.usage.output_tokens ?? 0)
}
}
if (message.subtype === 'success') {
chunks.push({
type: 'finish',
totalUsage: usage ?? emptyUsage,
totalUsage: convertClaudeCodeUsage(message.usage),
finishReason: mapClaudeCodeFinishReason(message.subtype),
rawFinishReason: message.subtype,
providerMetadata: {
...sdkMessageToProviderMetadata(message),
usage: message.usage,
@ -715,59 +701,4 @@ function handleResultMessage(message: Extract<SDKMessage, { type: 'result' }>):
return chunks
}
/**
 * Normalises a raw usage payload into numeric AiSDK usage fields, so callers
 * never have to deal with missing or null token counts.
 *
 * @param usage - Provider usage payload, possibly absent
 * @returns Normalised usage, or undefined when no payload was supplied
 */
function convertUsage(
  usage?: {
    input_tokens?: number | null
    output_tokens?: number | null
  } | null
): LanguageModelUsage | undefined {
  if (usage == null) return undefined
  const input = usage.input_tokens ?? 0
  const output = usage.output_tokens ?? 0
  return {
    inputTokens: input,
    outputTokens: output,
    totalTokens: input + output
  }
}
/**
 * Anthropic-only wrapper around {@link finishReasonMapping} that defaults to
 * `unknown` to avoid surprising downstream consumers when new stop reasons are
 * introduced.
 */
function mapStopReason(reason: BetaStopReason): FinishReason {
  const mapped = finishReasonMapping[reason]
  return mapped === undefined ? 'unknown' : mapped
}
/**
 * Extracts token accounting details from an assistant message, if available.
 *
 * @param message - Assistant-typed SDK message
 * @returns Normalised usage, or undefined when the message carries no usage
 */
function calculateUsageFromMessage(
  message: Extract<SDKMessage, { type: 'assistant' }>
): LanguageModelUsage | undefined {
  const usage = message.message.usage
  if (!usage) return undefined
  const inTokens = usage.input_tokens ?? 0
  const outTokens = usage.output_tokens ?? 0
  return {
    inputTokens: inTokens,
    outputTokens: outTokens,
    totalTokens: inTokens + outTokens
  }
}
/**
 * Converts Anthropic stop reasons into AiSDK finish reasons, falling back to a
 * generic `stop` if the provider omits the detail entirely.
 */
function inferFinishReason(stopReason: BetaStopReason | null | undefined): FinishReason {
  return stopReason ? mapStopReason(stopReason) : 'stop'
}
export { ClaudeStreamState }

View File

@ -0,0 +1,107 @@
// ported from https://github.com/ben-vargas/ai-sdk-provider-claude-code/blob/main/src/map-claude-code-finish-reason.ts#L22
import type { JSONObject } from '@ai-sdk/provider'
import type { BetaStopReason } from '@anthropic-ai/sdk/resources/beta/messages/messages.mjs'
import type { FinishReason, LanguageModelUsage } from 'ai'
/**
 * Maps a Claude Code SDK result subtype to an AI SDK finish reason.
 *
 * @param subtype - The result subtype from the Claude Code SDK, if any
 * @returns The corresponding AI SDK finish reason
 *
 * @example
 * ```typescript
 * const finishReason = mapClaudeCodeFinishReason('error_max_turns');
 * // Returns: 'length'
 * ```
 *
 * @remarks
 * 'success' and an omitted subtype both map to 'stop'; unrecognised subtypes
 * map to 'other' so they can be distinguished from genuine completion.
 **/
export function mapClaudeCodeFinishReason(subtype?: string): FinishReason {
  if (subtype === undefined || subtype === 'success') {
    return 'stop'
  }
  if (subtype === 'error_max_turns') {
    return 'length'
  }
  if (subtype === 'error_during_execution') {
    return 'error'
  }
  // Unknown subtypes are reported as 'other' rather than a normal completion
  return 'other'
}
/**
 * Maps Anthropic stop reasons to the AiSDK equivalents so higher level
 * consumers can treat completion states uniformly across providers.
 */
const finishReasonMapping: Record<BetaStopReason, FinishReason> = {
  end_turn: 'stop', // model completed its turn normally
  max_tokens: 'length', // generation truncated by the token limit
  stop_sequence: 'stop', // a configured stop sequence matched
  tool_use: 'tool-calls', // model stopped to request tool invocations
  pause_turn: 'other', // no direct AiSDK equivalent
  refusal: 'content-filter' // model refused to produce the content
}
/**
 * Maps an Anthropic stop reason to the AI SDK finish reason.
 *
 * Fix: the previous docstring was copy-pasted from mapClaudeCodeFinishReason
 * and described result subtypes instead of stop reasons.
 *
 * @param claudeStopReason - Stop reason from the Anthropic message, or null when omitted
 * @returns The corresponding AI SDK finish reason
 *
 * @example
 * ```typescript
 * const finishReason = mapClaudeCodeStopReason('max_tokens');
 * // Returns: 'length'
 * ```
 *
 * @remarks
 * A null stop reason is treated as normal completion ('stop'); stop reasons
 * missing from the mapping fall back to 'other'.
 **/
export function mapClaudeCodeStopReason(claudeStopReason: BetaStopReason | null): FinishReason {
  if (claudeStopReason === null) {
    return 'stop'
  }
  // ?? instead of ||: mapping values are non-empty strings, so behavior is
  // identical, but nullish coalescing states the intent precisely
  return finishReasonMapping[claudeStopReason] ?? 'other'
}
/**
 * Shape of the usage payload emitted by the Claude Code SDK.
 * All fields may be absent or null; consumers must default them to 0.
 */
type ClaudeCodeUsage = {
input_tokens?: number | null // treated as the non-cached input count by convertClaudeCodeUsage
output_tokens?: number | null // generated output tokens
cache_creation_input_tokens?: number | null // input tokens written to the prompt cache
cache_read_input_tokens?: number | null // input tokens served from the prompt cache
}
/**
 * Converts Claude Code SDK usage to AI SDK v6 stable usage format.
 *
 * Maps Claude's flat token counts to the nested structure required by AI SDK v6:
 * - `cache_creation_input_tokens` -> `inputTokenDetails.cacheWriteTokens`
 * - `cache_read_input_tokens` -> `inputTokenDetails.cacheReadTokens`
 * - `input_tokens` -> `inputTokenDetails.noCacheTokens`
 * - `inputTokens` = sum of all input tokens (non-cached + cache read + cache write)
 * - `totalTokens` = `inputTokens` + `outputTokens`
 *
 * Fixes: `totalTokens` previously omitted `outputTokens`, and the top-level
 * `inputTokens` excluded cache tokens even though this block's own contract
 * states it is the sum of all input tokens.
 *
 * @param usage - Raw usage data from Claude Code SDK
 * @returns Formatted usage object for AI SDK v6
 */
export function convertClaudeCodeUsage(usage: ClaudeCodeUsage): LanguageModelUsage {
  const noCacheTokens = usage.input_tokens ?? 0
  const outputTokens = usage.output_tokens ?? 0
  const cacheWriteTokens = usage.cache_creation_input_tokens ?? 0
  const cacheReadTokens = usage.cache_read_input_tokens ?? 0
  // Anthropic reports cached input separately from input_tokens, so the total
  // input is the sum of non-cached, cache-read and cache-write tokens.
  const inputTokens = noCacheTokens + cacheReadTokens + cacheWriteTokens

  return {
    inputTokens,
    outputTokens,
    totalTokens: inputTokens + outputTokens,
    inputTokenDetails: {
      noCacheTokens,
      cacheReadTokens,
      cacheWriteTokens
    },
    outputTokenDetails: {
      // Claude Code does not break output down into text vs. reasoning tokens
      textTokens: undefined,
      reasoningTokens: undefined
    },
    // Preserve the raw provider payload for consumers needing exact figures
    raw: usage as JSONObject
  }
}

View File

@ -6,13 +6,15 @@
* AiSdkSpanAdapter AI SDK trace
*/
import { definePlugin } from '@cherrystudio/ai-core'
import type { AiPlugin} from '@cherrystudio/ai-core';
import { definePlugin, type StreamTextParams, type StreamTextResult } from '@cherrystudio/ai-core'
import { loggerService } from '@logger'
import type { Context, Span, SpanContext, Tracer } from '@opentelemetry/api'
import { context as otelContext, trace } from '@opentelemetry/api'
import { currentSpan } from '@renderer/services/SpanManagerService'
import { webTraceService } from '@renderer/services/WebTraceService'
import type { Assistant } from '@renderer/types'
import type { TelemetrySettings } from 'ai'
import { AiSdkSpanAdapter } from '../trace/AiSdkSpanAdapter'
@ -59,6 +61,107 @@ class AdapterTracer {
})
}
/**
 * Creates a span via the wrapped tracer, attaching it to the cached parent
 * SpanContext when the caller supplies no explicit context, stamping
 * parent/topic attributes on it, and wrapping its end() so the finished span
 * is converted to a SpanEntity and persisted via window.api.trace.
 *
 * @param name - Span name passed through to the underlying tracer
 * @param options - Tracer-specific span options, forwarded as-is
 * @param context - Optional explicit parent context; when omitted, a context
 *   derived from parentSpanContext (or the active context) is used
 * @returns The created span with its end() method instrumented
 */
startSpan(name: string, options?: any, context?: any): Span {
logger.debug('AdapterTracer.startSpan called', {
spanName: name,
topicId: this.topicId,
modelName: this.modelName
})
// Build a context that carries the parent SpanContext, if one is available
const createContextWithParent = () => {
if (this.cachedParentContext) {
return this.cachedParentContext
}
if (this.parentSpanContext) {
try {
const ctx = trace.setSpanContext(otelContext.active(), this.parentSpanContext)
logger.debug('Created active context with parent SpanContext for startSpan', {
spanName: name,
parentTraceId: this.parentSpanContext.traceId,
parentSpanId: this.parentSpanContext.spanId,
topicId: this.topicId
})
return ctx
} catch (error) {
logger.warn('Failed to create context with parent SpanContext in startSpan', error as Error)
}
}
// Fall back to whatever context is currently active
return otelContext.active()
}
const ctx = context ?? createContextWithParent()
const span = this.originalTracer.startSpan(name, options, ctx)
// Inject parent/child relationship attributes (fallback data used to rebuild
// the span hierarchy downstream)
try {
if (this.parentSpanContext) {
span.setAttribute('trace.parentSpanId', this.parentSpanContext.spanId)
span.setAttribute('trace.parentTraceId', this.parentSpanContext.traceId)
}
if (this.topicId) {
span.setAttribute('trace.topicId', this.topicId)
}
} catch (e) {
logger.debug('Failed to set trace parent attributes in startSpan', e as Error)
}
// Wrap the span's end() method so finished spans are persisted
const originalEnd = span.end.bind(span)
span.end = (endTime?: any) => {
logger.debug('AI SDK span.end() called in startSpan - about to convert span', {
spanName: name,
spanId: span.spanContext().spanId,
traceId: span.spanContext().traceId,
topicId: this.topicId,
modelName: this.modelName
})
// Invoke the original end() first so the span is properly finalized
originalEnd(endTime)
// Convert and persist the span data; failures are logged, never thrown
try {
logger.debug('Converting AI SDK span to SpanEntity (from startSpan)', {
spanName: name,
spanId: span.spanContext().spanId,
traceId: span.spanContext().traceId,
topicId: this.topicId,
modelName: this.modelName
})
logger.silly('span', span)
const spanEntity = AiSdkSpanAdapter.convertToSpanEntity({
span,
topicId: this.topicId,
modelName: this.modelName
})
// Persist the converted entity
window.api.trace.saveEntity(spanEntity)
logger.debug('AI SDK span converted and saved successfully (from startSpan)', {
spanName: name,
spanId: span.spanContext().spanId,
traceId: span.spanContext().traceId,
topicId: this.topicId,
modelName: this.modelName,
hasUsage: !!spanEntity.usage,
usage: spanEntity.usage
})
} catch (error) {
logger.error('Failed to convert AI SDK span (from startSpan)', error as Error, {
spanName: name,
spanId: span.spanContext().spanId,
traceId: span.spanContext().traceId,
topicId: this.topicId,
modelName: this.modelName
})
}
}
return span
}
startActiveSpan<F extends (span: Span) => any>(name: string, fn: F): ReturnType<F>
startActiveSpan<F extends (span: Span) => any>(name: string, options: any, fn: F): ReturnType<F>
startActiveSpan<F extends (span: Span) => any>(name: string, options: any, context: any, fn: F): ReturnType<F>
@ -187,10 +290,10 @@ class AdapterTracer {
}
}
export function createTelemetryPlugin(config: TelemetryPluginConfig) {
export function createTelemetryPlugin(config: TelemetryPluginConfig): AiPlugin<StreamTextParams, StreamTextResult> {
const { enabled = true, recordInputs = true, recordOutputs = true, topicId } = config
return definePlugin({
return definePlugin<StreamTextParams, StreamTextResult>({
name: 'telemetryPlugin',
enforce: 'pre', // 在其他插件之前执行,确保 telemetry 配置被正确注入
@ -275,7 +378,7 @@ export function createTelemetryPlugin(config: TelemetryPluginConfig) {
isEnabled: true,
recordInputs,
recordOutputs,
tracer: adapterTracer, // 使用包装后的 tracer
tracer: adapterTracer,
functionId: `ai-request-${context.requestId}`,
metadata: {
providerId: context.providerId,
@ -286,11 +389,11 @@ export function createTelemetryPlugin(config: TelemetryPluginConfig) {
// 确保topicId也作为标准属性传递
'trace.topicId': effectiveTopicId,
'trace.modelName': modelName,
// 添加父span信息用于调试
parentSpanId: parentSpanContext?.spanId,
parentTraceId: parentSpanContext?.traceId
// 添加父span信息用于调试(只在有值时添加)
...(parentSpanContext?.spanId && { parentSpanId: parentSpanContext.spanId }),
...(parentSpanContext?.traceId && { parentTraceId: parentSpanContext.traceId })
}
}
} satisfies TelemetrySettings
// 如果有父span尝试在telemetry配置中设置父上下文
if (parentSpan) {

View File

@ -17,7 +17,6 @@ export const knowledgeSearchTool = (
userMessage?: string
) => {
return tool({
name: 'builtin_knowledge_search',
description: `Knowledge base search tool for retrieving information from user's private knowledge base. This searches your local collection of documents, web content, notes, and other materials you have stored.
This tool has been configured with search parameters based on the conversation context:
@ -103,7 +102,7 @@ You can use this tool as-is, or provide additionalContext to refine the search f
// 返回结果
return knowledgeReferencesData
},
toModelOutput: (results) => {
toModelOutput: ({ output: results }) => {
let summary = 'No search needed based on the query analysis.'
if (results.length > 0) {
summary = `Found ${results.length} relevant sources. Use [number] format to cite specific information.`

View File

@ -11,7 +11,6 @@ import { MemoryProcessor } from '../../services/MemoryProcessor'
*/
export const memorySearchTool = () => {
return tool({
name: 'builtin_memory_search',
description: 'Search through conversation memories and stored facts for relevant context',
inputSchema: z.object({
query: z.string().describe('Search query to find relevant memories'),

View File

@ -20,7 +20,6 @@ export const webSearchToolWithPreExtractedKeywords = (
const webSearchProvider = WebSearchService.getWebSearchProvider(webSearchProviderId)
return tool({
name: 'builtin_web_search',
description: `Web search tool for finding current information, news, and real-time data from the internet.
This tool has been configured with search parameters based on the conversation context:
@ -71,7 +70,7 @@ You can use this tool as-is to search with the prepared queries, or provide addi
return searchResults
},
toModelOutput: (results) => {
toModelOutput: ({ output: results }) => {
let summary = 'No search needed based on the query analysis.'
if (results.query && results.results.length > 0) {
summary = `Found ${results.results.length} relevant sources. Use [number] format to cite specific information.`

View File

@ -1,4 +1,4 @@
import type { LanguageModelV2Source } from '@ai-sdk/provider'
import type { LanguageModelV3Source } from '@ai-sdk/provider'
import type { WebSearchResultBlock } from '@anthropic-ai/sdk/resources'
import type OpenAI from '@cherrystudio/openai'
import type { GenerateImagesConfig, GroundingMetadata, PersonGeneration } from '@google/genai'
@ -643,7 +643,7 @@ export type WebSearchProviderResponse = {
results: WebSearchProviderResult[]
}
export type AISDKWebSearchResult = Omit<Extract<LanguageModelV2Source, { sourceType: 'url' }>, 'sourceType'>
export type AISDKWebSearchResult = Omit<Extract<LanguageModelV3Source, { sourceType: 'url' }>, 'sourceType'>
export type WebSearchResults =
| WebSearchProviderResponse