mirror of
https://gitee.com/wanwujie/sub2api
synced 2026-04-08 01:00:21 +08:00
将 /v1/chat/completions 端点从 ResponseWriter 劫持模式重构为独立的 类型安全转换路径,与 Anthropic Messages 端点架构对齐: - 在 apicompat 包新增 Chat Completions 完整类型定义和双向转换器 - 新增 ForwardAsChatCompletions service 方法,走 Responses API 上游 - Handler 改为独立的账号选择/failover 循环,不再劫持 Responses handler - 提取 handleCompatErrorResponse 为 Chat Completions 和 Messages 共用 - 删除旧的 forwardChatCompletions 直传路径及相关死代码
313 lines
8.5 KiB
Go
313 lines
8.5 KiB
Go
package apicompat
|
|
|
|
import (
|
|
"encoding/json"
|
|
"fmt"
|
|
)
|
|
|
|
// ChatCompletionsToResponses converts a Chat Completions request into a
|
|
// Responses API request. The upstream always streams, so Stream is forced to
|
|
// true. store is always false and reasoning.encrypted_content is always
|
|
// included so that the response translator has full context.
|
|
func ChatCompletionsToResponses(req *ChatCompletionsRequest) (*ResponsesRequest, error) {
|
|
input, err := convertChatMessagesToResponsesInput(req.Messages)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
inputJSON, err := json.Marshal(input)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
out := &ResponsesRequest{
|
|
Model: req.Model,
|
|
Input: inputJSON,
|
|
Temperature: req.Temperature,
|
|
TopP: req.TopP,
|
|
Stream: true, // upstream always streams
|
|
Include: []string{"reasoning.encrypted_content"},
|
|
ServiceTier: req.ServiceTier,
|
|
}
|
|
|
|
storeFalse := false
|
|
out.Store = &storeFalse
|
|
|
|
// max_tokens / max_completion_tokens → max_output_tokens, prefer max_completion_tokens
|
|
maxTokens := 0
|
|
if req.MaxTokens != nil {
|
|
maxTokens = *req.MaxTokens
|
|
}
|
|
if req.MaxCompletionTokens != nil {
|
|
maxTokens = *req.MaxCompletionTokens
|
|
}
|
|
if maxTokens > 0 {
|
|
v := maxTokens
|
|
if v < minMaxOutputTokens {
|
|
v = minMaxOutputTokens
|
|
}
|
|
out.MaxOutputTokens = &v
|
|
}
|
|
|
|
// reasoning_effort → reasoning.effort + reasoning.summary="auto"
|
|
if req.ReasoningEffort != "" {
|
|
out.Reasoning = &ResponsesReasoning{
|
|
Effort: req.ReasoningEffort,
|
|
Summary: "auto",
|
|
}
|
|
}
|
|
|
|
// tools[] and legacy functions[] → ResponsesTool[]
|
|
if len(req.Tools) > 0 || len(req.Functions) > 0 {
|
|
out.Tools = convertChatToolsToResponses(req.Tools, req.Functions)
|
|
}
|
|
|
|
// tool_choice: already compatible format — pass through directly.
|
|
// Legacy function_call needs mapping.
|
|
if len(req.ToolChoice) > 0 {
|
|
out.ToolChoice = req.ToolChoice
|
|
} else if len(req.FunctionCall) > 0 {
|
|
tc, err := convertChatFunctionCallToToolChoice(req.FunctionCall)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("convert function_call: %w", err)
|
|
}
|
|
out.ToolChoice = tc
|
|
}
|
|
|
|
return out, nil
|
|
}
|
|
|
|
// convertChatMessagesToResponsesInput converts the Chat Completions messages
|
|
// array into a Responses API input items array.
|
|
func convertChatMessagesToResponsesInput(msgs []ChatMessage) ([]ResponsesInputItem, error) {
|
|
var out []ResponsesInputItem
|
|
for _, m := range msgs {
|
|
items, err := chatMessageToResponsesItems(m)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
out = append(out, items...)
|
|
}
|
|
return out, nil
|
|
}
|
|
|
|
// chatMessageToResponsesItems converts a single ChatMessage into one or more
|
|
// ResponsesInputItem values.
|
|
func chatMessageToResponsesItems(m ChatMessage) ([]ResponsesInputItem, error) {
|
|
switch m.Role {
|
|
case "system":
|
|
return chatSystemToResponses(m)
|
|
case "user":
|
|
return chatUserToResponses(m)
|
|
case "assistant":
|
|
return chatAssistantToResponses(m)
|
|
case "tool":
|
|
return chatToolToResponses(m)
|
|
case "function":
|
|
return chatFunctionToResponses(m)
|
|
default:
|
|
return chatUserToResponses(m)
|
|
}
|
|
}
|
|
|
|
// chatSystemToResponses converts a system message.
|
|
func chatSystemToResponses(m ChatMessage) ([]ResponsesInputItem, error) {
|
|
text, err := parseChatContent(m.Content)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
content, err := json.Marshal(text)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return []ResponsesInputItem{{Role: "system", Content: content}}, nil
|
|
}
|
|
|
|
// chatUserToResponses converts a user message, handling both plain strings and
|
|
// multi-modal content arrays.
|
|
func chatUserToResponses(m ChatMessage) ([]ResponsesInputItem, error) {
|
|
// Try plain string first.
|
|
var s string
|
|
if err := json.Unmarshal(m.Content, &s); err == nil {
|
|
content, _ := json.Marshal(s)
|
|
return []ResponsesInputItem{{Role: "user", Content: content}}, nil
|
|
}
|
|
|
|
var parts []ChatContentPart
|
|
if err := json.Unmarshal(m.Content, &parts); err != nil {
|
|
return nil, fmt.Errorf("parse user content: %w", err)
|
|
}
|
|
|
|
var responseParts []ResponsesContentPart
|
|
for _, p := range parts {
|
|
switch p.Type {
|
|
case "text":
|
|
if p.Text != "" {
|
|
responseParts = append(responseParts, ResponsesContentPart{
|
|
Type: "input_text",
|
|
Text: p.Text,
|
|
})
|
|
}
|
|
case "image_url":
|
|
if p.ImageURL != nil && p.ImageURL.URL != "" {
|
|
responseParts = append(responseParts, ResponsesContentPart{
|
|
Type: "input_image",
|
|
ImageURL: p.ImageURL.URL,
|
|
})
|
|
}
|
|
}
|
|
}
|
|
|
|
content, err := json.Marshal(responseParts)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return []ResponsesInputItem{{Role: "user", Content: content}}, nil
|
|
}
|
|
|
|
// chatAssistantToResponses converts an assistant message. If there is both
|
|
// text content and tool_calls, the text is emitted as an assistant message
|
|
// first, then each tool_call becomes a function_call item. If the content is
|
|
// empty/nil and there are tool_calls, only function_call items are emitted.
|
|
func chatAssistantToResponses(m ChatMessage) ([]ResponsesInputItem, error) {
|
|
var items []ResponsesInputItem
|
|
|
|
// Emit assistant message with output_text if content is non-empty.
|
|
if len(m.Content) > 0 {
|
|
var s string
|
|
if err := json.Unmarshal(m.Content, &s); err == nil && s != "" {
|
|
parts := []ResponsesContentPart{{Type: "output_text", Text: s}}
|
|
partsJSON, err := json.Marshal(parts)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, ResponsesInputItem{Role: "assistant", Content: partsJSON})
|
|
}
|
|
}
|
|
|
|
// Emit one function_call item per tool_call.
|
|
for _, tc := range m.ToolCalls {
|
|
args := tc.Function.Arguments
|
|
if args == "" {
|
|
args = "{}"
|
|
}
|
|
items = append(items, ResponsesInputItem{
|
|
Type: "function_call",
|
|
CallID: tc.ID,
|
|
Name: tc.Function.Name,
|
|
Arguments: args,
|
|
ID: tc.ID,
|
|
})
|
|
}
|
|
|
|
return items, nil
|
|
}
|
|
|
|
// chatToolToResponses converts a tool result message (role=tool) into a
|
|
// function_call_output item.
|
|
func chatToolToResponses(m ChatMessage) ([]ResponsesInputItem, error) {
|
|
output, err := parseChatContent(m.Content)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
if output == "" {
|
|
output = "(empty)"
|
|
}
|
|
return []ResponsesInputItem{{
|
|
Type: "function_call_output",
|
|
CallID: m.ToolCallID,
|
|
Output: output,
|
|
}}, nil
|
|
}
|
|
|
|
// chatFunctionToResponses converts a legacy function result message
|
|
// (role=function) into a function_call_output item. The Name field is used as
|
|
// call_id since legacy function calls do not carry a separate call_id.
|
|
func chatFunctionToResponses(m ChatMessage) ([]ResponsesInputItem, error) {
|
|
output, err := parseChatContent(m.Content)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
if output == "" {
|
|
output = "(empty)"
|
|
}
|
|
return []ResponsesInputItem{{
|
|
Type: "function_call_output",
|
|
CallID: m.Name,
|
|
Output: output,
|
|
}}, nil
|
|
}
|
|
|
|
// parseChatContent returns the string value of a ChatMessage Content field.
// Content must be a JSON string. Returns "" when content is missing, empty,
// or JSON null.
func parseChatContent(raw json.RawMessage) (string, error) {
	if len(raw) == 0 {
		return "", nil
	}
	var text string
	err := json.Unmarshal(raw, &text)
	if err != nil {
		return "", fmt.Errorf("parse content as string: %w", err)
	}
	return text, nil
}
|
|
|
|
// convertChatToolsToResponses maps Chat Completions tool definitions and legacy
|
|
// function definitions to Responses API tool definitions.
|
|
func convertChatToolsToResponses(tools []ChatTool, functions []ChatFunction) []ResponsesTool {
|
|
var out []ResponsesTool
|
|
|
|
for _, t := range tools {
|
|
if t.Type != "function" || t.Function == nil {
|
|
continue
|
|
}
|
|
rt := ResponsesTool{
|
|
Type: "function",
|
|
Name: t.Function.Name,
|
|
Description: t.Function.Description,
|
|
Parameters: t.Function.Parameters,
|
|
Strict: t.Function.Strict,
|
|
}
|
|
out = append(out, rt)
|
|
}
|
|
|
|
// Legacy functions[] are treated as function-type tools.
|
|
for _, f := range functions {
|
|
rt := ResponsesTool{
|
|
Type: "function",
|
|
Name: f.Name,
|
|
Description: f.Description,
|
|
Parameters: f.Parameters,
|
|
Strict: f.Strict,
|
|
}
|
|
out = append(out, rt)
|
|
}
|
|
|
|
return out
|
|
}
|
|
|
|
// convertChatFunctionCallToToolChoice maps the legacy function_call field to a
// tool_choice value.
//
//	"auto"       → "auto"
//	"none"       → "none"
//	{"name":"X"} → {"type":"function","function":{"name":"X"}}
//
// An object without a non-empty name is rejected, since it would otherwise
// silently force an unnamed function.
func convertChatFunctionCallToToolChoice(raw json.RawMessage) (json.RawMessage, error) {
	// Try string first ("auto", "none", etc.) — re-marshal to normalize.
	var s string
	if err := json.Unmarshal(raw, &s); err == nil {
		return json.Marshal(s)
	}

	// Object form: {"name":"X"}
	var obj struct {
		Name string `json:"name"`
	}
	if err := json.Unmarshal(raw, &obj); err != nil {
		return nil, err
	}
	if obj.Name == "" {
		return nil, fmt.Errorf("function_call object missing name")
	}
	return json.Marshal(map[string]any{
		"type":     "function",
		"function": map[string]string{"name": obj.Name},
	})
}
|