Mirror of https://gitee.com/wanwujie/sub2api (synced 2026-04-24 16:44:45 +08:00)
Merge pull request #1134 from yasu-dev221/fix/openai-compat-prompt-cache-key
fix(openai): add fallback prompt_cache_key for compat codex OAuth requests
@@ -43,23 +43,38 @@ func (s *OpenAIGatewayService) ForwardAsChatCompletions(
 	clientStream := chatReq.Stream
 	includeUsage := chatReq.StreamOptions != nil && chatReq.StreamOptions.IncludeUsage
 
-	// 2. Convert to Responses and forward
+	// 2. Resolve model mapping early so compat prompt_cache_key injection can
+	// derive a stable seed from the final upstream model family.
+	mappedModel := resolveOpenAIForwardModel(account, originalModel, defaultMappedModel)
+
+	promptCacheKey = strings.TrimSpace(promptCacheKey)
+	compatPromptCacheInjected := false
+	if promptCacheKey == "" && account.Type == AccountTypeOAuth && shouldAutoInjectPromptCacheKeyForCompat(mappedModel) {
+		promptCacheKey = deriveCompatPromptCacheKey(&chatReq, mappedModel)
+		compatPromptCacheInjected = promptCacheKey != ""
+	}
+
+	// 3. Convert to Responses and forward
 	// ChatCompletionsToResponses always sets Stream=true (upstream always streams).
 	responsesReq, err := apicompat.ChatCompletionsToResponses(&chatReq)
 	if err != nil {
 		return nil, fmt.Errorf("convert chat completions to responses: %w", err)
 	}
-
-	// 3. Model mapping
-	mappedModel := resolveOpenAIForwardModel(account, originalModel, defaultMappedModel)
 	responsesReq.Model = mappedModel
 
-	logger.L().Debug("openai chat_completions: model mapping applied",
+	logFields := []zap.Field{
 		zap.Int64("account_id", account.ID),
 		zap.String("original_model", originalModel),
 		zap.String("mapped_model", mappedModel),
 		zap.Bool("stream", clientStream),
-	)
+	}
+	if compatPromptCacheInjected {
+		logFields = append(logFields,
+			zap.Bool("compat_prompt_cache_key_injected", true),
+			zap.String("compat_prompt_cache_key_sha256", hashSensitiveValueForLog(promptCacheKey)),
+		)
+	}
+	logger.L().Debug("openai chat_completions: model mapping applied", logFields...)
 
 	// 4. Marshal Responses request body, then apply OAuth codex transform
 	responsesBody, err := json.Marshal(responsesReq)
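For context, the hunk calls two helpers that are not shown here, shouldAutoInjectPromptCacheKeyForCompat and deriveCompatPromptCacheKey. The sketch below is a hypothetical, minimal illustration of how such a fallback key could be derived: a deterministic hash of the mapped model family plus a stable prefix of the conversation. It is not the repository's actual implementation; the request type, the model-family check, and the key format are all assumptions.

// Hypothetical sketch only: the helpers below illustrate one way the fallback key
// in this diff could be derived. The repository's real deriveCompatPromptCacheKey,
// shouldAutoInjectPromptCacheKeyForCompat, and hashSensitiveValueForLog are not
// part of this hunk and may differ; the request type here is a simplified stand-in.
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
)

type chatMessage struct {
	Role    string
	Content string
}

type chatCompletionsRequest struct {
	Messages []chatMessage
	User     string
}

// shouldAutoInjectPromptCacheKeyForCompat gates injection on codex-style model
// families (assumed predicate; the real check is not visible in this hunk).
func shouldAutoInjectPromptCacheKeyForCompat(mappedModel string) bool {
	m := strings.ToLower(mappedModel)
	return strings.Contains(m, "codex") || strings.HasPrefix(m, "gpt-5")
}

// deriveCompatPromptCacheKey hashes the mapped model family together with a stable
// prefix of the conversation (leading system/user turns plus the optional user id),
// so retries of the same conversation map to the same prompt_cache_key.
func deriveCompatPromptCacheKey(req *chatCompletionsRequest, mappedModel string) string {
	h := sha256.New()
	h.Write([]byte(mappedModel))
	h.Write([]byte{0})
	if req.User != "" {
		h.Write([]byte(req.User))
		h.Write([]byte{0})
	}
	for _, msg := range req.Messages {
		// Stop at the first turn that is neither system nor user: later turns
		// change every round and would defeat upstream prompt caching.
		if msg.Role != "system" && msg.Role != "user" {
			break
		}
		h.Write([]byte(msg.Role))
		h.Write([]byte{0})
		h.Write([]byte(msg.Content))
		h.Write([]byte{0})
	}
	return "compat-" + hex.EncodeToString(h.Sum(nil))[:32]
}

// hashSensitiveValueForLog mirrors the logging pattern in the diff: only a SHA-256
// digest of the injected key ever reaches the debug log.
func hashSensitiveValueForLog(v string) string {
	sum := sha256.Sum256([]byte(v))
	return hex.EncodeToString(sum[:])
}

func main() {
	req := &chatCompletionsRequest{
		Messages: []chatMessage{
			{Role: "system", Content: "You are a coding assistant."},
			{Role: "user", Content: "Refactor this handler."},
		},
	}
	const mapped = "gpt-5-codex" // hypothetical mapped model name
	if shouldAutoInjectPromptCacheKeyForCompat(mapped) {
		key := deriveCompatPromptCacheKey(req, mapped)
		fmt.Println("prompt_cache_key:", key)
		fmt.Println("sha256 for log:  ", hashSensitiveValueForLog(key))
	}
}

Whatever the real derivation looks like, the property the diff relies on is determinism plus privacy: the key stays stable across retries of the same conversation, and only its SHA-256 digest ever appears in the debug log.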