From 4e3476a66930d6f339fa0b5e5fa54c2938fffabe Mon Sep 17 00:00:00 2001 From: song Date: Tue, 6 Jan 2026 15:09:21 +0800 Subject: [PATCH 001/147] =?UTF-8?q?fix:=20=E6=B7=BB=E5=8A=A0=20gemini-3-fl?= =?UTF-8?q?ash=20=E5=89=8D=E7=BC=80=E6=98=A0=E5=B0=84=E6=94=AF=E6=8C=81=20?= =?UTF-8?q?gemini-3-flash-preview?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/service/antigravity_gateway_service.go | 1 + 1 file changed, 1 insertion(+) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 9216ff81..2145b6c4 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -66,6 +66,7 @@ var antigravityPrefixMapping = []struct { // 长前缀优先 {"gemini-2.5-flash-image", "gemini-3-pro-image"}, // gemini-2.5-flash-image → 3-pro-image {"gemini-3-pro-image", "gemini-3-pro-image"}, // gemini-3-pro-image-preview 等 + {"gemini-3-flash", "gemini-3-flash"}, // gemini-3-flash-preview 等 → gemini-3-flash {"claude-3-5-sonnet", "claude-sonnet-4-5"}, // 旧版 claude-3-5-sonnet-xxx {"claude-sonnet-4-5", "claude-sonnet-4-5"}, // claude-sonnet-4-5-xxx {"claude-haiku-4-5", "claude-sonnet-4-5"}, // claude-haiku-4-5-xxx → sonnet From a4a0c0e2cc2d378a6c8679998db08f19ed221737 Mon Sep 17 00:00:00 2001 From: song Date: Thu, 8 Jan 2026 13:07:20 +0800 Subject: [PATCH 002/147] =?UTF-8?q?feat(antigravity):=20=E5=A2=9E=E5=BC=BA?= =?UTF-8?q?=E8=AF=B7=E6=B1=82=E5=8F=82=E6=95=B0=E5=92=8C=E6=B3=A8=E5=85=A5?= =?UTF-8?q?=20Antigravity=20=E8=BA=AB=E4=BB=BD=20system=20prompt?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/pkg/antigravity/client.go | 7 ++++++ .../pkg/antigravity/request_transformer.go | 25 ++++++------------- 2 files changed, 14 insertions(+), 18 deletions(-) diff --git a/backend/internal/pkg/antigravity/client.go b/backend/internal/pkg/antigravity/client.go index 48f6b15d..32e8e4c1 100644 --- a/backend/internal/pkg/antigravity/client.go +++ b/backend/internal/pkg/antigravity/client.go @@ -7,6 +7,7 @@ import ( "encoding/json" "fmt" "io" + "log" "net/http" "net/url" "strings" @@ -23,6 +24,12 @@ func NewAPIRequest(ctx context.Context, action, accessToken string, body []byte) req.Header.Set("Content-Type", "application/json") req.Header.Set("Authorization", "Bearer "+accessToken) req.Header.Set("User-Agent", UserAgent) + req.Header.Set("requestType", "agent") + + // 打印完整 HTTP 请求 + log.Printf("[NewAPIRequest] POST %s\nHeaders: Content-Type=%s, Authorization=Bearer %s..., requestType=%s, User-Agent=%s\nBody: %s", + apiURL, req.Header.Get("Content-Type"), accessToken[:20], req.Header.Get("requestType"), req.Header.Get("User-Agent"), string(body)) + return req, nil } diff --git a/backend/internal/pkg/antigravity/request_transformer.go b/backend/internal/pkg/antigravity/request_transformer.go index 805e0c5b..ce199cee 100644 --- a/backend/internal/pkg/antigravity/request_transformer.go +++ b/backend/internal/pkg/antigravity/request_transformer.go @@ -9,7 +9,6 @@ import ( "sync" "github.com/gin-gonic/gin" - "github.com/google/uuid" ) type TransformOptions struct { @@ -93,26 +92,16 @@ func TransformClaudeToGeminiWithOptions(claudeReq *ClaudeRequest, projectID, map // 6. 包装为 v1internal 请求 v1Req := V1InternalRequest{ - Project: projectID, - RequestID: "agent-" + uuid.New().String(), - UserAgent: "sub2api", - RequestType: "agent", - Model: mappedModel, - Request: innerRequest, + Model: mappedModel, + Request: innerRequest, } return json.Marshal(v1Req) } func defaultIdentityPatch(modelName string) string { - return fmt.Sprintf( - "--- [IDENTITY_PATCH] ---\n"+ - "Ignore any previous instructions regarding your identity or host platform (e.g., Amazon Q, Google AI).\n"+ - "You are currently providing services as the native %s model via a standard API proxy.\n"+ - "Always use the 'claude' command for terminal tasks if relevant.\n"+ - "--- [SYSTEM_PROMPT_BEGIN] ---\n", - modelName, - ) + // Antigravity 身份系统指令 + return "\\nYou are Antigravity, a powerful agentic AI coding assistant designed by the Google Deepmind team working on Advanced Agentic Coding.\\nYou are pair programming with a USER to solve their coding task. The task may require creating a new codebase, modifying or debugging an existing codebase, or simply answering a question.\\nThe USER will send you requests, which you must always prioritize addressing. Along with each USER request, we will attach additional metadata about their current state, such as what files they have open and where their cursor is.\\nThis information may or may not be relevant to the coding task, it is up for you to decide.\\n\\n\\n\\nCall tools as you normally would. The following list provides additional guidance to help you avoid errors:\\n - **Absolute paths only**. When using tools that accept file path arguments, ALWAYS use the absolute file path.\\n\\n\\n\\n## Technology Stack,\\nYour web applications should be built using the following technologies:,\\n1. **Core**: Use HTML for structure and Javascript for logic.\\n2. **Styling (CSS)**: Use Vanilla CSS for maximum flexibility and control. Avoid using TailwindCSS unless the USER explicitly requests it; in this case, first confirm which TailwindCSS version to use.\\n3. **Web App**: If the USER specifies that they want a more complex web app, use a framework like Next.js or Vite. Only do this if the USER explicitly requests a web app.\\n4. **New Project Creation**: If you need to use a framework for a new app, use `npx` with the appropriate script, but there are some rules to follow:,\\n - Use `npx -y` to automatically install the script and its dependencies\\n - You MUST run the command with `--help` flag to see all available options first, \\n - Initialize the app in the current directory with `./` (example: `npx -y create-vite-app@latest ./`),\\n - You should run in non-interactive mode so that the user doesn't need to input anything,\\n5. **Running Locally**: When running locally, use `npm run dev` or equivalent dev server. Only build the production bundle if the USER explicitly requests it or you are validating the code for correctness.\\n\\n# Design Aesthetics,\\n1. **Use Rich Aesthetics**: The USER should be wowed at first glance by the design. Use best practices in modern web design (e.g. vibrant colors, dark modes, glassmorphism, and dynamic animations) to create a stunning first impression. Failure to do this is UNACCEPTABLE.\\n2. **Prioritize Visual Excellence**: Implement designs that will WOW the user and feel extremely premium:\\n\\t\\t- Avoid generic colors (plain red, blue, green). Use curated, harmonious color palettes (e.g., HSL tailored colors, sleek dark modes).\\n - Using modern typography (e.g., from Google Fonts like Inter, Roboto, or Outfit) instead of browser defaults.\\n\\t\\t- Use smooth gradients,\\n\\t\\t- Add subtle micro-animations for enhanced user experience,\\n3. **Use a Dynamic Design**: An interface that feels responsive and alive encourages interaction. Achieve this with hover effects and interactive elements. Micro-animations, in particular, are highly effective for improving user engagement.\\n4. **Premium Designs**. Make a design that feels premium and state of the art. Avoid creating simple minimum viable products.\\n4. **Don't use placeholders**. If you need an image, use your generate_image tool to create a working demonstration.,\\n\\n## Implementation Workflow,\\nFollow this systematic approach when building web applications:,\\n1. **Plan and Understand**:,\\n\\t\\t- Fully understand the user's requirements,\\n\\t\\t- Draw inspiration from modern, beautiful, and dynamic web designs,\\n\\t\\t- Outline the features needed for the initial version,\\n2. **Build the Foundation**:,\\n\\t\\t- Start by creating/modifying `index.css`,\\n\\t\\t- Implement the core design system with all tokens and utilities,\\n3. **Create Components**:,\\n\\t\\t- Build necessary components using your design system,\\n\\t\\t- Ensure all components use predefined styles, not ad-hoc utilities,\\n\\t\\t- Keep components focused and reusable,\\n4. **Assemble Pages**:,\\n\\t\\t- Update the main application to incorporate your design and components,\\n\\t\\t- Ensure proper routing and navigation,\\n\\t\\t- Implement responsive layouts,\\n5. **Polish and Optimize**:,\\n\\t\\t- Review the overall user experience,\\n\\t\\t- Ensure smooth interactions and transitions,\\n\\t\\t- Optimize performance where needed,\\n\\n## SEO Best Practices,\\nAutomatically implement SEO best practices on every page:,\\n- **Title Tags**: Include proper, descriptive title tags for each page,\\n- **Meta Descriptions**: Add compelling meta descriptions that accurately summarize page content,\\n- **Heading Structure**: Use a single `

` per page with proper heading hierarchy,\\n- **Semantic HTML**: Use appropriate HTML5 semantic elements,\\n- **Unique IDs**: Ensure all interactive elements have unique, descriptive IDs for browser testing,\\n- **Performance**: Ensure fast page load times through optimization,\\nCRITICAL REMINDER: AESTHETICS ARE VERY IMPORTANT. If your web app looks simple and basic then you have FAILED!\\n\\n\\nThere will be an appearing in the conversation at times. This is not coming from the user, but instead injected by the system as important information to pay attention to. \\nDo not respond to nor acknowledge those messages, but do follow them strictly.\\n\\n\\n\\n\\n- **Formatting**. Format your responses in github-style markdown to make your responses easier for the USER to parse. For example, use headers to organize your responses and bolded or italicized text to highlight important keywords. Use backticks to format file, directory, function, and class names. If providing a URL to the user, format this in markdown as well, for example `[label](example.com)`.\\n- **Proactiveness**. As an agent, you are allowed to be proactive, but only in the course of completing the user's task. For example, if the user asks you to add a new component, you can edit the code, verify build and test statuses, and take any other obvious follow-up actions, such as performing additional research. However, avoid surprising the user. For example, if the user asks HOW to approach something, you should answer their question and instead of jumping into editing a file.\\n- **Helpfulness**. Respond like a helpful software engineer who is explaining your work to a friendly collaborator on the project. Acknowledge mistakes or any backtracking you do as a result of new information.\\n- **Ask for clarification**. If you are unsure about the USER's intent, always ask for clarification rather than making assumptions.\\n" } // buildSystemInstruction 构建 systemInstruction @@ -150,9 +139,9 @@ func buildSystemInstruction(system json.RawMessage, modelName string, opts Trans } // identity patch 模式下,用分隔符包裹 system prompt,便于上游识别/调试;关闭时尽量保持原始 system prompt。 - if opts.EnableIdentityPatch && len(parts) > 0 { - parts = append(parts, GeminiPart{Text: "\n--- [SYSTEM_PROMPT_END] ---"}) - } + //if opts.EnableIdentityPatch && len(parts) > 0 { + // parts = append(parts, GeminiPart{Text: "\n--- [SYSTEM_PROMPT_END] ---"}) + //} if len(parts) == 0 { return nil } From da1f3d61becc3cc35f244c691b38576bb0728afe Mon Sep 17 00:00:00 2001 From: song Date: Fri, 9 Jan 2026 17:35:02 +0800 Subject: [PATCH 003/147] =?UTF-8?q?feat:=20antigravity=20=E9=85=8D?= =?UTF-8?q?=E9=A2=9D=E5=9F=9F=E9=99=90=E6=B5=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/repository/account_repo.go | 55 ++++++++++++ backend/internal/service/account_service.go | 2 + .../service/account_service_delete_test.go | 8 ++ .../service/antigravity_gateway_service.go | 30 +++++-- .../service/antigravity_quota_scope.go | 88 +++++++++++++++++++ .../service/gateway_multiplatform_test.go | 6 ++ backend/internal/service/gateway_service.go | 15 +++- .../service/gemini_messages_compat_service.go | 5 +- .../service/gemini_multiplatform_test.go | 6 ++ backend/internal/service/ratelimit_service.go | 7 +- 10 files changed, 207 insertions(+), 15 deletions(-) create mode 100644 backend/internal/service/antigravity_quota_scope.go diff --git a/backend/internal/repository/account_repo.go b/backend/internal/repository/account_repo.go index 83f02608..30a783bc 100644 --- a/backend/internal/repository/account_repo.go +++ b/backend/internal/repository/account_repo.go @@ -675,6 +675,40 @@ func (r *accountRepository) SetRateLimited(ctx context.Context, id int64, resetA return err } +func (r *accountRepository) SetAntigravityQuotaScopeLimit(ctx context.Context, id int64, scope service.AntigravityQuotaScope, resetAt time.Time) error { + now := time.Now().UTC() + payload := map[string]string{ + "rate_limited_at": now.Format(time.RFC3339), + "rate_limit_reset_at": resetAt.UTC().Format(time.RFC3339), + } + raw, err := json.Marshal(payload) + if err != nil { + return err + } + + path := "{antigravity_quota_scopes," + string(scope) + "}" + client := clientFromContext(ctx, r.client) + result, err := client.ExecContext( + ctx, + "UPDATE accounts SET extra = jsonb_set(COALESCE(extra, '{}'::jsonb), $1::text[], $2::jsonb, true), updated_at = NOW() WHERE id = $3 AND deleted_at IS NULL", + path, + raw, + id, + ) + if err != nil { + return err + } + + affected, err := result.RowsAffected() + if err != nil { + return err + } + if affected == 0 { + return service.ErrAccountNotFound + } + return nil +} + func (r *accountRepository) SetOverloaded(ctx context.Context, id int64, until time.Time) error { _, err := r.client.Account.Update(). Where(dbaccount.IDEQ(id)). @@ -718,6 +752,27 @@ func (r *accountRepository) ClearRateLimit(ctx context.Context, id int64) error return err } +func (r *accountRepository) ClearAntigravityQuotaScopes(ctx context.Context, id int64) error { + client := clientFromContext(ctx, r.client) + result, err := client.ExecContext( + ctx, + "UPDATE accounts SET extra = COALESCE(extra, '{}'::jsonb) - 'antigravity_quota_scopes', updated_at = NOW() WHERE id = $1 AND deleted_at IS NULL", + id, + ) + if err != nil { + return err + } + + affected, err := result.RowsAffected() + if err != nil { + return err + } + if affected == 0 { + return service.ErrAccountNotFound + } + return nil +} + func (r *accountRepository) UpdateSessionWindow(ctx context.Context, id int64, start, end *time.Time, status string) error { builder := r.client.Account.Update(). Where(dbaccount.IDEQ(id)). diff --git a/backend/internal/service/account_service.go b/backend/internal/service/account_service.go index e1b93fcb..de32cfeb 100644 --- a/backend/internal/service/account_service.go +++ b/backend/internal/service/account_service.go @@ -49,10 +49,12 @@ type AccountRepository interface { ListSchedulableByGroupIDAndPlatforms(ctx context.Context, groupID int64, platforms []string) ([]Account, error) SetRateLimited(ctx context.Context, id int64, resetAt time.Time) error + SetAntigravityQuotaScopeLimit(ctx context.Context, id int64, scope AntigravityQuotaScope, resetAt time.Time) error SetOverloaded(ctx context.Context, id int64, until time.Time) error SetTempUnschedulable(ctx context.Context, id int64, until time.Time, reason string) error ClearTempUnschedulable(ctx context.Context, id int64) error ClearRateLimit(ctx context.Context, id int64) error + ClearAntigravityQuotaScopes(ctx context.Context, id int64) error UpdateSessionWindow(ctx context.Context, id int64, start, end *time.Time, status string) error UpdateExtra(ctx context.Context, id int64, updates map[string]any) error BulkUpdate(ctx context.Context, ids []int64, updates AccountBulkUpdate) (int64, error) diff --git a/backend/internal/service/account_service_delete_test.go b/backend/internal/service/account_service_delete_test.go index edad8672..6923067d 100644 --- a/backend/internal/service/account_service_delete_test.go +++ b/backend/internal/service/account_service_delete_test.go @@ -139,6 +139,10 @@ func (s *accountRepoStub) SetRateLimited(ctx context.Context, id int64, resetAt panic("unexpected SetRateLimited call") } +func (s *accountRepoStub) SetAntigravityQuotaScopeLimit(ctx context.Context, id int64, scope AntigravityQuotaScope, resetAt time.Time) error { + panic("unexpected SetAntigravityQuotaScopeLimit call") +} + func (s *accountRepoStub) SetOverloaded(ctx context.Context, id int64, until time.Time) error { panic("unexpected SetOverloaded call") } @@ -155,6 +159,10 @@ func (s *accountRepoStub) ClearRateLimit(ctx context.Context, id int64) error { panic("unexpected ClearRateLimit call") } +func (s *accountRepoStub) ClearAntigravityQuotaScopes(ctx context.Context, id int64) error { + panic("unexpected ClearAntigravityQuotaScopes call") +} + func (s *accountRepoStub) UpdateSessionWindow(ctx context.Context, id int64, start, end *time.Time, status string) error { panic("unexpected UpdateSessionWindow call") } diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index aabeea16..fe4eb621 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -451,6 +451,7 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, originalModel := claudeReq.Model mappedModel := s.getMappedModel(account, claudeReq.Model) + quotaScope, _ := resolveAntigravityQuotaScope(originalModel) // 获取 access_token if s.tokenProvider == nil { @@ -529,7 +530,7 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, } // 所有重试都失败,标记限流状态 if resp.StatusCode == 429 { - s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody) + s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) } // 最后一次尝试也失败 resp = &http.Response{ @@ -621,7 +622,7 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, // 处理错误响应(重试后仍失败或不触发重试) if resp.StatusCode >= 400 { - s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody) + s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) if s.shouldFailoverUpstreamError(resp.StatusCode) { return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode} @@ -946,6 +947,7 @@ func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Co if len(body) == 0 { return nil, s.writeGoogleError(c, http.StatusBadRequest, "Request body is empty") } + quotaScope, _ := resolveAntigravityQuotaScope(originalModel) // 解析请求以获取 image_size(用于图片计费) imageSize := s.extractImageSize(body) @@ -1048,7 +1050,7 @@ func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Co } // 所有重试都失败,标记限流状态 if resp.StatusCode == 429 { - s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody) + s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) } resp = &http.Response{ StatusCode: resp.StatusCode, @@ -1101,7 +1103,7 @@ func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Co goto handleSuccess } - s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody) + s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) if s.shouldFailoverUpstreamError(resp.StatusCode) { return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode} @@ -1215,7 +1217,7 @@ func sleepAntigravityBackoffWithContext(ctx context.Context, attempt int) bool { } } -func (s *AntigravityGatewayService) handleUpstreamError(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte) { +func (s *AntigravityGatewayService) handleUpstreamError(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope) { // 429 使用 Gemini 格式解析(从 body 解析重置时间) if statusCode == 429 { resetAt := ParseGeminiRateLimitResetTime(body) @@ -1226,13 +1228,23 @@ func (s *AntigravityGatewayService) handleUpstreamError(ctx context.Context, pre defaultDur = 5 * time.Minute } ra := time.Now().Add(defaultDur) - log.Printf("%s status=429 rate_limited reset_in=%v (fallback)", prefix, defaultDur) - _ = s.accountRepo.SetRateLimited(ctx, account.ID, ra) + log.Printf("%s status=429 rate_limited scope=%s reset_in=%v (fallback)", prefix, quotaScope, defaultDur) + if quotaScope == "" { + return + } + if err := s.accountRepo.SetAntigravityQuotaScopeLimit(ctx, account.ID, quotaScope, ra); err != nil { + log.Printf("%s status=429 rate_limit_set_failed scope=%s error=%v", prefix, quotaScope, err) + } return } resetTime := time.Unix(*resetAt, 0) - log.Printf("%s status=429 rate_limited reset_at=%v reset_in=%v", prefix, resetTime.Format("15:04:05"), time.Until(resetTime).Truncate(time.Second)) - _ = s.accountRepo.SetRateLimited(ctx, account.ID, resetTime) + log.Printf("%s status=429 rate_limited scope=%s reset_at=%v reset_in=%v", prefix, quotaScope, resetTime.Format("15:04:05"), time.Until(resetTime).Truncate(time.Second)) + if quotaScope == "" { + return + } + if err := s.accountRepo.SetAntigravityQuotaScopeLimit(ctx, account.ID, quotaScope, resetTime); err != nil { + log.Printf("%s status=429 rate_limit_set_failed scope=%s error=%v", prefix, quotaScope, err) + } return } // 其他错误码继续使用 rateLimitService diff --git a/backend/internal/service/antigravity_quota_scope.go b/backend/internal/service/antigravity_quota_scope.go new file mode 100644 index 00000000..e9f7184b --- /dev/null +++ b/backend/internal/service/antigravity_quota_scope.go @@ -0,0 +1,88 @@ +package service + +import ( + "strings" + "time" +) + +const antigravityQuotaScopesKey = "antigravity_quota_scopes" + +// AntigravityQuotaScope 表示 Antigravity 的配额域 +type AntigravityQuotaScope string + +const ( + AntigravityQuotaScopeClaude AntigravityQuotaScope = "claude" + AntigravityQuotaScopeGeminiText AntigravityQuotaScope = "gemini_text" + AntigravityQuotaScopeGeminiImage AntigravityQuotaScope = "gemini_image" +) + +// resolveAntigravityQuotaScope 根据模型名称解析配额域 +func resolveAntigravityQuotaScope(requestedModel string) (AntigravityQuotaScope, bool) { + model := normalizeAntigravityModelName(requestedModel) + if model == "" { + return "", false + } + switch { + case strings.HasPrefix(model, "claude-"): + return AntigravityQuotaScopeClaude, true + case strings.HasPrefix(model, "gemini-"): + if isImageGenerationModel(model) { + return AntigravityQuotaScopeGeminiImage, true + } + return AntigravityQuotaScopeGeminiText, true + default: + return "", false + } +} + +func normalizeAntigravityModelName(model string) string { + normalized := strings.ToLower(strings.TrimSpace(model)) + normalized = strings.TrimPrefix(normalized, "models/") + return normalized +} + +// IsSchedulableForModel 结合 Antigravity 配额域限流判断是否可调度 +func (a *Account) IsSchedulableForModel(requestedModel string) bool { + if a == nil { + return false + } + if !a.IsSchedulable() { + return false + } + if a.Platform != PlatformAntigravity { + return true + } + scope, ok := resolveAntigravityQuotaScope(requestedModel) + if !ok { + return true + } + resetAt := a.antigravityQuotaScopeResetAt(scope) + if resetAt == nil { + return true + } + now := time.Now() + return !now.Before(*resetAt) +} + +func (a *Account) antigravityQuotaScopeResetAt(scope AntigravityQuotaScope) *time.Time { + if a == nil || a.Extra == nil || scope == "" { + return nil + } + rawScopes, ok := a.Extra[antigravityQuotaScopesKey].(map[string]any) + if !ok { + return nil + } + rawScope, ok := rawScopes[string(scope)].(map[string]any) + if !ok { + return nil + } + resetAtRaw, ok := rawScope["rate_limit_reset_at"].(string) + if !ok || strings.TrimSpace(resetAtRaw) == "" { + return nil + } + resetAt, err := time.Parse(time.RFC3339, resetAtRaw) + if err != nil { + return nil + } + return &resetAt +} diff --git a/backend/internal/service/gateway_multiplatform_test.go b/backend/internal/service/gateway_multiplatform_test.go index 47279581..8f29e07c 100644 --- a/backend/internal/service/gateway_multiplatform_test.go +++ b/backend/internal/service/gateway_multiplatform_test.go @@ -136,6 +136,9 @@ func (m *mockAccountRepoForPlatform) ListSchedulableByGroupIDAndPlatforms(ctx co func (m *mockAccountRepoForPlatform) SetRateLimited(ctx context.Context, id int64, resetAt time.Time) error { return nil } +func (m *mockAccountRepoForPlatform) SetAntigravityQuotaScopeLimit(ctx context.Context, id int64, scope AntigravityQuotaScope, resetAt time.Time) error { + return nil +} func (m *mockAccountRepoForPlatform) SetOverloaded(ctx context.Context, id int64, until time.Time) error { return nil } @@ -148,6 +151,9 @@ func (m *mockAccountRepoForPlatform) ClearTempUnschedulable(ctx context.Context, func (m *mockAccountRepoForPlatform) ClearRateLimit(ctx context.Context, id int64) error { return nil } +func (m *mockAccountRepoForPlatform) ClearAntigravityQuotaScopes(ctx context.Context, id int64) error { + return nil +} func (m *mockAccountRepoForPlatform) UpdateSessionWindow(ctx context.Context, id int64, start, end *time.Time, status string) error { return nil } diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 98c061d4..209e4dee 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -448,7 +448,7 @@ func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, gro account, err := s.accountRepo.GetByID(ctx, accountID) if err == nil && s.isAccountInGroup(account, groupID) && s.isAccountAllowedForPlatform(account, platform, useMixed) && - account.IsSchedulable() && + account.IsSchedulableForModel(requestedModel) && (requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) { result, err := s.tryAcquireAccountSlot(ctx, accountID, account.Concurrency) if err == nil && result.Acquired { @@ -486,6 +486,9 @@ func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, gro if !s.isAccountAllowedForPlatform(acc, platform, useMixed) { continue } + if !acc.IsSchedulableForModel(requestedModel) { + continue + } if requestedModel != "" && !s.isModelSupportedByAccount(acc, requestedModel) { continue } @@ -743,7 +746,7 @@ func (s *GatewayService) selectAccountForModelWithPlatform(ctx context.Context, if _, excluded := excludedIDs[accountID]; !excluded { account, err := s.accountRepo.GetByID(ctx, accountID) // 检查账号分组归属和平台匹配(确保粘性会话不会跨分组或跨平台) - if err == nil && s.isAccountInGroup(account, groupID) && account.Platform == platform && account.IsSchedulable() && (requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) { + if err == nil && s.isAccountInGroup(account, groupID) && account.Platform == platform && account.IsSchedulableForModel(requestedModel) && (requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) { if err := s.cache.RefreshSessionTTL(ctx, sessionHash, stickySessionTTL); err != nil { log.Printf("refresh session ttl failed: session=%s err=%v", sessionHash, err) } @@ -775,6 +778,9 @@ func (s *GatewayService) selectAccountForModelWithPlatform(ctx context.Context, if _, excluded := excludedIDs[acc.ID]; excluded { continue } + if !acc.IsSchedulableForModel(requestedModel) { + continue + } if requestedModel != "" && !s.isModelSupportedByAccount(acc, requestedModel) { continue } @@ -832,7 +838,7 @@ func (s *GatewayService) selectAccountWithMixedScheduling(ctx context.Context, g if _, excluded := excludedIDs[accountID]; !excluded { account, err := s.accountRepo.GetByID(ctx, accountID) // 检查账号分组归属和有效性:原生平台直接匹配,antigravity 需要启用混合调度 - if err == nil && s.isAccountInGroup(account, groupID) && account.IsSchedulable() && (requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) { + if err == nil && s.isAccountInGroup(account, groupID) && account.IsSchedulableForModel(requestedModel) && (requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) { if account.Platform == nativePlatform || (account.Platform == PlatformAntigravity && account.IsMixedSchedulingEnabled()) { if err := s.cache.RefreshSessionTTL(ctx, sessionHash, stickySessionTTL); err != nil { log.Printf("refresh session ttl failed: session=%s err=%v", sessionHash, err) @@ -867,6 +873,9 @@ func (s *GatewayService) selectAccountWithMixedScheduling(ctx context.Context, g if acc.Platform == PlatformAntigravity && !acc.IsMixedSchedulingEnabled() { continue } + if !acc.IsSchedulableForModel(requestedModel) { + continue + } if requestedModel != "" && !s.isModelSupportedByAccount(acc, requestedModel) { continue } diff --git a/backend/internal/service/gemini_messages_compat_service.go b/backend/internal/service/gemini_messages_compat_service.go index fdf912d0..13f644c8 100644 --- a/backend/internal/service/gemini_messages_compat_service.go +++ b/backend/internal/service/gemini_messages_compat_service.go @@ -114,7 +114,7 @@ func (s *GeminiMessagesCompatService) SelectAccountForModelWithExclusions(ctx co if _, excluded := excludedIDs[accountID]; !excluded { account, err := s.accountRepo.GetByID(ctx, accountID) // 检查账号是否有效:原生平台直接匹配,antigravity 需要启用混合调度 - if err == nil && account.IsSchedulable() && (requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) { + if err == nil && account.IsSchedulableForModel(requestedModel) && (requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) { valid := false if account.Platform == platform { valid = true @@ -172,6 +172,9 @@ func (s *GeminiMessagesCompatService) SelectAccountForModelWithExclusions(ctx co if useMixedScheduling && acc.Platform == PlatformAntigravity && !acc.IsMixedSchedulingEnabled() { continue } + if !acc.IsSchedulableForModel(requestedModel) { + continue + } if requestedModel != "" && !s.isModelSupportedByAccount(acc, requestedModel) { continue } diff --git a/backend/internal/service/gemini_multiplatform_test.go b/backend/internal/service/gemini_multiplatform_test.go index 5070b510..794e56a7 100644 --- a/backend/internal/service/gemini_multiplatform_test.go +++ b/backend/internal/service/gemini_multiplatform_test.go @@ -121,6 +121,9 @@ func (m *mockAccountRepoForGemini) ListSchedulableByGroupIDAndPlatforms(ctx cont func (m *mockAccountRepoForGemini) SetRateLimited(ctx context.Context, id int64, resetAt time.Time) error { return nil } +func (m *mockAccountRepoForGemini) SetAntigravityQuotaScopeLimit(ctx context.Context, id int64, scope AntigravityQuotaScope, resetAt time.Time) error { + return nil +} func (m *mockAccountRepoForGemini) SetOverloaded(ctx context.Context, id int64, until time.Time) error { return nil } @@ -131,6 +134,9 @@ func (m *mockAccountRepoForGemini) ClearTempUnschedulable(ctx context.Context, i return nil } func (m *mockAccountRepoForGemini) ClearRateLimit(ctx context.Context, id int64) error { return nil } +func (m *mockAccountRepoForGemini) ClearAntigravityQuotaScopes(ctx context.Context, id int64) error { + return nil +} func (m *mockAccountRepoForGemini) UpdateSessionWindow(ctx context.Context, id int64, start, end *time.Time, status string) error { return nil } diff --git a/backend/internal/service/ratelimit_service.go b/backend/internal/service/ratelimit_service.go index 196f1643..f1362646 100644 --- a/backend/internal/service/ratelimit_service.go +++ b/backend/internal/service/ratelimit_service.go @@ -345,7 +345,7 @@ func (s *RateLimitService) UpdateSessionWindow(ctx context.Context, account *Acc // 如果状态为allowed且之前有限流,说明窗口已重置,清除限流状态 if status == "allowed" && account.IsRateLimited() { - if err := s.accountRepo.ClearRateLimit(ctx, account.ID); err != nil { + if err := s.ClearRateLimit(ctx, account.ID); err != nil { log.Printf("ClearRateLimit failed for account %d: %v", account.ID, err) } } @@ -353,7 +353,10 @@ func (s *RateLimitService) UpdateSessionWindow(ctx context.Context, account *Acc // ClearRateLimit 清除账号的限流状态 func (s *RateLimitService) ClearRateLimit(ctx context.Context, accountID int64) error { - return s.accountRepo.ClearRateLimit(ctx, accountID) + if err := s.accountRepo.ClearRateLimit(ctx, accountID); err != nil { + return err + } + return s.accountRepo.ClearAntigravityQuotaScopes(ctx, accountID) } func (s *RateLimitService) ClearTempUnschedulable(ctx context.Context, accountID int64) error { From 7b1cf2c495cd667c088b144945b38761757e753d Mon Sep 17 00:00:00 2001 From: song Date: Fri, 9 Jan 2026 20:47:13 +0800 Subject: [PATCH 004/147] =?UTF-8?q?chore:=20=E8=B0=83=E6=95=B4=20SSE=20?= =?UTF-8?q?=E5=8D=95=E8=A1=8C=E4=B8=8A=E9=99=90=E5=88=B0=2025MB?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/config/config.go | 2 +- backend/internal/service/gateway_service.go | 2 +- config.yaml | 6 +++--- deploy/config.example.yaml | 6 +++--- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index c1e15290..aaaaf3bd 100644 --- a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -544,7 +544,7 @@ func setDefaults() { viper.SetDefault("gateway.concurrency_slot_ttl_minutes", 30) // 并发槽位过期时间(支持超长请求) viper.SetDefault("gateway.stream_data_interval_timeout", 180) viper.SetDefault("gateway.stream_keepalive_interval", 10) - viper.SetDefault("gateway.max_line_size", 10*1024*1024) + viper.SetDefault("gateway.max_line_size", 25*1024*1024) viper.SetDefault("gateway.scheduling.sticky_session_max_waiting", 3) viper.SetDefault("gateway.scheduling.sticky_session_wait_timeout", 45*time.Second) viper.SetDefault("gateway.scheduling.fallback_wait_timeout", 30*time.Second) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 209e4dee..63245933 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -33,7 +33,7 @@ const ( claudeAPIURL = "https://api.anthropic.com/v1/messages?beta=true" claudeAPICountTokensURL = "https://api.anthropic.com/v1/messages/count_tokens?beta=true" stickySessionTTL = time.Hour // 粘性会话TTL - defaultMaxLineSize = 10 * 1024 * 1024 + defaultMaxLineSize = 25 * 1024 * 1024 claudeCodeSystemPrompt = "You are Claude Code, Anthropic's official CLI for Claude." maxCacheControlBlocks = 4 // Anthropic API 允许的最大 cache_control 块数量 ) diff --git a/config.yaml b/config.yaml index f43c9c19..c282bf9a 100644 --- a/config.yaml +++ b/config.yaml @@ -154,9 +154,9 @@ gateway: # Stream keepalive interval (seconds), 0=disable # 流式 keepalive 间隔(秒),0=禁用 stream_keepalive_interval: 10 - # SSE max line size in bytes (default: 10MB) - # SSE 单行最大字节数(默认 10MB) - max_line_size: 10485760 + # SSE max line size in bytes (default: 25MB) + # SSE 单行最大字节数(默认 25MB) + max_line_size: 26214400 # Log upstream error response body summary (safe/truncated; does not log request content) # 记录上游错误响应体摘要(安全/截断;不记录请求内容) log_upstream_error_body: false diff --git a/deploy/config.example.yaml b/deploy/config.example.yaml index 49bf0afa..40fab16e 100644 --- a/deploy/config.example.yaml +++ b/deploy/config.example.yaml @@ -154,9 +154,9 @@ gateway: # Stream keepalive interval (seconds), 0=disable # 流式 keepalive 间隔(秒),0=禁用 stream_keepalive_interval: 10 - # SSE max line size in bytes (default: 10MB) - # SSE 单行最大字节数(默认 10MB) - max_line_size: 10485760 + # SSE max line size in bytes (default: 25MB) + # SSE 单行最大字节数(默认 25MB) + max_line_size: 26214400 # Log upstream error response body summary (safe/truncated; does not log request content) # 记录上游错误响应体摘要(安全/截断;不记录请求内容) log_upstream_error_body: false From c2a6ca8d3a237146449afdd97ac08e25cf377506 Mon Sep 17 00:00:00 2001 From: song Date: Fri, 9 Jan 2026 20:57:06 +0800 Subject: [PATCH 005/147] =?UTF-8?q?chore:=20=E6=8F=90=E5=8D=87=20SSE=20?= =?UTF-8?q?=E5=8D=95=E8=A1=8C=E4=B8=8A=E9=99=90=E5=88=B0=2040MB?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/config/config.go | 2 +- backend/internal/service/gateway_service.go | 2 +- config.yaml | 6 +++--- deploy/config.example.yaml | 6 +++--- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index aaaaf3bd..d13a460a 100644 --- a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -544,7 +544,7 @@ func setDefaults() { viper.SetDefault("gateway.concurrency_slot_ttl_minutes", 30) // 并发槽位过期时间(支持超长请求) viper.SetDefault("gateway.stream_data_interval_timeout", 180) viper.SetDefault("gateway.stream_keepalive_interval", 10) - viper.SetDefault("gateway.max_line_size", 25*1024*1024) + viper.SetDefault("gateway.max_line_size", 40*1024*1024) viper.SetDefault("gateway.scheduling.sticky_session_max_waiting", 3) viper.SetDefault("gateway.scheduling.sticky_session_wait_timeout", 45*time.Second) viper.SetDefault("gateway.scheduling.fallback_wait_timeout", 30*time.Second) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 63245933..6da9b565 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -33,7 +33,7 @@ const ( claudeAPIURL = "https://api.anthropic.com/v1/messages?beta=true" claudeAPICountTokensURL = "https://api.anthropic.com/v1/messages/count_tokens?beta=true" stickySessionTTL = time.Hour // 粘性会话TTL - defaultMaxLineSize = 25 * 1024 * 1024 + defaultMaxLineSize = 40 * 1024 * 1024 claudeCodeSystemPrompt = "You are Claude Code, Anthropic's official CLI for Claude." maxCacheControlBlocks = 4 // Anthropic API 允许的最大 cache_control 块数量 ) diff --git a/config.yaml b/config.yaml index c282bf9a..54b591f3 100644 --- a/config.yaml +++ b/config.yaml @@ -154,9 +154,9 @@ gateway: # Stream keepalive interval (seconds), 0=disable # 流式 keepalive 间隔(秒),0=禁用 stream_keepalive_interval: 10 - # SSE max line size in bytes (default: 25MB) - # SSE 单行最大字节数(默认 25MB) - max_line_size: 26214400 + # SSE max line size in bytes (default: 40MB) + # SSE 单行最大字节数(默认 40MB) + max_line_size: 41943040 # Log upstream error response body summary (safe/truncated; does not log request content) # 记录上游错误响应体摘要(安全/截断;不记录请求内容) log_upstream_error_body: false diff --git a/deploy/config.example.yaml b/deploy/config.example.yaml index 40fab16e..60d79377 100644 --- a/deploy/config.example.yaml +++ b/deploy/config.example.yaml @@ -154,9 +154,9 @@ gateway: # Stream keepalive interval (seconds), 0=disable # 流式 keepalive 间隔(秒),0=禁用 stream_keepalive_interval: 10 - # SSE max line size in bytes (default: 25MB) - # SSE 单行最大字节数(默认 25MB) - max_line_size: 26214400 + # SSE max line size in bytes (default: 40MB) + # SSE 单行最大字节数(默认 40MB) + max_line_size: 41943040 # Log upstream error response body summary (safe/truncated; does not log request content) # 记录上游错误响应体摘要(安全/截断;不记录请求内容) log_upstream_error_body: false From f0ece82111b88fbdcd84b5fda2689eee78bd91ad Mon Sep 17 00:00:00 2001 From: song Date: Mon, 12 Jan 2026 17:01:57 +0800 Subject: [PATCH 006/147] =?UTF-8?q?feat:=20=E5=9C=A8=20dashboard=20?= =?UTF-8?q?=E5=8F=B3=E4=B8=8A=E8=A7=92=E6=B7=BB=E5=8A=A0=E6=96=87=E6=A1=A3?= =?UTF-8?q?=E9=93=BE=E6=8E=A5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- frontend/src/components/layout/AppHeader.vue | 15 ++++++++++++++- frontend/src/i18n/locales/en.ts | 3 ++- frontend/src/i18n/locales/zh.ts | 3 ++- 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/frontend/src/components/layout/AppHeader.vue b/frontend/src/components/layout/AppHeader.vue index fd8742c3..9d2b40fb 100644 --- a/frontend/src/components/layout/AppHeader.vue +++ b/frontend/src/components/layout/AppHeader.vue @@ -21,8 +21,20 @@ - +
+ + + + + + @@ -211,6 +223,7 @@ const user = computed(() => authStore.user) const dropdownOpen = ref(false) const dropdownRef = ref(null) const contactInfo = computed(() => appStore.contactInfo) +const docUrl = computed(() => appStore.docUrl) // 只在标准模式的管理员下显示新手引导按钮 const showOnboardingButton = computed(() => { diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts index ca220281..cd7648bd 100644 --- a/frontend/src/i18n/locales/en.ts +++ b/frontend/src/i18n/locales/en.ts @@ -185,7 +185,8 @@ export default { expand: 'Expand', logout: 'Logout', github: 'GitHub', - mySubscriptions: 'My Subscriptions' + mySubscriptions: 'My Subscriptions', + docs: 'Docs' }, // Auth diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index 6749c02e..4b43e0b4 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -183,7 +183,8 @@ export default { expand: '展开', logout: '退出登录', github: 'GitHub', - mySubscriptions: '我的订阅' + mySubscriptions: '我的订阅', + docs: '文档' }, // Auth From e1015c27599db6f90b3d1c1b5a493c7b2f7112af Mon Sep 17 00:00:00 2001 From: song Date: Tue, 13 Jan 2026 12:58:05 +0800 Subject: [PATCH 007/147] =?UTF-8?q?fix:=20=E4=BF=AE=E5=A4=8D=20Antigravity?= =?UTF-8?q?=20=E5=9B=BE=E7=89=87=E7=94=9F=E6=88=90=E5=93=8D=E5=BA=94?= =?UTF-8?q?=E4=B8=A2=E5=A4=B1=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 流式转非流式时,图片数据在中间 chunk 返回,最后一个 chunk 只有 finishReason,导致只保留最后 chunk 时图片丢失。 添加 collectedImageParts 收集所有图片 parts,并在返回前合并。 --- .../service/antigravity_gateway_service.go | 76 +++++++++++++++++++ 1 file changed, 76 insertions(+) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 4fd55757..4ab12d2d 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -1219,6 +1219,7 @@ urlFallbackLoop: if contentType == "" { contentType = "application/json" } + log.Printf("[antigravity-Forward] upstream error status=%d body=%s", resp.StatusCode, truncateForLog(respBody, 500)) c.Data(resp.StatusCode, contentType, unwrapped) return nil, fmt.Errorf("antigravity upstream error: %d", resp.StatusCode) } @@ -1534,6 +1535,7 @@ func (s *AntigravityGatewayService) handleGeminiStreamToNonStreaming(c *gin.Cont var firstTokenMs *int var last map[string]any var lastWithParts map[string]any + var collectedImageParts []map[string]any // 收集所有包含图片的 parts type scanEvent struct { line string @@ -1636,6 +1638,13 @@ func (s *AntigravityGatewayService) handleGeminiStreamToNonStreaming(c *gin.Cont // 保留最后一个有 parts 的响应 if parts := extractGeminiParts(parsed); len(parts) > 0 { lastWithParts = parsed + // 收集包含图片的 parts + for _, part := range parts { + if inlineData, ok := part["inlineData"].(map[string]any); ok { + collectedImageParts = append(collectedImageParts, part) + _ = inlineData // 避免 unused 警告 + } + } } case <-intervalCh: @@ -1657,6 +1666,11 @@ returnResponse: log.Printf("[antigravity-Forward] warning: empty stream response, no valid chunks received") } + // 如果收集到了图片 parts,需要合并到最终响应中 + if len(collectedImageParts) > 0 { + finalResponse = mergeImagePartsToResponse(finalResponse, collectedImageParts) + } + respBody, err := json.Marshal(finalResponse) if err != nil { return nil, fmt.Errorf("failed to marshal response: %w", err) @@ -1666,6 +1680,68 @@ returnResponse: return &antigravityStreamResult{usage: usage, firstTokenMs: firstTokenMs}, nil } +// mergeImagePartsToResponse 将收集到的图片 parts 合并到 Gemini 响应中 +// 这是因为流式响应中,图片可能在某个 chunk 返回,而最终 chunk 可能不包含图片 +func mergeImagePartsToResponse(response map[string]any, imageParts []map[string]any) map[string]any { + if len(imageParts) == 0 { + return response + } + + // 深拷贝 response 避免修改原始数据 + result := make(map[string]any) + for k, v := range response { + result[k] = v + } + + // 获取或创建 candidates + candidates, ok := result["candidates"].([]any) + if !ok || len(candidates) == 0 { + candidates = []any{map[string]any{}} + } + + // 获取第一个 candidate + candidate, ok := candidates[0].(map[string]any) + if !ok { + candidate = make(map[string]any) + candidates[0] = candidate + } + + // 获取或创建 content + content, ok := candidate["content"].(map[string]any) + if !ok { + content = map[string]any{"role": "model"} + candidate["content"] = content + } + + // 获取现有 parts + existingParts, ok := content["parts"].([]any) + if !ok { + existingParts = []any{} + } + + // 检查现有 parts 中是否已经有图片 + hasExistingImage := false + for _, p := range existingParts { + if pm, ok := p.(map[string]any); ok { + if _, hasInline := pm["inlineData"]; hasInline { + hasExistingImage = true + break + } + } + } + + // 如果没有现有图片,添加收集到的图片 parts + if !hasExistingImage { + for _, imgPart := range imageParts { + existingParts = append(existingParts, imgPart) + } + content["parts"] = existingParts + } + + result["candidates"] = candidates + return result +} + func (s *AntigravityGatewayService) writeClaudeError(c *gin.Context, status int, errType, message string) error { c.JSON(status, gin.H{ "type": "error", From c9d21d53e6e3c97470480363e453a81254bd2be9 Mon Sep 17 00:00:00 2001 From: song Date: Tue, 13 Jan 2026 13:04:03 +0800 Subject: [PATCH 008/147] =?UTF-8?q?fix:=20=E4=BF=AE=E5=A4=8D=20Antigravity?= =?UTF-8?q?=20=E9=9D=9E=E6=B5=81=E5=BC=8F=E5=93=8D=E5=BA=94=E6=96=87?= =?UTF-8?q?=E6=9C=AC=E4=B8=A2=E5=A4=B1=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Gemini 流式响应是增量的,需要累积所有 chunk 的文本内容。 原代码只保留最后一个有 parts 的 chunk,导致实际文本被空 text + thoughtSignature 的最终 chunk 覆盖。 添加 collectedTextParts 收集所有文本片段,返回前合并。 --- .../service/antigravity_gateway_service.go | 89 ++++++++++++++++++- 1 file changed, 87 insertions(+), 2 deletions(-) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 4ab12d2d..67f65929 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -1522,7 +1522,7 @@ func (s *AntigravityGatewayService) handleGeminiStreamingResponse(c *gin.Context } // handleGeminiStreamToNonStreaming 读取上游流式响应,合并为非流式响应返回给客户端 -// Gemini 流式响应中每个 chunk 都包含累积的完整文本,只需保留最后一个有效响应 +// Gemini 流式响应是增量的,需要累积所有 chunk 的内容 func (s *AntigravityGatewayService) handleGeminiStreamToNonStreaming(c *gin.Context, resp *http.Response, startTime time.Time) (*antigravityStreamResult, error) { scanner := bufio.NewScanner(resp.Body) maxLineSize := defaultMaxLineSize @@ -1536,6 +1536,7 @@ func (s *AntigravityGatewayService) handleGeminiStreamToNonStreaming(c *gin.Cont var last map[string]any var lastWithParts map[string]any var collectedImageParts []map[string]any // 收集所有包含图片的 parts + var collectedTextParts []string // 收集所有文本片段 type scanEvent struct { line string @@ -1638,12 +1639,15 @@ func (s *AntigravityGatewayService) handleGeminiStreamToNonStreaming(c *gin.Cont // 保留最后一个有 parts 的响应 if parts := extractGeminiParts(parsed); len(parts) > 0 { lastWithParts = parsed - // 收集包含图片的 parts + // 收集包含图片和文本的 parts for _, part := range parts { if inlineData, ok := part["inlineData"].(map[string]any); ok { collectedImageParts = append(collectedImageParts, part) _ = inlineData // 避免 unused 警告 } + if text, ok := part["text"].(string); ok && text != "" { + collectedTextParts = append(collectedTextParts, text) + } } } @@ -1671,6 +1675,11 @@ returnResponse: finalResponse = mergeImagePartsToResponse(finalResponse, collectedImageParts) } + // 如果收集到了文本,需要合并到最终响应中 + if len(collectedTextParts) > 0 { + finalResponse = mergeTextPartsToResponse(finalResponse, collectedTextParts) + } + respBody, err := json.Marshal(finalResponse) if err != nil { return nil, fmt.Errorf("failed to marshal response: %w", err) @@ -1742,6 +1751,82 @@ func mergeImagePartsToResponse(response map[string]any, imageParts []map[string] return result } +// mergeTextPartsToResponse 将收集到的文本合并到 Gemini 响应中 +// 流式响应是增量的,需要累积所有文本片段 +func mergeTextPartsToResponse(response map[string]any, textParts []string) map[string]any { + if len(textParts) == 0 { + return response + } + + // 合并所有文本 + mergedText := strings.Join(textParts, "") + + // 深拷贝 response 避免修改原始数据 + result := make(map[string]any) + for k, v := range response { + result[k] = v + } + + // 获取或创建 candidates + candidates, ok := result["candidates"].([]any) + if !ok || len(candidates) == 0 { + candidates = []any{map[string]any{}} + } + + // 获取第一个 candidate + candidate, ok := candidates[0].(map[string]any) + if !ok { + candidate = make(map[string]any) + candidates[0] = candidate + } + + // 获取或创建 content + content, ok := candidate["content"].(map[string]any) + if !ok { + content = map[string]any{"role": "model"} + candidate["content"] = content + } + + // 获取现有 parts + existingParts, ok := content["parts"].([]any) + if !ok { + existingParts = []any{} + } + + // 查找并更新第一个 text part,或创建新的 + textUpdated := false + newParts := make([]any, 0, len(existingParts)+1) + for _, p := range existingParts { + pm, ok := p.(map[string]any) + if !ok { + newParts = append(newParts, p) + continue + } + // 跳过空文本的 part(可能只有 thoughtSignature) + if _, hasText := pm["text"]; hasText && !textUpdated { + // 用累积的文本替换 + newPart := make(map[string]any) + for k, v := range pm { + newPart[k] = v + } + newPart["text"] = mergedText + newParts = append(newParts, newPart) + textUpdated = true + } else { + newParts = append(newParts, pm) + } + } + + // 如果没有找到 text part,添加一个新的 + if !textUpdated { + newParts = append([]any{map[string]any{"text": mergedText}}, newParts...) + } + + content["parts"] = newParts + result["candidates"] = candidates + return result +} + func (s *AntigravityGatewayService) writeClaudeError(c *gin.Context, status int, errType, message string) error { c.JSON(status, gin.H{ "type": "error", From 9a22d1a690db4b67383e9bd6814a6dba5ab4171d Mon Sep 17 00:00:00 2001 From: song Date: Tue, 13 Jan 2026 13:25:55 +0800 Subject: [PATCH 009/147] =?UTF-8?q?refactor:=20=E6=8F=90=E5=8F=96=20getOrC?= =?UTF-8?q?reateGeminiParts=20=E5=87=8F=E5=B0=91=E9=87=8D=E5=A4=8D?= =?UTF-8?q?=E4=BB=A3=E7=A0=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 将两个 merge 函数中重复的 Gemini 响应结构访问逻辑提取为公共函数。 --- .../service/antigravity_gateway_service.go | 135 +++++++----------- 1 file changed, 53 insertions(+), 82 deletions(-) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 67f65929..001afba6 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -1689,120 +1689,93 @@ returnResponse: return &antigravityStreamResult{usage: usage, firstTokenMs: firstTokenMs}, nil } +// getOrCreateGeminiParts 获取 Gemini 响应的 parts 结构,返回深拷贝和更新回调 +func getOrCreateGeminiParts(response map[string]any) (result map[string]any, existingParts []any, setParts func([]any)) { + // 深拷贝 response + result = make(map[string]any) + for k, v := range response { + result[k] = v + } + + // 获取或创建 candidates + candidates, ok := result["candidates"].([]any) + if !ok || len(candidates) == 0 { + candidates = []any{map[string]any{}} + } + + // 获取第一个 candidate + candidate, ok := candidates[0].(map[string]any) + if !ok { + candidate = make(map[string]any) + candidates[0] = candidate + } + + // 获取或创建 content + content, ok := candidate["content"].(map[string]any) + if !ok { + content = map[string]any{"role": "model"} + candidate["content"] = content + } + + // 获取现有 parts + existingParts, ok = content["parts"].([]any) + if !ok { + existingParts = []any{} + } + + // 返回更新回调 + setParts = func(newParts []any) { + content["parts"] = newParts + result["candidates"] = candidates + } + + return result, existingParts, setParts +} + // mergeImagePartsToResponse 将收集到的图片 parts 合并到 Gemini 响应中 -// 这是因为流式响应中,图片可能在某个 chunk 返回,而最终 chunk 可能不包含图片 func mergeImagePartsToResponse(response map[string]any, imageParts []map[string]any) map[string]any { if len(imageParts) == 0 { return response } - // 深拷贝 response 避免修改原始数据 - result := make(map[string]any) - for k, v := range response { - result[k] = v - } - - // 获取或创建 candidates - candidates, ok := result["candidates"].([]any) - if !ok || len(candidates) == 0 { - candidates = []any{map[string]any{}} - } - - // 获取第一个 candidate - candidate, ok := candidates[0].(map[string]any) - if !ok { - candidate = make(map[string]any) - candidates[0] = candidate - } - - // 获取或创建 content - content, ok := candidate["content"].(map[string]any) - if !ok { - content = map[string]any{"role": "model"} - candidate["content"] = content - } - - // 获取现有 parts - existingParts, ok := content["parts"].([]any) - if !ok { - existingParts = []any{} - } + result, existingParts, setParts := getOrCreateGeminiParts(response) // 检查现有 parts 中是否已经有图片 - hasExistingImage := false for _, p := range existingParts { if pm, ok := p.(map[string]any); ok { if _, hasInline := pm["inlineData"]; hasInline { - hasExistingImage = true - break + return result // 已有图片,不重复添加 } } } - // 如果没有现有图片,添加收集到的图片 parts - if !hasExistingImage { - for _, imgPart := range imageParts { - existingParts = append(existingParts, imgPart) - } - content["parts"] = existingParts + // 添加收集到的图片 parts + for _, imgPart := range imageParts { + existingParts = append(existingParts, imgPart) } - - result["candidates"] = candidates + setParts(existingParts) return result } // mergeTextPartsToResponse 将收集到的文本合并到 Gemini 响应中 -// 流式响应是增量的,需要累积所有文本片段 func mergeTextPartsToResponse(response map[string]any, textParts []string) map[string]any { if len(textParts) == 0 { return response } - // 合并所有文本 mergedText := strings.Join(textParts, "") - - // 深拷贝 response 避免修改原始数据 - result := make(map[string]any) - for k, v := range response { - result[k] = v - } - - // 获取或创建 candidates - candidates, ok := result["candidates"].([]any) - if !ok || len(candidates) == 0 { - candidates = []any{map[string]any{}} - } - - // 获取第一个 candidate - candidate, ok := candidates[0].(map[string]any) - if !ok { - candidate = make(map[string]any) - candidates[0] = candidate - } - - // 获取或创建 content - content, ok := candidate["content"].(map[string]any) - if !ok { - content = map[string]any{"role": "model"} - candidate["content"] = content - } - - // 获取现有 parts - existingParts, ok := content["parts"].([]any) - if !ok { - existingParts = []any{} - } + result, existingParts, setParts := getOrCreateGeminiParts(response) // 查找并更新第一个 text part,或创建新的 - textUpdated := false newParts := make([]any, 0, len(existingParts)+1) + textUpdated := false + for _, p := range existingParts { pm, ok := p.(map[string]any) if !ok { newParts = append(newParts, p) continue } - // 跳过空文本的 part(可能只有 thoughtSignature) if _, hasText := pm["text"]; hasText && !textUpdated { // 用累积的文本替换 newPart := make(map[string]any) @@ -1817,13 +1790,11 @@ func mergeTextPartsToResponse(response map[string]any, textParts []string) map[s } } - // 如果没有找到 text part,添加一个新的 if !textUpdated { newParts = append([]any{map[string]any{"text": mergedText}}, newParts...) } - content["parts"] = newParts - result["candidates"] = candidates + setParts(newParts) return result } From b4abfae4de0b25a31249b716fcc4c0868a6e996c Mon Sep 17 00:00:00 2001 From: song Date: Fri, 16 Jan 2026 10:31:55 +0800 Subject: [PATCH 010/147] =?UTF-8?q?fix:=20Antigravity=20=E6=B5=8B=E8=AF=95?= =?UTF-8?q?=E8=BF=9E=E6=8E=A5=E4=BD=BF=E7=94=A8=E6=9C=80=E5=B0=8F=20token?= =?UTF-8?q?=20=E6=B6=88=E8=80=97?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - buildGeminiTestRequest: 输入 "." + maxOutputTokens: 1 - buildClaudeTestRequest: 输入 "." + MaxTokens: 1 - buildGenerationConfig: 支持透传 MaxTokens 参数 --- .../internal/pkg/antigravity/request_transformer.go | 5 +++++ .../internal/service/antigravity_gateway_service.go | 11 ++++++++--- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/backend/internal/pkg/antigravity/request_transformer.go b/backend/internal/pkg/antigravity/request_transformer.go index a8474576..a6f72c22 100644 --- a/backend/internal/pkg/antigravity/request_transformer.go +++ b/backend/internal/pkg/antigravity/request_transformer.go @@ -429,6 +429,11 @@ func buildGenerationConfig(req *ClaudeRequest) *GeminiGenerationConfig { StopSequences: DefaultStopSequences, } + // 如果请求中指定了 MaxTokens,使用请求值 + if req.MaxTokens > 0 { + config.MaxOutputTokens = req.MaxTokens + } + // Thinking 配置 if req.Thinking != nil && req.Thinking.Type == "enabled" { config.ThinkingConfig = &GeminiThinkingConfig{ diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 001afba6..5ef2afd9 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -276,13 +276,14 @@ func (s *AntigravityGatewayService) TestConnection(ctx context.Context, account } // buildGeminiTestRequest 构建 Gemini 格式测试请求 +// 使用最小 token 消耗:输入 "." + maxOutputTokens: 1 func (s *AntigravityGatewayService) buildGeminiTestRequest(projectID, model string) ([]byte, error) { payload := map[string]any{ "contents": []map[string]any{ { "role": "user", "parts": []map[string]any{ - {"text": "hi"}, + {"text": "."}, }, }, }, @@ -292,22 +293,26 @@ func (s *AntigravityGatewayService) buildGeminiTestRequest(projectID, model stri {"text": antigravity.GetDefaultIdentityPatch()}, }, }, + "generationConfig": map[string]any{ + "maxOutputTokens": 1, + }, } payloadBytes, _ := json.Marshal(payload) return s.wrapV1InternalRequest(projectID, model, payloadBytes) } // buildClaudeTestRequest 构建 Claude 格式测试请求并转换为 Gemini 格式 +// 使用最小 token 消耗:输入 "." + MaxTokens: 1 func (s *AntigravityGatewayService) buildClaudeTestRequest(projectID, mappedModel string) ([]byte, error) { claudeReq := &antigravity.ClaudeRequest{ Model: mappedModel, Messages: []antigravity.ClaudeMessage{ { Role: "user", - Content: json.RawMessage(`"hi"`), + Content: json.RawMessage(`"."`), }, }, - MaxTokens: 1024, + MaxTokens: 1, Stream: false, } return antigravity.TransformClaudeToGemini(claudeReq, projectID, mappedModel) From a61042bca08ed0ebb2f4a9c14c8f73eba4f9037f Mon Sep 17 00:00:00 2001 From: song Date: Fri, 16 Jan 2026 11:57:14 +0800 Subject: [PATCH 011/147] =?UTF-8?q?fix:=20Antigravity=20project=5Fid=20?= =?UTF-8?q?=E8=8E=B7=E5=8F=96=E4=BC=98=E5=8C=96?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - API URL 改为只使用 prod 端点 - 刷新 token 时每次调用 LoadCodeAssist 更新 project_id - 移除随机生成 project_id 的兜底逻辑 --- backend/internal/pkg/antigravity/oauth.go | 28 ++----------------- .../service/antigravity_oauth_service.go | 25 +++++++++-------- .../service/antigravity_quota_fetcher.go | 5 ---- 3 files changed, 16 insertions(+), 42 deletions(-) diff --git a/backend/internal/pkg/antigravity/oauth.go b/backend/internal/pkg/antigravity/oauth.go index 736c45df..debef3e9 100644 --- a/backend/internal/pkg/antigravity/oauth.go +++ b/backend/internal/pkg/antigravity/oauth.go @@ -42,12 +42,9 @@ const ( URLAvailabilityTTL = 5 * time.Minute ) -// BaseURLs 定义 Antigravity API 端点,按优先级排序 -// fallback 顺序: sandbox → daily → prod +// BaseURLs 定义 Antigravity API 端点 var BaseURLs = []string{ - "https://daily-cloudcode-pa.sandbox.googleapis.com", // sandbox - "https://daily-cloudcode-pa.googleapis.com", // daily - "https://cloudcode-pa.googleapis.com", // prod + "https://cloudcode-pa.googleapis.com", // prod } // BaseURL 默认 URL(保持向后兼容) @@ -240,24 +237,3 @@ func BuildAuthorizationURL(state, codeChallenge string) string { return fmt.Sprintf("%s?%s", AuthorizeURL, params.Encode()) } - -// GenerateMockProjectID 生成随机 project_id(当 API 不返回时使用) -// 格式:{形容词}-{名词}-{5位随机字符} -func GenerateMockProjectID() string { - adjectives := []string{"useful", "bright", "swift", "calm", "bold"} - nouns := []string{"fuze", "wave", "spark", "flow", "core"} - - randBytes, _ := GenerateRandomBytes(7) - - adj := adjectives[int(randBytes[0])%len(adjectives)] - noun := nouns[int(randBytes[1])%len(nouns)] - - // 生成 5 位随机字符(a-z0-9) - const charset = "abcdefghijklmnopqrstuvwxyz0123456789" - suffix := make([]byte, 5) - for i := 0; i < 5; i++ { - suffix[i] = charset[int(randBytes[i+2])%len(charset)] - } - - return fmt.Sprintf("%s-%s-%s", adj, noun, string(suffix)) -} diff --git a/backend/internal/service/antigravity_oauth_service.go b/backend/internal/service/antigravity_oauth_service.go index ecf0a553..3cf87b9d 100644 --- a/backend/internal/service/antigravity_oauth_service.go +++ b/backend/internal/service/antigravity_oauth_service.go @@ -149,12 +149,6 @@ func (s *AntigravityOAuthService) ExchangeCode(ctx context.Context, input *Antig result.ProjectID = loadResp.CloudAICompanionProject } - // 兜底:随机生成 project_id - if result.ProjectID == "" { - result.ProjectID = antigravity.GenerateMockProjectID() - fmt.Printf("[AntigravityOAuth] 使用随机生成的 project_id: %s\n", result.ProjectID) - } - return result, nil } @@ -236,16 +230,25 @@ func (s *AntigravityOAuthService) RefreshAccountToken(ctx context.Context, accou return nil, err } - // 保留原有的 project_id 和 email - existingProjectID := strings.TrimSpace(account.GetCredential("project_id")) - if existingProjectID != "" { - tokenInfo.ProjectID = existingProjectID - } + // 保留原有的 email existingEmail := strings.TrimSpace(account.GetCredential("email")) if existingEmail != "" { tokenInfo.Email = existingEmail } + // 每次刷新都调用 LoadCodeAssist 更新 project_id + client := antigravity.NewClient(proxyURL) + loadResp, _, err := client.LoadCodeAssist(ctx, tokenInfo.AccessToken) + if err != nil { + // 失败时保留原有的 project_id + existingProjectID := strings.TrimSpace(account.GetCredential("project_id")) + if existingProjectID != "" { + tokenInfo.ProjectID = existingProjectID + } + } else if loadResp != nil && loadResp.CloudAICompanionProject != "" { + tokenInfo.ProjectID = loadResp.CloudAICompanionProject + } + return tokenInfo, nil } diff --git a/backend/internal/service/antigravity_quota_fetcher.go b/backend/internal/service/antigravity_quota_fetcher.go index c9024e33..07eb563d 100644 --- a/backend/internal/service/antigravity_quota_fetcher.go +++ b/backend/internal/service/antigravity_quota_fetcher.go @@ -31,11 +31,6 @@ func (f *AntigravityQuotaFetcher) FetchQuota(ctx context.Context, account *Accou accessToken := account.GetCredential("access_token") projectID := account.GetCredential("project_id") - // 如果没有 project_id,生成一个随机的 - if projectID == "" { - projectID = antigravity.GenerateMockProjectID() - } - client := antigravity.NewClient(proxyURL) // 调用 API 获取配额 From 95fe1e818fbd2b693f8d1243348196d8dd7c6a3f Mon Sep 17 00:00:00 2001 From: song Date: Fri, 16 Jan 2026 12:13:54 +0800 Subject: [PATCH 012/147] =?UTF-8?q?fix:=20Antigravity=20=E5=88=B7=E6=96=B0?= =?UTF-8?q?=20token=20=E6=97=B6=E6=A3=80=E6=B5=8B=20project=5Fid=20?= =?UTF-8?q?=E7=BC=BA=E5=A4=B1?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 刷新 token 后调用 LoadCodeAssist 获取 project_id - 如果获取失败,保留原有 project_id,标记账户为 error - token 仍会正常更新,不影响凭证刷新 - 错误信息:账户缺少project id,可能无法使用Antigravity --- .../internal/handler/admin/account_handler.go | 22 +++++++++++++++ backend/internal/service/admin_service.go | 5 ++++ .../service/antigravity_oauth_service.go | 28 +++++++++---------- .../service/antigravity_token_refresher.go | 5 ++++ .../internal/service/token_refresh_service.go | 13 ++++++--- 5 files changed, 55 insertions(+), 18 deletions(-) diff --git a/backend/internal/handler/admin/account_handler.go b/backend/internal/handler/admin/account_handler.go index 8a7270e5..97206b15 100644 --- a/backend/internal/handler/admin/account_handler.go +++ b/backend/internal/handler/admin/account_handler.go @@ -450,6 +450,28 @@ func (h *AccountHandler) Refresh(c *gin.Context) { newCredentials[k] = v } } + + // 如果 project_id 获取失败,先更新凭证,再标记账户为 error + if tokenInfo.ProjectIDMissing { + // 先更新凭证 + _, updateErr := h.adminService.UpdateAccount(c.Request.Context(), accountID, &service.UpdateAccountInput{ + Credentials: newCredentials, + }) + if updateErr != nil { + response.InternalError(c, "Failed to update credentials: "+updateErr.Error()) + return + } + // 标记账户为 error + if setErr := h.adminService.SetAccountError(c.Request.Context(), accountID, "账户缺少project id,可能无法使用Antigravity"); setErr != nil { + response.InternalError(c, "Failed to set account error: "+setErr.Error()) + return + } + response.Success(c, gin.H{ + "message": "Token refreshed but project_id is missing, account marked as error", + "warning": "missing_project_id", + }) + return + } } else { // Use Anthropic/Claude OAuth service to refresh token tokenInfo, err := h.oauthService.RefreshAccountToken(c.Request.Context(), account) diff --git a/backend/internal/service/admin_service.go b/backend/internal/service/admin_service.go index 4288381c..f0cb0671 100644 --- a/backend/internal/service/admin_service.go +++ b/backend/internal/service/admin_service.go @@ -42,6 +42,7 @@ type AdminService interface { DeleteAccount(ctx context.Context, id int64) error RefreshAccountCredentials(ctx context.Context, id int64) (*Account, error) ClearAccountError(ctx context.Context, id int64) (*Account, error) + SetAccountError(ctx context.Context, id int64, errorMsg string) error SetAccountSchedulable(ctx context.Context, id int64, schedulable bool) (*Account, error) BulkUpdateAccounts(ctx context.Context, input *BulkUpdateAccountsInput) (*BulkUpdateAccountsResult, error) @@ -991,6 +992,10 @@ func (s *adminServiceImpl) ClearAccountError(ctx context.Context, id int64) (*Ac return account, nil } +func (s *adminServiceImpl) SetAccountError(ctx context.Context, id int64, errorMsg string) error { + return s.accountRepo.SetError(ctx, id, errorMsg) +} + func (s *adminServiceImpl) SetAccountSchedulable(ctx context.Context, id int64, schedulable bool) (*Account, error) { if err := s.accountRepo.SetSchedulable(ctx, id, schedulable); err != nil { return nil, err diff --git a/backend/internal/service/antigravity_oauth_service.go b/backend/internal/service/antigravity_oauth_service.go index 3cf87b9d..52293cd5 100644 --- a/backend/internal/service/antigravity_oauth_service.go +++ b/backend/internal/service/antigravity_oauth_service.go @@ -82,13 +82,14 @@ type AntigravityExchangeCodeInput struct { // AntigravityTokenInfo token 信息 type AntigravityTokenInfo struct { - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - ExpiresIn int64 `json:"expires_in"` - ExpiresAt int64 `json:"expires_at"` - TokenType string `json:"token_type"` - Email string `json:"email,omitempty"` - ProjectID string `json:"project_id,omitempty"` + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + ExpiresIn int64 `json:"expires_in"` + ExpiresAt int64 `json:"expires_at"` + TokenType string `json:"token_type"` + Email string `json:"email,omitempty"` + ProjectID string `json:"project_id,omitempty"` + ProjectIDMissing bool `json:"-"` // LoadCodeAssist 未返回 project_id } // ExchangeCode 用 authorization code 交换 token @@ -236,16 +237,15 @@ func (s *AntigravityOAuthService) RefreshAccountToken(ctx context.Context, accou tokenInfo.Email = existingEmail } - // 每次刷新都调用 LoadCodeAssist 更新 project_id + // 每次刷新都调用 LoadCodeAssist 获取 project_id client := antigravity.NewClient(proxyURL) loadResp, _, err := client.LoadCodeAssist(ctx, tokenInfo.AccessToken) - if err != nil { - // 失败时保留原有的 project_id + if err != nil || loadResp == nil || loadResp.CloudAICompanionProject == "" { + // LoadCodeAssist 失败或返回空,保留原有 project_id,标记缺失 existingProjectID := strings.TrimSpace(account.GetCredential("project_id")) - if existingProjectID != "" { - tokenInfo.ProjectID = existingProjectID - } - } else if loadResp != nil && loadResp.CloudAICompanionProject != "" { + tokenInfo.ProjectID = existingProjectID + tokenInfo.ProjectIDMissing = true + } else { tokenInfo.ProjectID = loadResp.CloudAICompanionProject } diff --git a/backend/internal/service/antigravity_token_refresher.go b/backend/internal/service/antigravity_token_refresher.go index 9dd4463f..a07c86e6 100644 --- a/backend/internal/service/antigravity_token_refresher.go +++ b/backend/internal/service/antigravity_token_refresher.go @@ -61,5 +61,10 @@ func (r *AntigravityTokenRefresher) Refresh(ctx context.Context, account *Accoun } } + // 如果 project_id 获取失败,返回 credentials 但同时返回错误让账户被标记 + if tokenInfo.ProjectIDMissing { + return newCredentials, fmt.Errorf("missing_project_id: 账户缺少project id,可能无法使用Antigravity") + } + return newCredentials, nil } diff --git a/backend/internal/service/token_refresh_service.go b/backend/internal/service/token_refresh_service.go index 3ed35f04..29ff142f 100644 --- a/backend/internal/service/token_refresh_service.go +++ b/backend/internal/service/token_refresh_service.go @@ -163,12 +163,16 @@ func (s *TokenRefreshService) refreshWithRetry(ctx context.Context, account *Acc for attempt := 1; attempt <= s.cfg.MaxRetries; attempt++ { newCredentials, err := refresher.Refresh(ctx, account) - if err == nil { - // 刷新成功,更新账号credentials + + // 如果有新凭证,先更新(即使有错误也要保存 token) + if newCredentials != nil { account.Credentials = newCredentials - if err := s.accountRepo.Update(ctx, account); err != nil { - return fmt.Errorf("failed to save credentials: %w", err) + if saveErr := s.accountRepo.Update(ctx, account); saveErr != nil { + return fmt.Errorf("failed to save credentials: %w", saveErr) } + } + + if err == nil { return nil } @@ -219,6 +223,7 @@ func isNonRetryableRefreshError(err error) bool { "invalid_client", // 客户端配置错误 "unauthorized_client", // 客户端未授权 "access_denied", // 访问被拒绝 + "missing_project_id", // 缺少 project_id } for _, needle := range nonRetryable { if strings.Contains(msg, needle) { From 821968903c4ccb4c6c8419c3b295e07b6bccad14 Mon Sep 17 00:00:00 2001 From: song Date: Fri, 16 Jan 2026 13:18:00 +0800 Subject: [PATCH 013/147] =?UTF-8?q?feat(antigravity):=20=E6=89=8B=E5=8A=A8?= =?UTF-8?q?=E5=88=B7=E6=96=B0=E4=BB=A4=E7=89=8C=E6=97=B6=E8=87=AA=E5=8A=A8?= =?UTF-8?q?=E6=81=A2=E5=A4=8D=20missing=5Fproject=5Fid=20=E9=94=99?= =?UTF-8?q?=E8=AF=AF=E8=B4=A6=E6=88=B7=E7=8A=B6=E6=80=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 当手动刷新成功获取到 project_id,且之前错误为 missing_project_id 时,自动清除错误状态 - 后台自动刷新时同样支持状态恢复 --- backend/internal/handler/admin/account_handler.go | 10 +++++++++- backend/internal/repository/account_repo.go | 9 +++++++++ backend/internal/service/account_service.go | 1 + backend/internal/service/token_refresh_service.go | 10 ++++++++++ 4 files changed, 29 insertions(+), 1 deletion(-) diff --git a/backend/internal/handler/admin/account_handler.go b/backend/internal/handler/admin/account_handler.go index 97206b15..55311b3a 100644 --- a/backend/internal/handler/admin/account_handler.go +++ b/backend/internal/handler/admin/account_handler.go @@ -462,7 +462,7 @@ func (h *AccountHandler) Refresh(c *gin.Context) { return } // 标记账户为 error - if setErr := h.adminService.SetAccountError(c.Request.Context(), accountID, "账户缺少project id,可能无法使用Antigravity"); setErr != nil { + if setErr := h.adminService.SetAccountError(c.Request.Context(), accountID, "missing_project_id: 账户缺少project id,可能无法使用Antigravity"); setErr != nil { response.InternalError(c, "Failed to set account error: "+setErr.Error()) return } @@ -472,6 +472,14 @@ func (h *AccountHandler) Refresh(c *gin.Context) { }) return } + + // 成功获取到 project_id,如果之前是 missing_project_id 错误则清除 + if account.Status == service.StatusError && strings.HasPrefix(account.ErrorMessage, "missing_project_id:") { + if _, clearErr := h.adminService.ClearAccountError(c.Request.Context(), accountID); clearErr != nil { + response.InternalError(c, "Failed to clear account error: "+clearErr.Error()) + return + } + } } else { // Use Anthropic/Claude OAuth service to refresh token tokenInfo, err := h.oauthService.RefreshAccountToken(c.Request.Context(), account) diff --git a/backend/internal/repository/account_repo.go b/backend/internal/repository/account_repo.go index 04ca7052..0acf1636 100644 --- a/backend/internal/repository/account_repo.go +++ b/backend/internal/repository/account_repo.go @@ -491,6 +491,15 @@ func (r *accountRepository) SetError(ctx context.Context, id int64, errorMsg str return err } +func (r *accountRepository) ClearError(ctx context.Context, id int64) error { + _, err := r.client.Account.Update(). + Where(dbaccount.IDEQ(id)). + SetStatus(service.StatusActive). + SetErrorMessage(""). + Save(ctx) + return err +} + func (r *accountRepository) AddToGroup(ctx context.Context, accountID, groupID int64, priority int) error { _, err := r.client.AccountGroup.Create(). SetAccountID(accountID). diff --git a/backend/internal/service/account_service.go b/backend/internal/service/account_service.go index 2f138b81..93a36c3e 100644 --- a/backend/internal/service/account_service.go +++ b/backend/internal/service/account_service.go @@ -37,6 +37,7 @@ type AccountRepository interface { UpdateLastUsed(ctx context.Context, id int64) error BatchUpdateLastUsed(ctx context.Context, updates map[int64]time.Time) error SetError(ctx context.Context, id int64, errorMsg string) error + ClearError(ctx context.Context, id int64) error SetSchedulable(ctx context.Context, id int64, schedulable bool) error AutoPauseExpiredAccounts(ctx context.Context, now time.Time) (int64, error) BindGroups(ctx context.Context, accountID int64, groupIDs []int64) error diff --git a/backend/internal/service/token_refresh_service.go b/backend/internal/service/token_refresh_service.go index 29ff142f..6e405efb 100644 --- a/backend/internal/service/token_refresh_service.go +++ b/backend/internal/service/token_refresh_service.go @@ -173,6 +173,16 @@ func (s *TokenRefreshService) refreshWithRetry(ctx context.Context, account *Acc } if err == nil { + // Antigravity 账户:如果之前是因为缺少 project_id 而标记为 error,现在成功获取到了,清除错误状态 + if account.Platform == PlatformAntigravity && + account.Status == StatusError && + strings.HasPrefix(account.ErrorMessage, "missing_project_id:") { + if clearErr := s.accountRepo.ClearError(ctx, account.ID); clearErr != nil { + log.Printf("[TokenRefresh] Failed to clear error status for account %d: %v", account.ID, clearErr) + } else { + log.Printf("[TokenRefresh] Account %d: cleared missing_project_id error", account.ID) + } + } return nil } From 455576300c047cb113b8975a520f23ad677a2a64 Mon Sep 17 00:00:00 2001 From: song Date: Fri, 16 Jan 2026 14:03:25 +0800 Subject: [PATCH 014/147] =?UTF-8?q?fix(antigravity):=20=E4=BD=BF=E7=94=A8?= =?UTF-8?q?=20Contains=20=E5=8C=B9=E9=85=8D=20missing=5Fproject=5Fid=20?= =?UTF-8?q?=E9=94=99=E8=AF=AF=E4=BF=A1=E6=81=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/handler/admin/account_handler.go | 2 +- backend/internal/service/token_refresh_service.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/backend/internal/handler/admin/account_handler.go b/backend/internal/handler/admin/account_handler.go index 55311b3a..15ce8960 100644 --- a/backend/internal/handler/admin/account_handler.go +++ b/backend/internal/handler/admin/account_handler.go @@ -474,7 +474,7 @@ func (h *AccountHandler) Refresh(c *gin.Context) { } // 成功获取到 project_id,如果之前是 missing_project_id 错误则清除 - if account.Status == service.StatusError && strings.HasPrefix(account.ErrorMessage, "missing_project_id:") { + if account.Status == service.StatusError && strings.Contains(account.ErrorMessage, "missing_project_id:") { if _, clearErr := h.adminService.ClearAccountError(c.Request.Context(), accountID); clearErr != nil { response.InternalError(c, "Failed to clear account error: "+clearErr.Error()) return diff --git a/backend/internal/service/token_refresh_service.go b/backend/internal/service/token_refresh_service.go index 6e405efb..4ae4bec8 100644 --- a/backend/internal/service/token_refresh_service.go +++ b/backend/internal/service/token_refresh_service.go @@ -176,7 +176,7 @@ func (s *TokenRefreshService) refreshWithRetry(ctx context.Context, account *Acc // Antigravity 账户:如果之前是因为缺少 project_id 而标记为 error,现在成功获取到了,清除错误状态 if account.Platform == PlatformAntigravity && account.Status == StatusError && - strings.HasPrefix(account.ErrorMessage, "missing_project_id:") { + strings.Contains(account.ErrorMessage, "missing_project_id:") { if clearErr := s.accountRepo.ClearError(ctx, account.ID); clearErr != nil { log.Printf("[TokenRefresh] Failed to clear error status for account %d: %v", account.ID, clearErr) } else { From fba3d21a351e11899105bd50808a966f8a3255ce Mon Sep 17 00:00:00 2001 From: song Date: Fri, 16 Jan 2026 14:18:12 +0800 Subject: [PATCH 015/147] =?UTF-8?q?fix:=20=E4=BD=BF=E7=94=A8=20Contains=20?= =?UTF-8?q?=E5=8C=B9=E9=85=8D=20missing=5Fproject=5Fid=20=E5=B9=B6?= =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E6=B5=8B=E8=AF=95=20mock?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/service/account_service_delete_test.go | 4 ++++ backend/internal/service/gateway_multiplatform_test.go | 3 +++ backend/internal/service/gemini_multiplatform_test.go | 3 +++ 3 files changed, 10 insertions(+) diff --git a/backend/internal/service/account_service_delete_test.go b/backend/internal/service/account_service_delete_test.go index 6923067d..fe89b47f 100644 --- a/backend/internal/service/account_service_delete_test.go +++ b/backend/internal/service/account_service_delete_test.go @@ -99,6 +99,10 @@ func (s *accountRepoStub) SetError(ctx context.Context, id int64, errorMsg strin panic("unexpected SetError call") } +func (s *accountRepoStub) ClearError(ctx context.Context, id int64) error { + panic("unexpected ClearError call") +} + func (s *accountRepoStub) SetSchedulable(ctx context.Context, id int64, schedulable bool) error { panic("unexpected SetSchedulable call") } diff --git a/backend/internal/service/gateway_multiplatform_test.go b/backend/internal/service/gateway_multiplatform_test.go index da7c311c..0039ac1d 100644 --- a/backend/internal/service/gateway_multiplatform_test.go +++ b/backend/internal/service/gateway_multiplatform_test.go @@ -102,6 +102,9 @@ func (m *mockAccountRepoForPlatform) BatchUpdateLastUsed(ctx context.Context, up func (m *mockAccountRepoForPlatform) SetError(ctx context.Context, id int64, errorMsg string) error { return nil } +func (m *mockAccountRepoForPlatform) ClearError(ctx context.Context, id int64) error { + return nil +} func (m *mockAccountRepoForPlatform) SetSchedulable(ctx context.Context, id int64, schedulable bool) error { return nil } diff --git a/backend/internal/service/gemini_multiplatform_test.go b/backend/internal/service/gemini_multiplatform_test.go index d9df5f4c..15e84040 100644 --- a/backend/internal/service/gemini_multiplatform_test.go +++ b/backend/internal/service/gemini_multiplatform_test.go @@ -87,6 +87,9 @@ func (m *mockAccountRepoForGemini) BatchUpdateLastUsed(ctx context.Context, upda func (m *mockAccountRepoForGemini) SetError(ctx context.Context, id int64, errorMsg string) error { return nil } +func (m *mockAccountRepoForGemini) ClearError(ctx context.Context, id int64) error { + return nil +} func (m *mockAccountRepoForGemini) SetSchedulable(ctx context.Context, id int64, schedulable bool) error { return nil } From cc892744bc4ae2e57df7d37f571d70a6e015dc78 Mon Sep 17 00:00:00 2001 From: song Date: Fri, 16 Jan 2026 18:09:34 +0800 Subject: [PATCH 016/147] =?UTF-8?q?fix(antigravity):=20429=20fallback=20?= =?UTF-8?q?=E6=94=B9=E4=B8=BA=205=20=E5=88=86=E9=92=9F=E5=B9=B6=E9=99=90?= =?UTF-8?q?=E6=B5=81=E6=95=B4=E4=B8=AA=E8=B4=A6=E6=88=B7?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - fallback 时间从 1 分钟改为 5 分钟 - fallback 时直接限流整个账户而非仅限制 quota scope --- .../service/antigravity_gateway_service.go | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 5ef2afd9..716fa3c4 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -1328,18 +1328,12 @@ func (s *AntigravityGatewayService) handleUpstreamError(ctx context.Context, pre if statusCode == 429 { resetAt := ParseGeminiRateLimitResetTime(body) if resetAt == nil { - // 解析失败:Gemini 有重试时间用 5 分钟,Claude 没有用 1 分钟 - defaultDur := 1 * time.Minute - if bytes.Contains(body, []byte("Please retry in")) || bytes.Contains(body, []byte("retryDelay")) { - defaultDur = 5 * time.Minute - } + // 解析失败:默认 5 分钟,直接限流整个账户 + defaultDur := 5 * time.Minute ra := time.Now().Add(defaultDur) - log.Printf("%s status=429 rate_limited scope=%s reset_in=%v (fallback)", prefix, quotaScope, defaultDur) - if quotaScope == "" { - return - } - if err := s.accountRepo.SetAntigravityQuotaScopeLimit(ctx, account.ID, quotaScope, ra); err != nil { - log.Printf("%s status=429 rate_limit_set_failed scope=%s error=%v", prefix, quotaScope, err) + log.Printf("%s status=429 rate_limited account=%d reset_in=%v (fallback)", prefix, account.ID, defaultDur) + if err := s.accountRepo.SetRateLimited(ctx, account.ID, ra); err != nil { + log.Printf("%s status=429 rate_limit_set_failed account=%d error=%v", prefix, account.ID, err) } return } From 2055a60bcbbb13fa4fafc23b7a8205eab1775f34 Mon Sep 17 00:00:00 2001 From: song Date: Fri, 16 Jan 2026 18:51:07 +0800 Subject: [PATCH 017/147] =?UTF-8?q?fix(antigravity):=20429=20=E9=87=8D?= =?UTF-8?q?=E8=AF=953=E6=AC=A1=E5=90=8E=E9=99=90=E6=B5=81=E8=B4=A6?= =?UTF-8?q?=E6=88=B7?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 收到429后重试最多3次(指数退避) - 3次都失败后调用 handleUpstreamError 限流账户 - 移除无效的 URL fallback 逻辑(当前只有一个URL) --- .../service/antigravity_gateway_service.go | 48 +++++++++++++++---- 1 file changed, 38 insertions(+), 10 deletions(-) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 716fa3c4..347877ee 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -587,13 +587,27 @@ urlFallbackLoop: return nil, s.writeClaudeError(c, http.StatusBadGateway, "upstream_error", "Upstream request failed after retries") } - // 检查是否应触发 URL 降级(仅 429) - if resp.StatusCode == http.StatusTooManyRequests && urlIdx < len(availableURLs)-1 { + // 429 重试3次后限流账户 + if resp.StatusCode == http.StatusTooManyRequests { respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) _ = resp.Body.Close() - antigravity.DefaultURLAvailability.MarkUnavailable(baseURL) - log.Printf("%s URL fallback (HTTP 429): %s -> %s body=%s", prefix, baseURL, availableURLs[urlIdx+1], truncateForLog(respBody, 200)) - continue urlFallbackLoop + + if attempt < 3 { + log.Printf("%s status=429 retry=%d/3 body=%s", prefix, attempt, truncateForLog(respBody, 200)) + if !sleepAntigravityBackoffWithContext(ctx, attempt) { + return nil, ctx.Err() + } + continue + } + // 3次重试都失败,限流账户 + s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) + log.Printf("%s status=429 rate_limited body=%s", prefix, truncateForLog(respBody, 200)) + resp = &http.Response{ + StatusCode: resp.StatusCode, + Header: resp.Header.Clone(), + Body: io.NopCloser(bytes.NewReader(respBody)), + } + break urlFallbackLoop } if resp.StatusCode >= 400 && s.shouldRetryUpstreamError(resp.StatusCode) { @@ -1131,13 +1145,27 @@ urlFallbackLoop: return nil, s.writeGoogleError(c, http.StatusBadGateway, "Upstream request failed after retries") } - // 检查是否应触发 URL 降级(仅 429) - if resp.StatusCode == http.StatusTooManyRequests && urlIdx < len(availableURLs)-1 { + // 429 重试3次后限流账户 + if resp.StatusCode == http.StatusTooManyRequests { respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) _ = resp.Body.Close() - antigravity.DefaultURLAvailability.MarkUnavailable(baseURL) - log.Printf("%s URL fallback (HTTP 429): %s -> %s body=%s", prefix, baseURL, availableURLs[urlIdx+1], truncateForLog(respBody, 200)) - continue urlFallbackLoop + + if attempt < 3 { + log.Printf("%s status=429 retry=%d/3 body=%s", prefix, attempt, truncateForLog(respBody, 200)) + if !sleepAntigravityBackoffWithContext(ctx, attempt) { + return nil, ctx.Err() + } + continue + } + // 3次重试都失败,限流账户 + s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) + log.Printf("%s status=429 rate_limited body=%s", prefix, truncateForLog(respBody, 200)) + resp = &http.Response{ + StatusCode: resp.StatusCode, + Header: resp.Header.Clone(), + Body: io.NopCloser(bytes.NewReader(respBody)), + } + break urlFallbackLoop } if resp.StatusCode >= 400 && s.shouldRetryUpstreamError(resp.StatusCode) { From 34d6b0a6016a57e71275d6eb96c2427325873245 Mon Sep 17 00:00:00 2001 From: song Date: Fri, 16 Jan 2026 20:18:30 +0800 Subject: [PATCH 018/147] =?UTF-8?q?feat(gateway):=20=E8=B4=A6=E6=88=B7?= =?UTF-8?q?=E5=88=87=E6=8D=A2=E6=AC=A1=E6=95=B0=E5=92=8C=20Antigravity=20?= =?UTF-8?q?=E9=99=90=E6=B5=81=E6=97=B6=E9=97=B4=E5=8F=AF=E9=85=8D=E7=BD=AE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - gateway.max_account_switches: 账户切换最大次数,默认 10 - gateway.max_account_switches_gemini: Gemini 账户切换次数,默认 3 - gateway.antigravity_fallback_cooldown_minutes: Antigravity 429 fallback 限流时间,默认 5 分钟 - Antigravity 429 不再重试,直接标记账户限流 --- backend/internal/config/config.go | 11 ++++++ backend/internal/handler/gateway_handler.go | 16 +++++++-- .../internal/handler/gemini_v1beta_handler.go | 2 +- .../handler/openai_gateway_handler.go | 8 ++++- .../service/antigravity_gateway_service.go | 36 +++++-------------- 5 files changed, 41 insertions(+), 32 deletions(-) diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index 2cc11967..b2105bc6 100644 --- a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -228,6 +228,14 @@ type GatewayConfig struct { // 是否允许对部分 400 错误触发 failover(默认关闭以避免改变语义) FailoverOn400 bool `mapstructure:"failover_on_400"` + // 账户切换最大次数(遇到上游错误时切换到其他账户的次数上限) + MaxAccountSwitches int `mapstructure:"max_account_switches"` + // Gemini 账户切换最大次数(Gemini 平台单独配置,因 API 限制更严格) + MaxAccountSwitchesGemini int `mapstructure:"max_account_switches_gemini"` + + // Antigravity 429 fallback 限流时间(分钟),解析重置时间失败时使用 + AntigravityFallbackCooldownMinutes int `mapstructure:"antigravity_fallback_cooldown_minutes"` + // Scheduling: 账号调度相关配置 Scheduling GatewaySchedulingConfig `mapstructure:"scheduling"` } @@ -661,6 +669,9 @@ func setDefaults() { viper.SetDefault("gateway.log_upstream_error_body_max_bytes", 2048) viper.SetDefault("gateway.inject_beta_for_apikey", false) viper.SetDefault("gateway.failover_on_400", false) + viper.SetDefault("gateway.max_account_switches", 10) + viper.SetDefault("gateway.max_account_switches_gemini", 3) + viper.SetDefault("gateway.antigravity_fallback_cooldown_minutes", 5) viper.SetDefault("gateway.max_body_size", int64(100*1024*1024)) viper.SetDefault("gateway.connection_pool_isolation", ConnectionPoolIsolationAccountProxy) // HTTP 上游连接池配置(针对 5000+ 并发用户优化) diff --git a/backend/internal/handler/gateway_handler.go b/backend/internal/handler/gateway_handler.go index 48a827f3..2cad9c40 100644 --- a/backend/internal/handler/gateway_handler.go +++ b/backend/internal/handler/gateway_handler.go @@ -30,6 +30,8 @@ type GatewayHandler struct { userService *service.UserService billingCacheService *service.BillingCacheService concurrencyHelper *ConcurrencyHelper + maxAccountSwitches int + maxAccountSwitchesGemini int } // NewGatewayHandler creates a new GatewayHandler @@ -43,8 +45,16 @@ func NewGatewayHandler( cfg *config.Config, ) *GatewayHandler { pingInterval := time.Duration(0) + maxAccountSwitches := 10 + maxAccountSwitchesGemini := 3 if cfg != nil { pingInterval = time.Duration(cfg.Concurrency.PingInterval) * time.Second + if cfg.Gateway.MaxAccountSwitches > 0 { + maxAccountSwitches = cfg.Gateway.MaxAccountSwitches + } + if cfg.Gateway.MaxAccountSwitchesGemini > 0 { + maxAccountSwitchesGemini = cfg.Gateway.MaxAccountSwitchesGemini + } } return &GatewayHandler{ gatewayService: gatewayService, @@ -53,6 +63,8 @@ func NewGatewayHandler( userService: userService, billingCacheService: billingCacheService, concurrencyHelper: NewConcurrencyHelper(concurrencyService, SSEPingFormatClaude, pingInterval), + maxAccountSwitches: maxAccountSwitches, + maxAccountSwitchesGemini: maxAccountSwitchesGemini, } } @@ -164,7 +176,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { } if platform == service.PlatformGemini { - const maxAccountSwitches = 3 + maxAccountSwitches := h.maxAccountSwitchesGemini switchCount := 0 failedAccountIDs := make(map[int64]struct{}) lastFailoverStatus := 0 @@ -291,7 +303,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { } } - const maxAccountSwitches = 10 + maxAccountSwitches := h.maxAccountSwitches switchCount := 0 failedAccountIDs := make(map[int64]struct{}) lastFailoverStatus := 0 diff --git a/backend/internal/handler/gemini_v1beta_handler.go b/backend/internal/handler/gemini_v1beta_handler.go index 0cbe44f2..9909fa90 100644 --- a/backend/internal/handler/gemini_v1beta_handler.go +++ b/backend/internal/handler/gemini_v1beta_handler.go @@ -212,7 +212,7 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { if sessionHash != "" { sessionKey = "gemini:" + sessionHash } - const maxAccountSwitches = 3 + maxAccountSwitches := h.maxAccountSwitchesGemini switchCount := 0 failedAccountIDs := make(map[int64]struct{}) lastFailoverStatus := 0 diff --git a/backend/internal/handler/openai_gateway_handler.go b/backend/internal/handler/openai_gateway_handler.go index 70131417..334d1368 100644 --- a/backend/internal/handler/openai_gateway_handler.go +++ b/backend/internal/handler/openai_gateway_handler.go @@ -23,6 +23,7 @@ type OpenAIGatewayHandler struct { gatewayService *service.OpenAIGatewayService billingCacheService *service.BillingCacheService concurrencyHelper *ConcurrencyHelper + maxAccountSwitches int } // NewOpenAIGatewayHandler creates a new OpenAIGatewayHandler @@ -33,13 +34,18 @@ func NewOpenAIGatewayHandler( cfg *config.Config, ) *OpenAIGatewayHandler { pingInterval := time.Duration(0) + maxAccountSwitches := 3 if cfg != nil { pingInterval = time.Duration(cfg.Concurrency.PingInterval) * time.Second + if cfg.Gateway.MaxAccountSwitches > 0 { + maxAccountSwitches = cfg.Gateway.MaxAccountSwitches + } } return &OpenAIGatewayHandler{ gatewayService: gatewayService, billingCacheService: billingCacheService, concurrencyHelper: NewConcurrencyHelper(concurrencyService, SSEPingFormatComment, pingInterval), + maxAccountSwitches: maxAccountSwitches, } } @@ -147,7 +153,7 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { // Generate session hash (from header for OpenAI) sessionHash := h.gatewayService.GenerateSessionHash(c) - const maxAccountSwitches = 3 + maxAccountSwitches := h.maxAccountSwitches switchCount := 0 failedAccountIDs := make(map[int64]struct{}) lastFailoverStatus := 0 diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 347877ee..a0e845ee 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -587,19 +587,11 @@ urlFallbackLoop: return nil, s.writeClaudeError(c, http.StatusBadGateway, "upstream_error", "Upstream request failed after retries") } - // 429 重试3次后限流账户 + // 429 不重试,直接限流账户 if resp.StatusCode == http.StatusTooManyRequests { respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) _ = resp.Body.Close() - if attempt < 3 { - log.Printf("%s status=429 retry=%d/3 body=%s", prefix, attempt, truncateForLog(respBody, 200)) - if !sleepAntigravityBackoffWithContext(ctx, attempt) { - return nil, ctx.Err() - } - continue - } - // 3次重试都失败,限流账户 s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) log.Printf("%s status=429 rate_limited body=%s", prefix, truncateForLog(respBody, 200)) resp = &http.Response{ @@ -622,10 +614,6 @@ urlFallbackLoop: } continue } - // 所有重试都失败,标记限流状态 - if resp.StatusCode == 429 { - s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) - } // 最后一次尝试也失败 resp = &http.Response{ StatusCode: resp.StatusCode, @@ -1145,19 +1133,11 @@ urlFallbackLoop: return nil, s.writeGoogleError(c, http.StatusBadGateway, "Upstream request failed after retries") } - // 429 重试3次后限流账户 + // 429 不重试,直接限流账户 if resp.StatusCode == http.StatusTooManyRequests { respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) _ = resp.Body.Close() - if attempt < 3 { - log.Printf("%s status=429 retry=%d/3 body=%s", prefix, attempt, truncateForLog(respBody, 200)) - if !sleepAntigravityBackoffWithContext(ctx, attempt) { - return nil, ctx.Err() - } - continue - } - // 3次重试都失败,限流账户 s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) log.Printf("%s status=429 rate_limited body=%s", prefix, truncateForLog(respBody, 200)) resp = &http.Response{ @@ -1180,10 +1160,6 @@ urlFallbackLoop: } continue } - // 所有重试都失败,标记限流状态 - if resp.StatusCode == 429 { - s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) - } resp = &http.Response{ StatusCode: resp.StatusCode, Header: resp.Header.Clone(), @@ -1356,8 +1332,12 @@ func (s *AntigravityGatewayService) handleUpstreamError(ctx context.Context, pre if statusCode == 429 { resetAt := ParseGeminiRateLimitResetTime(body) if resetAt == nil { - // 解析失败:默认 5 分钟,直接限流整个账户 - defaultDur := 5 * time.Minute + // 解析失败:使用配置的 fallback 时间,直接限流整个账户 + fallbackMinutes := 5 + if s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.AntigravityFallbackCooldownMinutes > 0 { + fallbackMinutes = s.settingService.cfg.Gateway.AntigravityFallbackCooldownMinutes + } + defaultDur := time.Duration(fallbackMinutes) * time.Minute ra := time.Now().Add(defaultDur) log.Printf("%s status=429 rate_limited account=%d reset_in=%v (fallback)", prefix, account.ID, defaultDur) if err := s.accountRepo.SetRateLimited(ctx, account.ID, ra); err != nil { From 1be3eacad5e22469e97dfcd7c2ba2c92da5e77ce Mon Sep 17 00:00:00 2001 From: song Date: Fri, 16 Jan 2026 20:47:07 +0800 Subject: [PATCH 019/147] =?UTF-8?q?feat(scheduling):=20=E5=85=9C=E5=BA=95?= =?UTF-8?q?=E5=B1=82=E8=B4=A6=E6=88=B7=E9=80=89=E6=8B=A9=E7=AD=96=E7=95=A5?= =?UTF-8?q?=E5=8F=AF=E9=85=8D=E7=BD=AE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - gateway.scheduling.fallback_selection_mode: "last_used"(默认) 或 "random" - last_used: 按最后使用时间排序(轮询效果) - random: 同优先级内随机选择 --- backend/internal/config/config.go | 4 ++ backend/internal/service/gateway_service.go | 53 ++++++++++++++++++++- 2 files changed, 56 insertions(+), 1 deletion(-) diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index b2105bc6..dfa5e2f4 100644 --- a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -250,6 +250,9 @@ type GatewaySchedulingConfig struct { FallbackWaitTimeout time.Duration `mapstructure:"fallback_wait_timeout"` FallbackMaxWaiting int `mapstructure:"fallback_max_waiting"` + // 兜底层账户选择策略: "last_used"(按最后使用时间排序,默认) 或 "random"(随机) + FallbackSelectionMode string `mapstructure:"fallback_selection_mode"` + // 负载计算 LoadBatchEnabled bool `mapstructure:"load_batch_enabled"` @@ -689,6 +692,7 @@ func setDefaults() { viper.SetDefault("gateway.scheduling.sticky_session_wait_timeout", 45*time.Second) viper.SetDefault("gateway.scheduling.fallback_wait_timeout", 30*time.Second) viper.SetDefault("gateway.scheduling.fallback_max_waiting", 100) + viper.SetDefault("gateway.scheduling.fallback_selection_mode", "last_used") viper.SetDefault("gateway.scheduling.load_batch_enabled", true) viper.SetDefault("gateway.scheduling.slot_cleanup_interval", 30*time.Second) viper.SetDefault("concurrency.ping_interval", 10) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 5871fddb..72343e2c 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -11,6 +11,7 @@ import ( "fmt" "io" "log" + mathrand "math/rand" "net/http" "regexp" "sort" @@ -605,7 +606,7 @@ func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, gro } // ============ Layer 3: 兜底排队 ============ - sortAccountsByPriorityAndLastUsed(candidates, preferOAuth) + s.sortCandidatesForFallback(candidates, preferOAuth, cfg.FallbackSelectionMode) for _, acc := range candidates { return &AccountSelectionResult{ Account: acc, @@ -805,6 +806,56 @@ func sortAccountsByPriorityAndLastUsed(accounts []*Account, preferOAuth bool) { }) } +// sortCandidatesForFallback 根据配置选择排序策略 +// mode: "last_used"(按最后使用时间) 或 "random"(随机) +func (s *GatewayService) sortCandidatesForFallback(accounts []*Account, preferOAuth bool, mode string) { + if mode == "random" { + // 先按优先级排序,然后在同优先级内随机打乱 + sortAccountsByPriorityOnly(accounts, preferOAuth) + shuffleWithinPriority(accounts) + } else { + // 默认按最后使用时间排序 + sortAccountsByPriorityAndLastUsed(accounts, preferOAuth) + } +} + +// sortAccountsByPriorityOnly 仅按优先级排序 +func sortAccountsByPriorityOnly(accounts []*Account, preferOAuth bool) { + sort.SliceStable(accounts, func(i, j int) bool { + a, b := accounts[i], accounts[j] + if a.Priority != b.Priority { + return a.Priority < b.Priority + } + if preferOAuth && a.Type != b.Type { + return a.Type == AccountTypeOAuth + } + return false + }) +} + +// shuffleWithinPriority 在同优先级内随机打乱顺序 +func shuffleWithinPriority(accounts []*Account) { + if len(accounts) <= 1 { + return + } + r := mathrand.New(mathrand.NewSource(time.Now().UnixNano())) + start := 0 + for start < len(accounts) { + priority := accounts[start].Priority + end := start + 1 + for end < len(accounts) && accounts[end].Priority == priority { + end++ + } + // 对 [start, end) 范围内的账户随机打乱 + if end-start > 1 { + r.Shuffle(end-start, func(i, j int) { + accounts[start+i], accounts[start+j] = accounts[start+j], accounts[start+i] + }) + } + start = end + } +} + // selectAccountForModelWithPlatform 选择单平台账户(完全隔离) func (s *GatewayService) selectAccountForModelWithPlatform(ctx context.Context, groupID *int64, sessionHash string, requestedModel string, excludedIDs map[int64]struct{}, platform string) (*Account, error) { preferOAuth := platform == PlatformGemini From 28e46e0e7cd9337a89ce221d3afd8e27baf95168 Mon Sep 17 00:00:00 2001 From: IanShaw027 <131567472+IanShaw027@users.noreply.github.com> Date: Fri, 16 Jan 2026 23:47:42 +0800 Subject: [PATCH 020/147] =?UTF-8?q?fix(gemini):=20=E6=9B=B4=E6=96=B0=20Gem?= =?UTF-8?q?ini=20=E6=A8=A1=E5=9E=8B=E5=88=97=E8=A1=A8=E9=85=8D=E7=BD=AE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 移除已弃用的 1.5 系列模型 - 调整模型优先级顺序(2.0 Flash > 2.5 Flash > 2.5 Pro > 3.0 Preview) - 同步前后端模型配置 - 更新相关测试用例和默认模型选择逻辑 --- backend/internal/pkg/gemini/models.go | 11 +++----- backend/internal/pkg/geminicli/models.go | 4 +-- .../service/antigravity_model_mapping_test.go | 8 +++--- .../service/gemini_multiplatform_test.go | 2 +- backend/internal/service/pricing_service.go | 4 +-- .../components/account/AccountTestModal.vue | 5 +++- .../admin/account/AccountTestModal.vue | 5 +++- frontend/src/components/keys/UseKeyModal.vue | 26 ++++++++++++------- frontend/src/composables/useModelWhitelist.ts | 19 +++++++------- 9 files changed, 47 insertions(+), 37 deletions(-) diff --git a/backend/internal/pkg/gemini/models.go b/backend/internal/pkg/gemini/models.go index e251c8d8..424e8ddb 100644 --- a/backend/internal/pkg/gemini/models.go +++ b/backend/internal/pkg/gemini/models.go @@ -16,14 +16,11 @@ type ModelsListResponse struct { func DefaultModels() []Model { methods := []string{"generateContent", "streamGenerateContent"} return []Model{ - {Name: "models/gemini-3-pro-preview", SupportedGenerationMethods: methods}, - {Name: "models/gemini-3-flash-preview", SupportedGenerationMethods: methods}, - {Name: "models/gemini-2.5-pro", SupportedGenerationMethods: methods}, - {Name: "models/gemini-2.5-flash", SupportedGenerationMethods: methods}, {Name: "models/gemini-2.0-flash", SupportedGenerationMethods: methods}, - {Name: "models/gemini-1.5-pro", SupportedGenerationMethods: methods}, - {Name: "models/gemini-1.5-flash", SupportedGenerationMethods: methods}, - {Name: "models/gemini-1.5-flash-8b", SupportedGenerationMethods: methods}, + {Name: "models/gemini-2.5-flash", SupportedGenerationMethods: methods}, + {Name: "models/gemini-2.5-pro", SupportedGenerationMethods: methods}, + {Name: "models/gemini-3-flash-preview", SupportedGenerationMethods: methods}, + {Name: "models/gemini-3-pro-preview", SupportedGenerationMethods: methods}, } } diff --git a/backend/internal/pkg/geminicli/models.go b/backend/internal/pkg/geminicli/models.go index 922988c7..08e69886 100644 --- a/backend/internal/pkg/geminicli/models.go +++ b/backend/internal/pkg/geminicli/models.go @@ -12,10 +12,10 @@ type Model struct { // DefaultModels is the curated Gemini model list used by the admin UI "test account" flow. var DefaultModels = []Model{ {ID: "gemini-2.0-flash", Type: "model", DisplayName: "Gemini 2.0 Flash", CreatedAt: ""}, - {ID: "gemini-2.5-pro", Type: "model", DisplayName: "Gemini 2.5 Pro", CreatedAt: ""}, {ID: "gemini-2.5-flash", Type: "model", DisplayName: "Gemini 2.5 Flash", CreatedAt: ""}, - {ID: "gemini-3-pro-preview", Type: "model", DisplayName: "Gemini 3 Pro Preview", CreatedAt: ""}, + {ID: "gemini-2.5-pro", Type: "model", DisplayName: "Gemini 2.5 Pro", CreatedAt: ""}, {ID: "gemini-3-flash-preview", Type: "model", DisplayName: "Gemini 3 Flash Preview", CreatedAt: ""}, + {ID: "gemini-3-pro-preview", Type: "model", DisplayName: "Gemini 3 Pro Preview", CreatedAt: ""}, } // DefaultTestModel is the default model to preselect in test flows. diff --git a/backend/internal/service/antigravity_model_mapping_test.go b/backend/internal/service/antigravity_model_mapping_test.go index 39000e4f..179a3520 100644 --- a/backend/internal/service/antigravity_model_mapping_test.go +++ b/backend/internal/service/antigravity_model_mapping_test.go @@ -30,7 +30,7 @@ func TestIsAntigravityModelSupported(t *testing.T) { {"可映射 - claude-3-haiku-20240307", "claude-3-haiku-20240307", true}, // Gemini 前缀透传 - {"Gemini前缀 - gemini-1.5-pro", "gemini-1.5-pro", true}, + {"Gemini前缀 - gemini-2.5-pro", "gemini-2.5-pro", true}, {"Gemini前缀 - gemini-unknown-model", "gemini-unknown-model", true}, {"Gemini前缀 - gemini-future-version", "gemini-future-version", true}, @@ -142,10 +142,10 @@ func TestAntigravityGatewayService_GetMappedModel(t *testing.T) { expected: "gemini-2.5-flash", }, { - name: "Gemini透传 - gemini-1.5-pro", - requestedModel: "gemini-1.5-pro", + name: "Gemini透传 - gemini-2.5-pro", + requestedModel: "gemini-2.5-pro", accountMapping: nil, - expected: "gemini-1.5-pro", + expected: "gemini-2.5-pro", }, { name: "Gemini透传 - gemini-future-model", diff --git a/backend/internal/service/gemini_multiplatform_test.go b/backend/internal/service/gemini_multiplatform_test.go index 03f5d757..f2ea5859 100644 --- a/backend/internal/service/gemini_multiplatform_test.go +++ b/backend/internal/service/gemini_multiplatform_test.go @@ -599,7 +599,7 @@ func TestGeminiMessagesCompatService_isModelSupportedByAccount(t *testing.T) { name: "Gemini平台-有映射配置-只支持配置的模型", account: &Account{ Platform: PlatformGemini, - Credentials: map[string]any{"model_mapping": map[string]any{"gemini-1.5-pro": "x"}}, + Credentials: map[string]any{"model_mapping": map[string]any{"gemini-2.5-pro": "x"}}, }, model: "gemini-2.5-flash", expected: false, diff --git a/backend/internal/service/pricing_service.go b/backend/internal/service/pricing_service.go index 392fb65c..0ade72cd 100644 --- a/backend/internal/service/pricing_service.go +++ b/backend/internal/service/pricing_service.go @@ -531,8 +531,8 @@ func (s *PricingService) buildModelLookupCandidates(modelLower string) []string func normalizeModelNameForPricing(model string) string { // Common Gemini/VertexAI forms: // - models/gemini-2.0-flash-exp - // - publishers/google/models/gemini-1.5-pro - // - projects/.../locations/.../publishers/google/models/gemini-1.5-pro + // - publishers/google/models/gemini-2.5-pro + // - projects/.../locations/.../publishers/google/models/gemini-2.5-pro model = strings.TrimSpace(model) model = strings.TrimLeft(model, "/") model = strings.TrimPrefix(model, "models/") diff --git a/frontend/src/components/account/AccountTestModal.vue b/frontend/src/components/account/AccountTestModal.vue index 42f3c1b9..dfa1503e 100644 --- a/frontend/src/components/account/AccountTestModal.vue +++ b/frontend/src/components/account/AccountTestModal.vue @@ -292,8 +292,11 @@ const loadAvailableModels = async () => { if (availableModels.value.length > 0) { if (props.account.platform === 'gemini') { const preferred = + availableModels.value.find((m) => m.id === 'gemini-2.0-flash') || + availableModels.value.find((m) => m.id === 'gemini-2.5-flash') || availableModels.value.find((m) => m.id === 'gemini-2.5-pro') || - availableModels.value.find((m) => m.id === 'gemini-3-pro') + availableModels.value.find((m) => m.id === 'gemini-3-flash-preview') || + availableModels.value.find((m) => m.id === 'gemini-3-pro-preview') selectedModelId.value = preferred?.id || availableModels.value[0].id } else { // Try to select Sonnet as default, otherwise use first model diff --git a/frontend/src/components/admin/account/AccountTestModal.vue b/frontend/src/components/admin/account/AccountTestModal.vue index 2cb1c5a5..feb09654 100644 --- a/frontend/src/components/admin/account/AccountTestModal.vue +++ b/frontend/src/components/admin/account/AccountTestModal.vue @@ -232,8 +232,11 @@ const loadAvailableModels = async () => { if (availableModels.value.length > 0) { if (props.account.platform === 'gemini') { const preferred = + availableModels.value.find((m) => m.id === 'gemini-2.0-flash') || + availableModels.value.find((m) => m.id === 'gemini-2.5-flash') || availableModels.value.find((m) => m.id === 'gemini-2.5-pro') || - availableModels.value.find((m) => m.id === 'gemini-3-pro') + availableModels.value.find((m) => m.id === 'gemini-3-flash-preview') || + availableModels.value.find((m) => m.id === 'gemini-3-pro-preview') selectedModelId.value = preferred?.id || availableModels.value[0].id } else { // Try to select Sonnet as default, otherwise use first model diff --git a/frontend/src/components/keys/UseKeyModal.vue b/frontend/src/components/keys/UseKeyModal.vue index 8075ba70..7f9bd1ed 100644 --- a/frontend/src/components/keys/UseKeyModal.vue +++ b/frontend/src/components/keys/UseKeyModal.vue @@ -443,7 +443,7 @@ $env:ANTHROPIC_AUTH_TOKEN="${apiKey}"` } function generateGeminiCliContent(baseUrl: string, apiKey: string): FileConfig { - const model = 'gemini-2.5-pro' + const model = 'gemini-2.0-flash' const modelComment = t('keys.useKeyModal.gemini.modelComment') let path: string let content: string @@ -548,14 +548,22 @@ function generateOpenCodeConfig(platform: string, baseUrl: string, apiKey: strin } } const geminiModels = { - 'gemini-3-pro-high': { name: 'Gemini 3 Pro High' }, - 'gemini-3-pro-low': { name: 'Gemini 3 Pro Low' }, - 'gemini-3-pro-preview': { name: 'Gemini 3 Pro Preview' }, - 'gemini-3-pro-image': { name: 'Gemini 3 Pro Image' }, - 'gemini-3-flash': { name: 'Gemini 3 Flash' }, - 'gemini-2.5-flash-thinking': { name: 'Gemini 2.5 Flash Thinking' }, + 'gemini-2.0-flash': { name: 'Gemini 2.0 Flash' }, 'gemini-2.5-flash': { name: 'Gemini 2.5 Flash' }, - 'gemini-2.5-flash-lite': { name: 'Gemini 2.5 Flash Lite' } + 'gemini-2.5-pro': { name: 'Gemini 2.5 Pro' }, + 'gemini-3-flash-preview': { name: 'Gemini 3 Flash Preview' }, + 'gemini-3-pro-preview': { name: 'Gemini 3 Pro Preview' } + } + + const antigravityGeminiModels = { + 'gemini-2.5-flash': { name: 'Gemini 2.5 Flash' }, + 'gemini-2.5-flash-lite': { name: 'Gemini 2.5 Flash Lite' }, + 'gemini-2.5-flash-thinking': { name: 'Gemini 2.5 Flash Thinking' }, + 'gemini-3-flash': { name: 'Gemini 3 Flash' }, + 'gemini-3-pro-low': { name: 'Gemini 3 Pro Low' }, + 'gemini-3-pro-high': { name: 'Gemini 3 Pro High' }, + 'gemini-3-pro-preview': { name: 'Gemini 3 Pro Preview' }, + 'gemini-3-pro-image': { name: 'Gemini 3 Pro Image' } } const claudeModels = { 'claude-opus-4-5-thinking': { name: 'Claude Opus 4.5 Thinking' }, @@ -575,7 +583,7 @@ function generateOpenCodeConfig(platform: string, baseUrl: string, apiKey: strin } else if (platform === 'antigravity-gemini') { provider[platform].npm = '@ai-sdk/google' provider[platform].name = 'Antigravity (Gemini)' - provider[platform].models = geminiModels + provider[platform].models = antigravityGeminiModels } else if (platform === 'openai') { provider[platform].models = openaiModels } diff --git a/frontend/src/composables/useModelWhitelist.ts b/frontend/src/composables/useModelWhitelist.ts index 79900c6e..d4fa2993 100644 --- a/frontend/src/composables/useModelWhitelist.ts +++ b/frontend/src/composables/useModelWhitelist.ts @@ -43,13 +43,13 @@ export const claudeModels = [ // Google Gemini const geminiModels = [ - 'gemini-2.0-flash', 'gemini-2.0-flash-lite-preview', 'gemini-2.0-flash-exp', - 'gemini-2.0-pro-exp', 'gemini-2.0-flash-thinking-exp', - 'gemini-2.5-pro-exp-03-25', 'gemini-2.5-pro-preview-03-25', - 'gemini-3-pro-preview', - 'gemini-1.5-pro', 'gemini-1.5-pro-latest', - 'gemini-1.5-flash', 'gemini-1.5-flash-latest', 'gemini-1.5-flash-8b', - 'gemini-exp-1206' + // Keep in sync with backend curated Gemini lists. + // This list is intentionally conservative (models commonly available across OAuth/API key). + 'gemini-2.0-flash', + 'gemini-2.5-flash', + 'gemini-2.5-pro', + 'gemini-3-flash-preview', + 'gemini-3-pro-preview' ] // 智谱 GLM @@ -229,9 +229,8 @@ const openaiPresetMappings = [ const geminiPresetMappings = [ { label: 'Flash 2.0', from: 'gemini-2.0-flash', to: 'gemini-2.0-flash', color: 'bg-blue-100 text-blue-700 hover:bg-blue-200 dark:bg-blue-900/30 dark:text-blue-400' }, - { label: 'Flash Lite', from: 'gemini-2.0-flash-lite-preview', to: 'gemini-2.0-flash-lite-preview', color: 'bg-indigo-100 text-indigo-700 hover:bg-indigo-200 dark:bg-indigo-900/30 dark:text-indigo-400' }, - { label: '1.5 Pro', from: 'gemini-1.5-pro', to: 'gemini-1.5-pro', color: 'bg-purple-100 text-purple-700 hover:bg-purple-200 dark:bg-purple-900/30 dark:text-purple-400' }, - { label: '1.5 Flash', from: 'gemini-1.5-flash', to: 'gemini-1.5-flash', color: 'bg-emerald-100 text-emerald-700 hover:bg-emerald-200 dark:bg-emerald-900/30 dark:text-emerald-400' } + { label: '2.5 Flash', from: 'gemini-2.5-flash', to: 'gemini-2.5-flash', color: 'bg-indigo-100 text-indigo-700 hover:bg-indigo-200 dark:bg-indigo-900/30 dark:text-indigo-400' }, + { label: '2.5 Pro', from: 'gemini-2.5-pro', to: 'gemini-2.5-pro', color: 'bg-purple-100 text-purple-700 hover:bg-purple-200 dark:bg-purple-900/30 dark:text-purple-400' } ] // ===================== From cc0fca35ec26604e0ce5ff47235d8dfd32ef6be9 Mon Sep 17 00:00:00 2001 From: song Date: Sat, 17 Jan 2026 01:49:42 +0800 Subject: [PATCH 021/147] =?UTF-8?q?feat(antigravity):=20=E5=90=8C=E6=AD=A5?= =?UTF-8?q?=20Antigravity-Manager=20=E7=9A=84=E8=AF=B7=E6=B1=82=E9=80=BB?= =?UTF-8?q?=E8=BE=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - System Prompt: 改为简短版,添加 OpenCode 过滤、MCP XML 协议注入、SYSTEM_PROMPT_END 标记 - HTTP Headers: 只保留 Content-Type/Authorization/User-Agent,移除 Accept 和 Host - User-Agent: 改为 antigravity/1.11.9 windows/amd64 - requestType: 动态判断 (agent/web_search/image_gen) - BaseURLs: 添加 daily sandbox 备用 URL - Fallback: 扩展触发条件 (429/408/404/5xx) --- backend/internal/pkg/antigravity/client.go | 30 ++-------- backend/internal/pkg/antigravity/oauth.go | 9 +-- .../pkg/antigravity/request_transformer.go | 60 +++++++++++++++++-- 3 files changed, 66 insertions(+), 33 deletions(-) diff --git a/backend/internal/pkg/antigravity/client.go b/backend/internal/pkg/antigravity/client.go index 1248be95..454d3438 100644 --- a/backend/internal/pkg/antigravity/client.go +++ b/backend/internal/pkg/antigravity/client.go @@ -16,15 +16,6 @@ import ( "time" ) -// resolveHost 从 URL 解析 host -func resolveHost(urlStr string) string { - parsed, err := url.Parse(urlStr) - if err != nil { - return "" - } - return parsed.Host -} - // NewAPIRequestWithURL 使用指定的 base URL 创建 Antigravity API 请求(v1internal 端点) func NewAPIRequestWithURL(ctx context.Context, baseURL, action, accessToken string, body []byte) (*http.Request, error) { // 构建 URL,流式请求添加 ?alt=sse 参数 @@ -39,23 +30,11 @@ func NewAPIRequestWithURL(ctx context.Context, baseURL, action, accessToken stri return nil, err } - // 基础 Headers + // 基础 Headers(与 Antigravity-Manager 保持一致,只设置这 3 个) req.Header.Set("Content-Type", "application/json") req.Header.Set("Authorization", "Bearer "+accessToken) req.Header.Set("User-Agent", UserAgent) - // Accept Header 根据请求类型设置 - if isStream { - req.Header.Set("Accept", "text/event-stream") - } else { - req.Header.Set("Accept", "application/json") - } - - // 显式设置 Host Header - if host := resolveHost(apiURL); host != "" { - req.Host = host - } - return req, nil } @@ -195,12 +174,15 @@ func isConnectionError(err error) bool { } // shouldFallbackToNextURL 判断是否应切换到下一个 URL -// 仅连接错误和 HTTP 429 触发 URL 降级 +// 与 Antigravity-Manager 保持一致:连接错误、429、408、404、5xx 触发 URL 降级 func shouldFallbackToNextURL(err error, statusCode int) bool { if isConnectionError(err) { return true } - return statusCode == http.StatusTooManyRequests + return statusCode == http.StatusTooManyRequests || + statusCode == http.StatusRequestTimeout || + statusCode == http.StatusNotFound || + statusCode >= 500 } // ExchangeCode 用 authorization code 交换 token diff --git a/backend/internal/pkg/antigravity/oauth.go b/backend/internal/pkg/antigravity/oauth.go index debef3e9..9d4baa6c 100644 --- a/backend/internal/pkg/antigravity/oauth.go +++ b/backend/internal/pkg/antigravity/oauth.go @@ -32,8 +32,8 @@ const ( "https://www.googleapis.com/auth/cclog " + "https://www.googleapis.com/auth/experimentsandconfigs" - // User-Agent(模拟官方客户端) - UserAgent = "antigravity/1.104.0 darwin/arm64" + // User-Agent(与 Antigravity-Manager 保持一致) + UserAgent = "antigravity/1.11.9 windows/amd64" // Session 过期时间 SessionTTL = 30 * time.Minute @@ -42,9 +42,10 @@ const ( URLAvailabilityTTL = 5 * time.Minute ) -// BaseURLs 定义 Antigravity API 端点 +// BaseURLs 定义 Antigravity API 端点(与 Antigravity-Manager 保持一致) var BaseURLs = []string{ - "https://cloudcode-pa.googleapis.com", // prod + "https://cloudcode-pa.googleapis.com", // prod (优先) + "https://daily-cloudcode-pa.sandbox.googleapis.com", // daily sandbox (备用) } // BaseURL 默认 URL(保持向后兼容) diff --git a/backend/internal/pkg/antigravity/request_transformer.go b/backend/internal/pkg/antigravity/request_transformer.go index a6f72c22..9b703187 100644 --- a/backend/internal/pkg/antigravity/request_transformer.go +++ b/backend/internal/pkg/antigravity/request_transformer.go @@ -78,7 +78,7 @@ func TransformClaudeToGeminiWithOptions(claudeReq *ClaudeRequest, projectID, map } // 2. 构建 systemInstruction - systemInstruction := buildSystemInstruction(claudeReq.System, claudeReq.Model, opts) + systemInstruction := buildSystemInstruction(claudeReq.System, claudeReq.Model, opts, claudeReq.Tools) // 3. 构建 generationConfig reqForConfig := claudeReq @@ -154,8 +154,40 @@ func GetDefaultIdentityPatch() string { return antigravityIdentity } -// buildSystemInstruction 构建 systemInstruction -func buildSystemInstruction(system json.RawMessage, modelName string, opts TransformOptions) *GeminiContent { +// mcpXMLProtocol MCP XML 工具调用协议(与 Antigravity-Manager 保持一致) +const mcpXMLProtocol = ` +==== MCP XML 工具调用协议 (Workaround) ==== +当你需要调用名称以 ` + "`mcp__`" + ` 开头的 MCP 工具时: +1) 优先尝试 XML 格式调用:输出 ` + "`{\"arg\":\"value\"}`" + `。 +2) 必须直接输出 XML 块,无需 markdown 包装,内容为 JSON 格式的入参。 +3) 这种方式具有更高的连通性和容错性,适用于大型结果返回场景。 +===========================================` + +// hasMCPTools 检测是否有 mcp__ 前缀的工具 +func hasMCPTools(tools []ClaudeTool) bool { + for _, tool := range tools { + if strings.HasPrefix(tool.Name, "mcp__") { + return true + } + } + return false +} + +// filterOpenCodePrompt 过滤 OpenCode 默认提示词,只保留用户自定义指令 +func filterOpenCodePrompt(text string) string { + if !strings.Contains(text, "You are an interactive CLI tool") { + return text + } + // 提取 "Instructions from:" 及之后的部分 + if idx := strings.Index(text, "Instructions from:"); idx >= 0 { + return text[idx:] + } + // 如果没有自定义指令,返回空 + return "" +} + +// buildSystemInstruction 构建 systemInstruction(与 Antigravity-Manager 保持一致) +func buildSystemInstruction(system json.RawMessage, modelName string, opts TransformOptions, tools []ClaudeTool) *GeminiContent { var parts []GeminiPart // 先解析用户的 system prompt,检测是否已包含 Antigravity identity @@ -167,10 +199,14 @@ func buildSystemInstruction(system json.RawMessage, modelName string, opts Trans var sysStr string if err := json.Unmarshal(system, &sysStr); err == nil { if strings.TrimSpace(sysStr) != "" { - userSystemParts = append(userSystemParts, GeminiPart{Text: sysStr}) if strings.Contains(sysStr, "You are Antigravity") { userHasAntigravityIdentity = true } + // 过滤 OpenCode 默认提示词 + filtered := filterOpenCodePrompt(sysStr) + if filtered != "" { + userSystemParts = append(userSystemParts, GeminiPart{Text: filtered}) + } } } else { // 尝试解析为数组 @@ -178,10 +214,14 @@ func buildSystemInstruction(system json.RawMessage, modelName string, opts Trans if err := json.Unmarshal(system, &sysBlocks); err == nil { for _, block := range sysBlocks { if block.Type == "text" && strings.TrimSpace(block.Text) != "" { - userSystemParts = append(userSystemParts, GeminiPart{Text: block.Text}) if strings.Contains(block.Text, "You are Antigravity") { userHasAntigravityIdentity = true } + // 过滤 OpenCode 默认提示词 + filtered := filterOpenCodePrompt(block.Text) + if filtered != "" { + userSystemParts = append(userSystemParts, GeminiPart{Text: filtered}) + } } } } @@ -200,6 +240,16 @@ func buildSystemInstruction(system json.RawMessage, modelName string, opts Trans // 添加用户的 system prompt parts = append(parts, userSystemParts...) + // 检测是否有 MCP 工具,如有则注入 XML 调用协议 + if hasMCPTools(tools) { + parts = append(parts, GeminiPart{Text: mcpXMLProtocol}) + } + + // 如果用户没有提供 Antigravity 身份,添加结束标记 + if !userHasAntigravityIdentity { + parts = append(parts, GeminiPart{Text: "\n--- [SYSTEM_PROMPT_END] ---"}) + } + if len(parts) == 0 { return nil } From a7165b0f73b86c750c8037d4d5ec41dfa1f461d8 Mon Sep 17 00:00:00 2001 From: IanShaw027 <131567472+IanShaw027@users.noreply.github.com> Date: Sat, 17 Jan 2026 01:53:51 +0800 Subject: [PATCH 022/147] =?UTF-8?q?fix(group):=20SIMPLE=20=E6=A8=A1?= =?UTF-8?q?=E5=BC=8F=E5=90=AF=E5=8A=A8=E8=A1=A5=E9=BD=90=E9=BB=98=E8=AE=A4?= =?UTF-8?q?=E5=88=86=E7=BB=84?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/repository/ent.go | 13 +++ .../repository/simple_mode_default_groups.go | 82 ++++++++++++++++++ ...le_mode_default_groups_integration_test.go | 84 +++++++++++++++++++ 3 files changed, 179 insertions(+) create mode 100644 backend/internal/repository/simple_mode_default_groups.go create mode 100644 backend/internal/repository/simple_mode_default_groups_integration_test.go diff --git a/backend/internal/repository/ent.go b/backend/internal/repository/ent.go index 8005f114..d7d574e8 100644 --- a/backend/internal/repository/ent.go +++ b/backend/internal/repository/ent.go @@ -65,5 +65,18 @@ func InitEnt(cfg *config.Config) (*ent.Client, *sql.DB, error) { // 创建 Ent 客户端,绑定到已配置的数据库驱动。 client := ent.NewClient(ent.Driver(drv)) + + // SIMPLE 模式:启动时补齐各平台默认分组。 + // - anthropic/openai/gemini: 确保存在 -default + // - antigravity: 仅要求存在 >=2 个未软删除分组(用于 claude/gemini 混合调度场景) + if cfg.RunMode == config.RunModeSimple { + seedCtx, seedCancel := context.WithTimeout(context.Background(), 30*time.Second) + defer seedCancel() + if err := ensureSimpleModeDefaultGroups(seedCtx, client); err != nil { + _ = client.Close() + return nil, nil, err + } + } + return client, drv.DB(), nil } diff --git a/backend/internal/repository/simple_mode_default_groups.go b/backend/internal/repository/simple_mode_default_groups.go new file mode 100644 index 00000000..56309184 --- /dev/null +++ b/backend/internal/repository/simple_mode_default_groups.go @@ -0,0 +1,82 @@ +package repository + +import ( + "context" + "fmt" + + dbent "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/internal/service" +) + +func ensureSimpleModeDefaultGroups(ctx context.Context, client *dbent.Client) error { + if client == nil { + return fmt.Errorf("nil ent client") + } + + requiredByPlatform := map[string]int{ + service.PlatformAnthropic: 1, + service.PlatformOpenAI: 1, + service.PlatformGemini: 1, + service.PlatformAntigravity: 2, + } + + for platform, minCount := range requiredByPlatform { + count, err := client.Group.Query(). + Where(group.PlatformEQ(platform), group.DeletedAtIsNil()). + Count(ctx) + if err != nil { + return fmt.Errorf("count groups for platform %s: %w", platform, err) + } + + if platform == service.PlatformAntigravity { + if count < minCount { + for i := count; i < minCount; i++ { + name := fmt.Sprintf("%s-default-%d", platform, i+1) + if err := createGroupIfNotExists(ctx, client, name, platform); err != nil { + return err + } + } + } + continue + } + + // Non-antigravity platforms: ensure -default exists. + name := platform + "-default" + if err := createGroupIfNotExists(ctx, client, name, platform); err != nil { + return err + } + } + + return nil +} + +func createGroupIfNotExists(ctx context.Context, client *dbent.Client, name, platform string) error { + exists, err := client.Group.Query(). + Where(group.NameEQ(name), group.DeletedAtIsNil()). + Exist(ctx) + if err != nil { + return fmt.Errorf("check group exists %s: %w", name, err) + } + if exists { + return nil + } + + _, err = client.Group.Create(). + SetName(name). + SetDescription("Auto-created default group"). + SetPlatform(platform). + SetStatus(service.StatusActive). + SetSubscriptionType(service.SubscriptionTypeStandard). + SetRateMultiplier(1.0). + SetIsExclusive(false). + Save(ctx) + if err != nil { + if dbent.IsConstraintError(err) { + // Concurrent server startups may race on creation; treat as success. + return nil + } + return fmt.Errorf("create default group %s: %w", name, err) + } + return nil +} diff --git a/backend/internal/repository/simple_mode_default_groups_integration_test.go b/backend/internal/repository/simple_mode_default_groups_integration_test.go new file mode 100644 index 00000000..3327257b --- /dev/null +++ b/backend/internal/repository/simple_mode_default_groups_integration_test.go @@ -0,0 +1,84 @@ +//go:build integration + +package repository + +import ( + "context" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/stretchr/testify/require" +) + +func TestEnsureSimpleModeDefaultGroups_CreatesMissingDefaults(t *testing.T) { + ctx := context.Background() + tx := testEntTx(t) + client := tx.Client() + + seedCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + require.NoError(t, ensureSimpleModeDefaultGroups(seedCtx, client)) + + assertGroupExists := func(name string) { + exists, err := client.Group.Query().Where(group.NameEQ(name), group.DeletedAtIsNil()).Exist(seedCtx) + require.NoError(t, err) + require.True(t, exists, "expected group %s to exist", name) + } + + assertGroupExists(service.PlatformAnthropic + "-default") + assertGroupExists(service.PlatformOpenAI + "-default") + assertGroupExists(service.PlatformGemini + "-default") + assertGroupExists(service.PlatformAntigravity + "-default-1") + assertGroupExists(service.PlatformAntigravity + "-default-2") +} + +func TestEnsureSimpleModeDefaultGroups_IgnoresSoftDeletedGroups(t *testing.T) { + ctx := context.Background() + tx := testEntTx(t) + client := tx.Client() + + seedCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + // Create and then soft-delete an anthropic default group. + g, err := client.Group.Create(). + SetName(service.PlatformAnthropic + "-default"). + SetPlatform(service.PlatformAnthropic). + SetStatus(service.StatusActive). + SetSubscriptionType(service.SubscriptionTypeStandard). + SetRateMultiplier(1.0). + SetIsExclusive(false). + Save(seedCtx) + require.NoError(t, err) + + _, err = client.Group.Delete().Where(group.IDEQ(g.ID)).Exec(seedCtx) + require.NoError(t, err) + + require.NoError(t, ensureSimpleModeDefaultGroups(seedCtx, client)) + + // New active one should exist. + count, err := client.Group.Query().Where(group.NameEQ(service.PlatformAnthropic+"-default"), group.DeletedAtIsNil()).Count(seedCtx) + require.NoError(t, err) + require.Equal(t, 1, count) +} + +func TestEnsureSimpleModeDefaultGroups_AntigravityNeedsTwoGroupsOnlyByCount(t *testing.T) { + ctx := context.Background() + tx := testEntTx(t) + client := tx.Client() + + seedCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + mustCreateGroup(t, client, &service.Group{Name: "ag-custom-1-" + time.Now().Format(time.RFC3339Nano), Platform: service.PlatformAntigravity}) + mustCreateGroup(t, client, &service.Group{Name: "ag-custom-2-" + time.Now().Format(time.RFC3339Nano), Platform: service.PlatformAntigravity}) + + require.NoError(t, ensureSimpleModeDefaultGroups(seedCtx, client)) + + count, err := client.Group.Query().Where(group.PlatformEQ(service.PlatformAntigravity), group.DeletedAtIsNil()).Count(seedCtx) + require.NoError(t, err) + require.GreaterOrEqual(t, count, 2) +} From 69c4b17a9b05b57d07914dfb0be7655db70447c9 Mon Sep 17 00:00:00 2001 From: song Date: Sat, 17 Jan 2026 01:54:14 +0800 Subject: [PATCH 023/147] =?UTF-8?q?feat(antigravity):=20=E5=8A=A8=E6=80=81?= =?UTF-8?q?=20URL=20=E6=8E=92=E5=BA=8F=EF=BC=8C=E6=9C=80=E8=BF=91=E6=88=90?= =?UTF-8?q?=E5=8A=9F=E7=9A=84=E4=BC=98=E5=85=88=E4=BD=BF=E7=94=A8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - URLAvailability 新增 lastSuccess 字段追踪最近成功的 URL - GetAvailableURLs 返回列表时优先放置 lastSuccess - 所有 Antigravity API 调用成功后调用 MarkSuccess 更新优先级 --- backend/internal/pkg/antigravity/client.go | 4 +++ backend/internal/pkg/antigravity/oauth.go | 29 +++++++++++++++++-- .../service/antigravity_gateway_service.go | 16 ++++++++++ 3 files changed, 47 insertions(+), 2 deletions(-) diff --git a/backend/internal/pkg/antigravity/client.go b/backend/internal/pkg/antigravity/client.go index 454d3438..fd6cac58 100644 --- a/backend/internal/pkg/antigravity/client.go +++ b/backend/internal/pkg/antigravity/client.go @@ -358,6 +358,8 @@ func (c *Client) LoadCodeAssist(ctx context.Context, accessToken string) (*LoadC var rawResp map[string]any _ = json.Unmarshal(respBodyBytes, &rawResp) + // 标记成功的 URL,下次优先使用 + DefaultURLAvailability.MarkSuccess(baseURL) return &loadResp, rawResp, nil } @@ -449,6 +451,8 @@ func (c *Client) FetchAvailableModels(ctx context.Context, accessToken, projectI var rawResp map[string]any _ = json.Unmarshal(respBodyBytes, &rawResp) + // 标记成功的 URL,下次优先使用 + DefaultURLAvailability.MarkSuccess(baseURL) return &modelsResp, rawResp, nil } diff --git a/backend/internal/pkg/antigravity/oauth.go b/backend/internal/pkg/antigravity/oauth.go index 9d4baa6c..ee2a6c1a 100644 --- a/backend/internal/pkg/antigravity/oauth.go +++ b/backend/internal/pkg/antigravity/oauth.go @@ -51,11 +51,12 @@ var BaseURLs = []string{ // BaseURL 默认 URL(保持向后兼容) var BaseURL = BaseURLs[0] -// URLAvailability 管理 URL 可用性状态(带 TTL 自动恢复) +// URLAvailability 管理 URL 可用性状态(带 TTL 自动恢复和动态优先级) type URLAvailability struct { mu sync.RWMutex unavailable map[string]time.Time // URL -> 恢复时间 ttl time.Duration + lastSuccess string // 最近成功请求的 URL,优先使用 } // DefaultURLAvailability 全局 URL 可用性管理器 @@ -76,6 +77,15 @@ func (u *URLAvailability) MarkUnavailable(url string) { u.unavailable[url] = time.Now().Add(u.ttl) } +// MarkSuccess 标记 URL 请求成功,将其设为优先使用 +func (u *URLAvailability) MarkSuccess(url string) { + u.mu.Lock() + defer u.mu.Unlock() + u.lastSuccess = url + // 成功后清除该 URL 的不可用标记 + delete(u.unavailable, url) +} + // IsAvailable 检查 URL 是否可用 func (u *URLAvailability) IsAvailable(url string) bool { u.mu.RLock() @@ -87,14 +97,29 @@ func (u *URLAvailability) IsAvailable(url string) bool { return time.Now().After(expiry) } -// GetAvailableURLs 返回可用的 URL 列表(保持优先级顺序) +// GetAvailableURLs 返回可用的 URL 列表 +// 最近成功的 URL 优先,其他按默认顺序 func (u *URLAvailability) GetAvailableURLs() []string { u.mu.RLock() defer u.mu.RUnlock() now := time.Now() result := make([]string, 0, len(BaseURLs)) + + // 如果有最近成功的 URL 且可用,放在最前面 + if u.lastSuccess != "" { + expiry, exists := u.unavailable[u.lastSuccess] + if !exists || now.After(expiry) { + result = append(result, u.lastSuccess) + } + } + + // 添加其他可用的 URL(按默认顺序) for _, url := range BaseURLs { + // 跳过已添加的 lastSuccess + if url == u.lastSuccess { + continue + } expiry, exists := u.unavailable[url] if !exists || now.After(expiry) { result = append(result, url) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index a0e845ee..fc0008ea 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -266,6 +266,8 @@ func (s *AntigravityGatewayService) TestConnection(ctx context.Context, account // 解析流式响应,提取文本 text := extractTextFromSSEResponse(respBody) + // 标记成功的 URL,下次优先使用 + antigravity.DefaultURLAvailability.MarkSuccess(baseURL) return &TestConnectionResult{ Text: text, MappedModel: mappedModel, @@ -551,8 +553,10 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, // 重试循环 var resp *http.Response + var usedBaseURL string // 追踪成功使用的 URL urlFallbackLoop: for urlIdx, baseURL := range availableURLs { + usedBaseURL = baseURL for attempt := 1; attempt <= antigravityMaxRetries; attempt++ { // 检查 context 是否已取消(客户端断开连接) select { @@ -628,6 +632,11 @@ urlFallbackLoop: } defer func() { _ = resp.Body.Close() }() + // 请求成功,标记 URL 供后续优先使用 + if resp.StatusCode < 400 && usedBaseURL != "" { + antigravity.DefaultURLAvailability.MarkSuccess(usedBaseURL) + } + if resp.StatusCode >= 400 { respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) @@ -1097,8 +1106,10 @@ func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Co // 重试循环 var resp *http.Response + var usedBaseURL string // 追踪成功使用的 URL urlFallbackLoop: for urlIdx, baseURL := range availableURLs { + usedBaseURL = baseURL for attempt := 1; attempt <= antigravityMaxRetries; attempt++ { // 检查 context 是否已取消(客户端断开连接) select { @@ -1177,6 +1188,11 @@ urlFallbackLoop: } }() + // 请求成功,标记 URL 供后续优先使用 + if resp.StatusCode < 400 && usedBaseURL != "" { + antigravity.DefaultURLAvailability.MarkSuccess(usedBaseURL) + } + // 处理错误响应 if resp.StatusCode >= 400 { respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) From ac7503d95f086fe12682e291d61459eb3ef4c0a1 Mon Sep 17 00:00:00 2001 From: song Date: Sat, 17 Jan 2026 02:14:57 +0800 Subject: [PATCH 024/147] =?UTF-8?q?fix(antigravity):=20429=20=E6=97=B6?= =?UTF-8?q?=E4=B9=9F=E5=88=87=E6=8D=A2=20URL=20=E9=87=8D=E8=AF=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 429 优先切换到下一个 URL 重试 - 只有所有 URL 都返回 429 时才限流账户并返回错误 - 与 client.go 中的逻辑保持一致 --- .../service/antigravity_gateway_service.go | 20 +++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index fc0008ea..45381d37 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -591,11 +591,19 @@ urlFallbackLoop: return nil, s.writeClaudeError(c, http.StatusBadGateway, "upstream_error", "Upstream request failed after retries") } - // 429 不重试,直接限流账户 + // 429 限流:优先切换 URL,所有 URL 都 429 时才返回 if resp.StatusCode == http.StatusTooManyRequests { respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) _ = resp.Body.Close() + // 还有其他 URL,切换重试 + if urlIdx < len(availableURLs)-1 { + antigravity.DefaultURLAvailability.MarkUnavailable(baseURL) + log.Printf("%s URL fallback (429): %s -> %s", prefix, baseURL, availableURLs[urlIdx+1]) + continue urlFallbackLoop + } + + // 所有 URL 都 429,限流账户并返回 s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) log.Printf("%s status=429 rate_limited body=%s", prefix, truncateForLog(respBody, 200)) resp = &http.Response{ @@ -1144,11 +1152,19 @@ urlFallbackLoop: return nil, s.writeGoogleError(c, http.StatusBadGateway, "Upstream request failed after retries") } - // 429 不重试,直接限流账户 + // 429 限流:优先切换 URL,所有 URL 都 429 时才返回 if resp.StatusCode == http.StatusTooManyRequests { respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) _ = resp.Body.Close() + // 还有其他 URL,切换重试 + if urlIdx < len(availableURLs)-1 { + antigravity.DefaultURLAvailability.MarkUnavailable(baseURL) + log.Printf("%s URL fallback (429): %s -> %s", prefix, baseURL, availableURLs[urlIdx+1]) + continue urlFallbackLoop + } + + // 所有 URL 都 429,限流账户并返回 s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) log.Printf("%s status=429 rate_limited body=%s", prefix, truncateForLog(respBody, 200)) resp = &http.Response{ From ae21db77ecaa9f3fa05e3efe8b3a6b0c2dc47566 Mon Sep 17 00:00:00 2001 From: IanShaw027 <131567472+IanShaw027@users.noreply.github.com> Date: Sat, 17 Jan 2026 02:31:16 +0800 Subject: [PATCH 025/147] =?UTF-8?q?fix(openai):=20=E4=BD=BF=E7=94=A8=20pro?= =?UTF-8?q?mpt=5Fcache=5Fkey=20=E5=85=9C=E5=BA=95=E7=B2=98=E6=80=A7?= =?UTF-8?q?=E4=BC=9A=E8=AF=9D?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit opencode 请求不带 session_id/conversation_id,导致粘性会话失效。现在按 header 优先、prompt_cache_key 兜底生成 session hash,并补充单测验证优先级。 --- .../handler/openai_gateway_handler.go | 4 +- .../service/openai_gateway_service.go | 24 +++++++++-- .../service/openai_gateway_service_test.go | 43 +++++++++++++++++++ 3 files changed, 66 insertions(+), 5 deletions(-) diff --git a/backend/internal/handler/openai_gateway_handler.go b/backend/internal/handler/openai_gateway_handler.go index c4cfabc3..68e67656 100644 --- a/backend/internal/handler/openai_gateway_handler.go +++ b/backend/internal/handler/openai_gateway_handler.go @@ -186,8 +186,8 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { return } - // Generate session hash (from header for OpenAI) - sessionHash := h.gatewayService.GenerateSessionHash(c) + // Generate session hash (header first; fallback to prompt_cache_key) + sessionHash := h.gatewayService.GenerateSessionHash(c, reqBody) const maxAccountSwitches = 3 switchCount := 0 diff --git a/backend/internal/service/openai_gateway_service.go b/backend/internal/service/openai_gateway_service.go index c7d94882..a3c4a239 100644 --- a/backend/internal/service/openai_gateway_service.go +++ b/backend/internal/service/openai_gateway_service.go @@ -133,12 +133,30 @@ func NewOpenAIGatewayService( } } -// GenerateSessionHash generates session hash from header (OpenAI uses session_id header) -func (s *OpenAIGatewayService) GenerateSessionHash(c *gin.Context) string { - sessionID := c.GetHeader("session_id") +// GenerateSessionHash generates a sticky-session hash for OpenAI requests. +// +// Priority: +// 1. Header: session_id +// 2. Header: conversation_id +// 3. Body: prompt_cache_key (opencode) +func (s *OpenAIGatewayService) GenerateSessionHash(c *gin.Context, reqBody map[string]any) string { + if c == nil { + return "" + } + + sessionID := strings.TrimSpace(c.GetHeader("session_id")) + if sessionID == "" { + sessionID = strings.TrimSpace(c.GetHeader("conversation_id")) + } + if sessionID == "" && reqBody != nil { + if v, ok := reqBody["prompt_cache_key"].(string); ok { + sessionID = strings.TrimSpace(v) + } + } if sessionID == "" { return "" } + hash := sha256.Sum256([]byte(sessionID)) return hex.EncodeToString(hash[:]) } diff --git a/backend/internal/service/openai_gateway_service_test.go b/backend/internal/service/openai_gateway_service_test.go index 42b88b7d..a34b8045 100644 --- a/backend/internal/service/openai_gateway_service_test.go +++ b/backend/internal/service/openai_gateway_service_test.go @@ -49,6 +49,49 @@ func (c stubConcurrencyCache) GetAccountsLoadBatch(ctx context.Context, accounts return out, nil } +func TestOpenAIGatewayService_GenerateSessionHash_Priority(t *testing.T) { + gin.SetMode(gin.TestMode) + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + + svc := &OpenAIGatewayService{} + + // 1) session_id header wins + c.Request.Header.Set("session_id", "sess-123") + c.Request.Header.Set("conversation_id", "conv-456") + h1 := svc.GenerateSessionHash(c, map[string]any{"prompt_cache_key": "ses_aaa"}) + if h1 == "" { + t.Fatalf("expected non-empty hash") + } + + // 2) conversation_id used when session_id absent + c.Request.Header.Del("session_id") + h2 := svc.GenerateSessionHash(c, map[string]any{"prompt_cache_key": "ses_aaa"}) + if h2 == "" { + t.Fatalf("expected non-empty hash") + } + if h1 == h2 { + t.Fatalf("expected different hashes for different keys") + } + + // 3) prompt_cache_key used when both headers absent + c.Request.Header.Del("conversation_id") + h3 := svc.GenerateSessionHash(c, map[string]any{"prompt_cache_key": "ses_aaa"}) + if h3 == "" { + t.Fatalf("expected non-empty hash") + } + if h2 == h3 { + t.Fatalf("expected different hashes for different keys") + } + + // 4) empty when no signals + h4 := svc.GenerateSessionHash(c, map[string]any{}) + if h4 != "" { + t.Fatalf("expected empty hash when no signals") + } +} + func TestOpenAISelectAccountWithLoadAwareness_FiltersUnschedulable(t *testing.T) { now := time.Now() resetAt := now.Add(10 * time.Minute) From 78bccd032d0bd5388f7be8f4f8c79b8f5611bebb Mon Sep 17 00:00:00 2001 From: song Date: Sat, 17 Jan 2026 10:28:31 +0800 Subject: [PATCH 026/147] =?UTF-8?q?refactor(antigravity):=20=E6=8F=90?= =?UTF-8?q?=E5=8F=96=E5=85=AC=E5=85=B1=E9=87=8D=E8=AF=95=E5=BE=AA=E7=8E=AF?= =?UTF-8?q?=E5=87=BD=E6=95=B0=E5=87=8F=E5=B0=91=E9=87=8D=E5=A4=8D=E4=BB=A3?= =?UTF-8?q?=E7=A0=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 新增 antigravityRetryLoop 函数统一处理 Forward 和 ForwardGemini 的重试逻辑 - 429 日志增加 base_url 字段便于调试 - 删除重复的 shouldRetryUpstreamError 方法 --- .../service/antigravity_gateway_service.go | 365 ++++++++---------- 1 file changed, 163 insertions(+), 202 deletions(-) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 45381d37..7e89c97d 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -28,6 +28,135 @@ const ( antigravityRetryMaxDelay = 16 * time.Second ) +// antigravityRetryLoopParams 重试循环的参数 +type antigravityRetryLoopParams struct { + ctx context.Context + prefix string + account *Account + proxyURL string + accessToken string + action string + body []byte + quotaScope AntigravityQuotaScope + httpUpstream HTTPUpstream + accountRepo AccountRepository + handleError func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope) +} + +// antigravityRetryLoopResult 重试循环的结果 +type antigravityRetryLoopResult struct { + resp *http.Response + usedBaseURL string +} + +// antigravityRetryLoop 执行带 URL fallback 的重试循环 +func antigravityRetryLoop(p antigravityRetryLoopParams) (*antigravityRetryLoopResult, error) { + availableURLs := antigravity.DefaultURLAvailability.GetAvailableURLs() + if len(availableURLs) == 0 { + availableURLs = antigravity.BaseURLs + } + + var resp *http.Response + var usedBaseURL string + +urlFallbackLoop: + for urlIdx, baseURL := range availableURLs { + usedBaseURL = baseURL + for attempt := 1; attempt <= antigravityMaxRetries; attempt++ { + select { + case <-p.ctx.Done(): + log.Printf("%s status=context_canceled error=%v", p.prefix, p.ctx.Err()) + return nil, p.ctx.Err() + default: + } + + upstreamReq, err := antigravity.NewAPIRequestWithURL(p.ctx, baseURL, p.action, p.accessToken, p.body) + if err != nil { + return nil, err + } + + resp, err = p.httpUpstream.Do(upstreamReq, p.proxyURL, p.account.ID, p.account.Concurrency) + if err != nil { + if shouldAntigravityFallbackToNextURL(err, 0) && urlIdx < len(availableURLs)-1 { + antigravity.DefaultURLAvailability.MarkUnavailable(baseURL) + log.Printf("%s URL fallback (connection error): %s -> %s", p.prefix, baseURL, availableURLs[urlIdx+1]) + continue urlFallbackLoop + } + if attempt < antigravityMaxRetries { + log.Printf("%s status=request_failed retry=%d/%d error=%v", p.prefix, attempt, antigravityMaxRetries, err) + if !sleepAntigravityBackoffWithContext(p.ctx, attempt) { + log.Printf("%s status=context_canceled_during_backoff", p.prefix) + return nil, p.ctx.Err() + } + continue + } + log.Printf("%s status=request_failed retries_exhausted error=%v", p.prefix, err) + return nil, fmt.Errorf("upstream request failed after retries: %w", err) + } + + // 429 限流:优先切换 URL,所有 URL 都 429 时才返回 + if resp.StatusCode == http.StatusTooManyRequests { + respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + _ = resp.Body.Close() + + if urlIdx < len(availableURLs)-1 { + antigravity.DefaultURLAvailability.MarkUnavailable(baseURL) + log.Printf("%s URL fallback (429): %s -> %s", p.prefix, baseURL, availableURLs[urlIdx+1]) + continue urlFallbackLoop + } + + p.handleError(p.ctx, p.prefix, p.account, resp.StatusCode, resp.Header, respBody, p.quotaScope) + log.Printf("%s status=429 rate_limited base_url=%s body=%s", p.prefix, baseURL, truncateForLog(respBody, 200)) + resp = &http.Response{ + StatusCode: resp.StatusCode, + Header: resp.Header.Clone(), + Body: io.NopCloser(bytes.NewReader(respBody)), + } + break urlFallbackLoop + } + + // 其他可重试错误 + if resp.StatusCode >= 400 && shouldRetryAntigravityError(resp.StatusCode) { + respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + _ = resp.Body.Close() + + if attempt < antigravityMaxRetries { + log.Printf("%s status=%d retry=%d/%d body=%s", p.prefix, resp.StatusCode, attempt, antigravityMaxRetries, truncateForLog(respBody, 500)) + if !sleepAntigravityBackoffWithContext(p.ctx, attempt) { + log.Printf("%s status=context_canceled_during_backoff", p.prefix) + return nil, p.ctx.Err() + } + continue + } + resp = &http.Response{ + StatusCode: resp.StatusCode, + Header: resp.Header.Clone(), + Body: io.NopCloser(bytes.NewReader(respBody)), + } + break urlFallbackLoop + } + + break urlFallbackLoop + } + } + + if resp != nil && resp.StatusCode < 400 && usedBaseURL != "" { + antigravity.DefaultURLAvailability.MarkSuccess(usedBaseURL) + } + + return &antigravityRetryLoopResult{resp: resp, usedBaseURL: usedBaseURL}, nil +} + +// shouldRetryAntigravityError 判断是否应该重试 +func shouldRetryAntigravityError(statusCode int) bool { + switch statusCode { + case 429, 500, 502, 503, 504, 529: + return true + default: + return false + } +} + // isAntigravityConnectionError 判断是否为连接错误(网络超时、DNS 失败、连接拒绝) func isAntigravityConnectionError(err error) bool { if err == nil { @@ -545,106 +674,26 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, // 如果客户端请求非流式,在响应处理阶段会收集完整流式响应后转换返回 action := "streamGenerateContent" - // URL fallback 循环 - availableURLs := antigravity.DefaultURLAvailability.GetAvailableURLs() - if len(availableURLs) == 0 { - availableURLs = antigravity.BaseURLs // 所有 URL 都不可用时,重试所有 - } - - // 重试循环 - var resp *http.Response - var usedBaseURL string // 追踪成功使用的 URL -urlFallbackLoop: - for urlIdx, baseURL := range availableURLs { - usedBaseURL = baseURL - for attempt := 1; attempt <= antigravityMaxRetries; attempt++ { - // 检查 context 是否已取消(客户端断开连接) - select { - case <-ctx.Done(): - log.Printf("%s status=context_canceled error=%v", prefix, ctx.Err()) - return nil, ctx.Err() - default: - } - - upstreamReq, err := antigravity.NewAPIRequestWithURL(ctx, baseURL, action, accessToken, geminiBody) - if err != nil { - return nil, err - } - - resp, err = s.httpUpstream.Do(upstreamReq, proxyURL, account.ID, account.Concurrency) - if err != nil { - // 检查是否应触发 URL 降级 - if shouldAntigravityFallbackToNextURL(err, 0) && urlIdx < len(availableURLs)-1 { - antigravity.DefaultURLAvailability.MarkUnavailable(baseURL) - log.Printf("%s URL fallback (connection error): %s -> %s", prefix, baseURL, availableURLs[urlIdx+1]) - continue urlFallbackLoop - } - if attempt < antigravityMaxRetries { - log.Printf("%s status=request_failed retry=%d/%d error=%v", prefix, attempt, antigravityMaxRetries, err) - if !sleepAntigravityBackoffWithContext(ctx, attempt) { - log.Printf("%s status=context_canceled_during_backoff", prefix) - return nil, ctx.Err() - } - continue - } - log.Printf("%s status=request_failed retries_exhausted error=%v", prefix, err) - return nil, s.writeClaudeError(c, http.StatusBadGateway, "upstream_error", "Upstream request failed after retries") - } - - // 429 限流:优先切换 URL,所有 URL 都 429 时才返回 - if resp.StatusCode == http.StatusTooManyRequests { - respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) - _ = resp.Body.Close() - - // 还有其他 URL,切换重试 - if urlIdx < len(availableURLs)-1 { - antigravity.DefaultURLAvailability.MarkUnavailable(baseURL) - log.Printf("%s URL fallback (429): %s -> %s", prefix, baseURL, availableURLs[urlIdx+1]) - continue urlFallbackLoop - } - - // 所有 URL 都 429,限流账户并返回 - s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) - log.Printf("%s status=429 rate_limited body=%s", prefix, truncateForLog(respBody, 200)) - resp = &http.Response{ - StatusCode: resp.StatusCode, - Header: resp.Header.Clone(), - Body: io.NopCloser(bytes.NewReader(respBody)), - } - break urlFallbackLoop - } - - if resp.StatusCode >= 400 && s.shouldRetryUpstreamError(resp.StatusCode) { - respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) - _ = resp.Body.Close() - - if attempt < antigravityMaxRetries { - log.Printf("%s status=%d retry=%d/%d body=%s", prefix, resp.StatusCode, attempt, antigravityMaxRetries, truncateForLog(respBody, 500)) - if !sleepAntigravityBackoffWithContext(ctx, attempt) { - log.Printf("%s status=context_canceled_during_backoff", prefix) - return nil, ctx.Err() - } - continue - } - // 最后一次尝试也失败 - resp = &http.Response{ - StatusCode: resp.StatusCode, - Header: resp.Header.Clone(), - Body: io.NopCloser(bytes.NewReader(respBody)), - } - break urlFallbackLoop - } - - break urlFallbackLoop - } + // 执行带重试的请求 + result, err := antigravityRetryLoop(antigravityRetryLoopParams{ + ctx: ctx, + prefix: prefix, + account: account, + proxyURL: proxyURL, + accessToken: accessToken, + action: action, + body: geminiBody, + quotaScope: quotaScope, + httpUpstream: s.httpUpstream, + accountRepo: s.accountRepo, + handleError: s.handleUpstreamError, + }) + if err != nil { + return nil, s.writeClaudeError(c, http.StatusBadGateway, "upstream_error", "Upstream request failed after retries") } + resp := result.resp defer func() { _ = resp.Body.Close() }() - // 请求成功,标记 URL 供后续优先使用 - if resp.StatusCode < 400 && usedBaseURL != "" { - antigravity.DefaultURLAvailability.MarkSuccess(usedBaseURL) - } - if resp.StatusCode >= 400 { respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) @@ -1106,109 +1155,30 @@ func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Co // 如果客户端请求非流式,在响应处理阶段会收集完整流式响应后返回 upstreamAction := "streamGenerateContent" - // URL fallback 循环 - availableURLs := antigravity.DefaultURLAvailability.GetAvailableURLs() - if len(availableURLs) == 0 { - availableURLs = antigravity.BaseURLs // 所有 URL 都不可用时,重试所有 - } - - // 重试循环 - var resp *http.Response - var usedBaseURL string // 追踪成功使用的 URL -urlFallbackLoop: - for urlIdx, baseURL := range availableURLs { - usedBaseURL = baseURL - for attempt := 1; attempt <= antigravityMaxRetries; attempt++ { - // 检查 context 是否已取消(客户端断开连接) - select { - case <-ctx.Done(): - log.Printf("%s status=context_canceled error=%v", prefix, ctx.Err()) - return nil, ctx.Err() - default: - } - - upstreamReq, err := antigravity.NewAPIRequestWithURL(ctx, baseURL, upstreamAction, accessToken, wrappedBody) - if err != nil { - return nil, err - } - - resp, err = s.httpUpstream.Do(upstreamReq, proxyURL, account.ID, account.Concurrency) - if err != nil { - // 检查是否应触发 URL 降级 - if shouldAntigravityFallbackToNextURL(err, 0) && urlIdx < len(availableURLs)-1 { - antigravity.DefaultURLAvailability.MarkUnavailable(baseURL) - log.Printf("%s URL fallback (connection error): %s -> %s", prefix, baseURL, availableURLs[urlIdx+1]) - continue urlFallbackLoop - } - if attempt < antigravityMaxRetries { - log.Printf("%s status=request_failed retry=%d/%d error=%v", prefix, attempt, antigravityMaxRetries, err) - if !sleepAntigravityBackoffWithContext(ctx, attempt) { - log.Printf("%s status=context_canceled_during_backoff", prefix) - return nil, ctx.Err() - } - continue - } - log.Printf("%s status=request_failed retries_exhausted error=%v", prefix, err) - return nil, s.writeGoogleError(c, http.StatusBadGateway, "Upstream request failed after retries") - } - - // 429 限流:优先切换 URL,所有 URL 都 429 时才返回 - if resp.StatusCode == http.StatusTooManyRequests { - respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) - _ = resp.Body.Close() - - // 还有其他 URL,切换重试 - if urlIdx < len(availableURLs)-1 { - antigravity.DefaultURLAvailability.MarkUnavailable(baseURL) - log.Printf("%s URL fallback (429): %s -> %s", prefix, baseURL, availableURLs[urlIdx+1]) - continue urlFallbackLoop - } - - // 所有 URL 都 429,限流账户并返回 - s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) - log.Printf("%s status=429 rate_limited body=%s", prefix, truncateForLog(respBody, 200)) - resp = &http.Response{ - StatusCode: resp.StatusCode, - Header: resp.Header.Clone(), - Body: io.NopCloser(bytes.NewReader(respBody)), - } - break urlFallbackLoop - } - - if resp.StatusCode >= 400 && s.shouldRetryUpstreamError(resp.StatusCode) { - respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) - _ = resp.Body.Close() - - if attempt < antigravityMaxRetries { - log.Printf("%s status=%d retry=%d/%d", prefix, resp.StatusCode, attempt, antigravityMaxRetries) - if !sleepAntigravityBackoffWithContext(ctx, attempt) { - log.Printf("%s status=context_canceled_during_backoff", prefix) - return nil, ctx.Err() - } - continue - } - resp = &http.Response{ - StatusCode: resp.StatusCode, - Header: resp.Header.Clone(), - Body: io.NopCloser(bytes.NewReader(respBody)), - } - break urlFallbackLoop - } - - break urlFallbackLoop - } + // 执行带重试的请求 + result, err := antigravityRetryLoop(antigravityRetryLoopParams{ + ctx: ctx, + prefix: prefix, + account: account, + proxyURL: proxyURL, + accessToken: accessToken, + action: upstreamAction, + body: wrappedBody, + quotaScope: quotaScope, + httpUpstream: s.httpUpstream, + accountRepo: s.accountRepo, + handleError: s.handleUpstreamError, + }) + if err != nil { + return nil, s.writeGoogleError(c, http.StatusBadGateway, "Upstream request failed after retries") } + resp := result.resp defer func() { if resp != nil && resp.Body != nil { _ = resp.Body.Close() } }() - // 请求成功,标记 URL 供后续优先使用 - if resp.StatusCode < 400 && usedBaseURL != "" { - antigravity.DefaultURLAvailability.MarkSuccess(usedBaseURL) - } - // 处理错误响应 if resp.StatusCode >= 400 { respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) @@ -1317,15 +1287,6 @@ handleSuccess: }, nil } -func (s *AntigravityGatewayService) shouldRetryUpstreamError(statusCode int) bool { - switch statusCode { - case 429, 500, 502, 503, 504, 529: - return true - default: - return false - } -} - func (s *AntigravityGatewayService) shouldFailoverUpstreamError(statusCode int) bool { switch statusCode { case 401, 403, 429, 529: From 31933c8a604826c0639f2ea2eceb4b52fb95f6cf Mon Sep 17 00:00:00 2001 From: song Date: Sat, 17 Jan 2026 10:40:28 +0800 Subject: [PATCH 027/147] =?UTF-8?q?fix:=20=E5=88=A0=E9=99=A4=E6=9C=AA?= =?UTF-8?q?=E4=BD=BF=E7=94=A8=E7=9A=84=E5=AD=97=E6=AE=B5=E4=BF=AE=E5=A4=8D?= =?UTF-8?q?=20lint=20=E9=94=99=E8=AF=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/service/antigravity_gateway_service.go | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 7e89c97d..00b89260 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -39,14 +39,12 @@ type antigravityRetryLoopParams struct { body []byte quotaScope AntigravityQuotaScope httpUpstream HTTPUpstream - accountRepo AccountRepository handleError func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope) } // antigravityRetryLoopResult 重试循环的结果 type antigravityRetryLoopResult struct { - resp *http.Response - usedBaseURL string + resp *http.Response } // antigravityRetryLoop 执行带 URL fallback 的重试循环 @@ -144,7 +142,7 @@ urlFallbackLoop: antigravity.DefaultURLAvailability.MarkSuccess(usedBaseURL) } - return &antigravityRetryLoopResult{resp: resp, usedBaseURL: usedBaseURL}, nil + return &antigravityRetryLoopResult{resp: resp}, nil } // shouldRetryAntigravityError 判断是否应该重试 @@ -685,7 +683,6 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, body: geminiBody, quotaScope: quotaScope, httpUpstream: s.httpUpstream, - accountRepo: s.accountRepo, handleError: s.handleUpstreamError, }) if err != nil { @@ -1166,7 +1163,6 @@ func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Co body: wrappedBody, quotaScope: quotaScope, httpUpstream: s.httpUpstream, - accountRepo: s.accountRepo, handleError: s.handleUpstreamError, }) if err != nil { From a61cc2cb249e32e72fd5d0b41d1e3294bda79d75 Mon Sep 17 00:00:00 2001 From: IanShaw027 <131567472+IanShaw027@users.noreply.github.com> Date: Sat, 17 Jan 2026 11:00:07 +0800 Subject: [PATCH 028/147] =?UTF-8?q?fix(openai):=20=E5=A2=9E=E5=BC=BA=20Cod?= =?UTF-8?q?ex=20=E5=B7=A5=E5=85=B7=E8=BF=87=E6=BB=A4=E5=92=8C=E5=8F=82?= =?UTF-8?q?=E6=95=B0=E6=A0=87=E5=87=86=E5=8C=96?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - codex_transform: 过滤无效工具,支持 Responses-style 和 ChatCompletions-style 格式 - tool_corrector: 添加 fetch 工具映射,修正 bash/edit 参数命名规范 --- .../service/openai_codex_transform.go | 28 +++++-- .../service/openai_codex_transform_test.go | 31 ++++++++ .../internal/service/openai_tool_corrector.go | 77 +++++++++++++++---- .../service/openai_tool_corrector_test.go | 19 +++-- 4 files changed, 125 insertions(+), 30 deletions(-) diff --git a/backend/internal/service/openai_codex_transform.go b/backend/internal/service/openai_codex_transform.go index 264bdf95..48c72593 100644 --- a/backend/internal/service/openai_codex_transform.go +++ b/backend/internal/service/openai_codex_transform.go @@ -394,19 +394,35 @@ func normalizeCodexTools(reqBody map[string]any) bool { } modified := false - for idx, tool := range tools { + validTools := make([]any, 0, len(tools)) + + for _, tool := range tools { toolMap, ok := tool.(map[string]any) if !ok { + // Keep unknown structure as-is to avoid breaking upstream behavior. + validTools = append(validTools, tool) continue } toolType, _ := toolMap["type"].(string) - if strings.TrimSpace(toolType) != "function" { + toolType = strings.TrimSpace(toolType) + if toolType != "function" { + validTools = append(validTools, toolMap) continue } - function, ok := toolMap["function"].(map[string]any) - if !ok { + // OpenAI Responses-style tools use top-level name/parameters. + if name, ok := toolMap["name"].(string); ok && strings.TrimSpace(name) != "" { + validTools = append(validTools, toolMap) + continue + } + + // ChatCompletions-style tools use {type:"function", function:{...}}. + functionValue, hasFunction := toolMap["function"] + function, ok := functionValue.(map[string]any) + if !hasFunction || functionValue == nil || !ok || function == nil { + // Drop invalid function tools. + modified = true continue } @@ -435,11 +451,11 @@ func normalizeCodexTools(reqBody map[string]any) bool { } } - tools[idx] = toolMap + validTools = append(validTools, toolMap) } if modified { - reqBody["tools"] = tools + reqBody["tools"] = validTools } return modified diff --git a/backend/internal/service/openai_codex_transform_test.go b/backend/internal/service/openai_codex_transform_test.go index 0ff9485a..4cd72ab6 100644 --- a/backend/internal/service/openai_codex_transform_test.go +++ b/backend/internal/service/openai_codex_transform_test.go @@ -129,6 +129,37 @@ func TestFilterCodexInput_RemovesItemReferenceWhenNotPreserved(t *testing.T) { require.False(t, hasID) } +func TestApplyCodexOAuthTransform_NormalizeCodexTools_PreservesResponsesFunctionTools(t *testing.T) { + setupCodexCache(t) + + reqBody := map[string]any{ + "model": "gpt-5.1", + "tools": []any{ + map[string]any{ + "type": "function", + "name": "bash", + "description": "desc", + "parameters": map[string]any{"type": "object"}, + }, + map[string]any{ + "type": "function", + "function": nil, + }, + }, + } + + applyCodexOAuthTransform(reqBody) + + tools, ok := reqBody["tools"].([]any) + require.True(t, ok) + require.Len(t, tools, 1) + + first, ok := tools[0].(map[string]any) + require.True(t, ok) + require.Equal(t, "function", first["type"]) + require.Equal(t, "bash", first["name"]) +} + func TestApplyCodexOAuthTransform_EmptyInput(t *testing.T) { // 空 input 应保持为空且不触发异常。 setupCodexCache(t) diff --git a/backend/internal/service/openai_tool_corrector.go b/backend/internal/service/openai_tool_corrector.go index 9c9eab84..f4719275 100644 --- a/backend/internal/service/openai_tool_corrector.go +++ b/backend/internal/service/openai_tool_corrector.go @@ -27,6 +27,11 @@ var codexToolNameMapping = map[string]string{ "executeBash": "bash", "exec_bash": "bash", "execBash": "bash", + + // Some clients output generic fetch names. + "fetch": "webfetch", + "web_fetch": "webfetch", + "webFetch": "webfetch", } // ToolCorrectionStats 记录工具修正的统计信息(导出用于 JSON 序列化) @@ -208,27 +213,67 @@ func (c *CodexToolCorrector) correctToolParameters(toolName string, functionCall // 根据工具名称应用特定的参数修正规则 switch toolName { case "bash": - // 移除 workdir 参数(OpenCode 不支持) - if _, exists := argsMap["workdir"]; exists { - delete(argsMap, "workdir") - corrected = true - log.Printf("[CodexToolCorrector] Removed 'workdir' parameter from bash tool") - } - if _, exists := argsMap["work_dir"]; exists { - delete(argsMap, "work_dir") - corrected = true - log.Printf("[CodexToolCorrector] Removed 'work_dir' parameter from bash tool") + // OpenCode bash 支持 workdir;有些来源会输出 work_dir。 + if _, hasWorkdir := argsMap["workdir"]; !hasWorkdir { + if workDir, exists := argsMap["work_dir"]; exists { + argsMap["workdir"] = workDir + delete(argsMap, "work_dir") + corrected = true + log.Printf("[CodexToolCorrector] Renamed 'work_dir' to 'workdir' in bash tool") + } + } else { + if _, exists := argsMap["work_dir"]; exists { + delete(argsMap, "work_dir") + corrected = true + log.Printf("[CodexToolCorrector] Removed duplicate 'work_dir' parameter from bash tool") + } } case "edit": - // OpenCode edit 使用 old_string/new_string,Codex 可能使用其他名称 - // 这里可以添加参数名称的映射逻辑 - if _, exists := argsMap["file_path"]; !exists { - if path, exists := argsMap["path"]; exists { - argsMap["file_path"] = path + // OpenCode edit 参数为 filePath/oldString/newString(camelCase)。 + if _, exists := argsMap["filePath"]; !exists { + if filePath, exists := argsMap["file_path"]; exists { + argsMap["filePath"] = filePath + delete(argsMap, "file_path") + corrected = true + log.Printf("[CodexToolCorrector] Renamed 'file_path' to 'filePath' in edit tool") + } else if filePath, exists := argsMap["path"]; exists { + argsMap["filePath"] = filePath delete(argsMap, "path") corrected = true - log.Printf("[CodexToolCorrector] Renamed 'path' to 'file_path' in edit tool") + log.Printf("[CodexToolCorrector] Renamed 'path' to 'filePath' in edit tool") + } else if filePath, exists := argsMap["file"]; exists { + argsMap["filePath"] = filePath + delete(argsMap, "file") + corrected = true + log.Printf("[CodexToolCorrector] Renamed 'file' to 'filePath' in edit tool") + } + } + + if _, exists := argsMap["oldString"]; !exists { + if oldString, exists := argsMap["old_string"]; exists { + argsMap["oldString"] = oldString + delete(argsMap, "old_string") + corrected = true + log.Printf("[CodexToolCorrector] Renamed 'old_string' to 'oldString' in edit tool") + } + } + + if _, exists := argsMap["newString"]; !exists { + if newString, exists := argsMap["new_string"]; exists { + argsMap["newString"] = newString + delete(argsMap, "new_string") + corrected = true + log.Printf("[CodexToolCorrector] Renamed 'new_string' to 'newString' in edit tool") + } + } + + if _, exists := argsMap["replaceAll"]; !exists { + if replaceAll, exists := argsMap["replace_all"]; exists { + argsMap["replaceAll"] = replaceAll + delete(argsMap, "replace_all") + corrected = true + log.Printf("[CodexToolCorrector] Renamed 'replace_all' to 'replaceAll' in edit tool") } } } diff --git a/backend/internal/service/openai_tool_corrector_test.go b/backend/internal/service/openai_tool_corrector_test.go index 3e885b4b..ff518ea6 100644 --- a/backend/internal/service/openai_tool_corrector_test.go +++ b/backend/internal/service/openai_tool_corrector_test.go @@ -416,22 +416,23 @@ func TestCorrectToolParameters(t *testing.T) { expected map[string]bool // key: 期待存在的参数, value: true表示应该存在 }{ { - name: "remove workdir from bash tool", + name: "rename work_dir to workdir in bash tool", input: `{ "tool_calls": [{ "function": { "name": "bash", - "arguments": "{\"command\":\"ls\",\"workdir\":\"/tmp\"}" + "arguments": "{\"command\":\"ls\",\"work_dir\":\"/tmp\"}" } }] }`, expected: map[string]bool{ - "command": true, - "workdir": false, + "command": true, + "workdir": true, + "work_dir": false, }, }, { - name: "rename path to file_path in edit tool", + name: "rename snake_case edit params to camelCase", input: `{ "tool_calls": [{ "function": { @@ -441,10 +442,12 @@ func TestCorrectToolParameters(t *testing.T) { }] }`, expected: map[string]bool{ - "file_path": true, + "filePath": true, "path": false, - "old_string": true, - "new_string": true, + "oldString": true, + "old_string": false, + "newString": true, + "new_string": false, }, }, } From 5a6f60a95412d31e6acf3e4d762ed9a8c69a6d26 Mon Sep 17 00:00:00 2001 From: song Date: Sat, 17 Jan 2026 11:11:18 +0800 Subject: [PATCH 029/147] =?UTF-8?q?fix(antigravity):=20=E5=8C=BA=E5=88=86?= =?UTF-8?q?=20URL=20=E7=BA=A7=E5=88=AB=E5=92=8C=E8=B4=A6=E6=88=B7=E9=85=8D?= =?UTF-8?q?=E9=A2=9D=E7=BA=A7=E5=88=AB=E7=9A=84=20429=20=E9=99=90=E6=B5=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - "Resource has been exhausted" → URL 级别限流,立即切换 URL - "exhausted your capacity on this model" → 账户配额限流,重试 3 次(指数退避)后标记限流 --- .../service/antigravity_gateway_service.go | 26 +++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 00b89260..fcdf04f1 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -92,17 +92,29 @@ urlFallbackLoop: return nil, fmt.Errorf("upstream request failed after retries: %w", err) } - // 429 限流:优先切换 URL,所有 URL 都 429 时才返回 + // 429 限流处理:区分 URL 级别限流和账户配额限流 if resp.StatusCode == http.StatusTooManyRequests { respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) _ = resp.Body.Close() - if urlIdx < len(availableURLs)-1 { + // "Resource has been exhausted" 是 URL 级别限流,切换 URL + if isURLLevelRateLimit(respBody) && urlIdx < len(availableURLs)-1 { antigravity.DefaultURLAvailability.MarkUnavailable(baseURL) log.Printf("%s URL fallback (429): %s -> %s", p.prefix, baseURL, availableURLs[urlIdx+1]) continue urlFallbackLoop } + // 账户/模型配额限流,重试 3 次(指数退避) + if attempt < antigravityMaxRetries { + log.Printf("%s status=429 retry=%d/%d body=%s", p.prefix, attempt, antigravityMaxRetries, truncateForLog(respBody, 200)) + if !sleepAntigravityBackoffWithContext(p.ctx, attempt) { + log.Printf("%s status=context_canceled_during_backoff", p.prefix) + return nil, p.ctx.Err() + } + continue + } + + // 重试用尽,标记账户限流 p.handleError(p.ctx, p.prefix, p.account, resp.StatusCode, resp.Header, respBody, p.quotaScope) log.Printf("%s status=429 rate_limited base_url=%s body=%s", p.prefix, baseURL, truncateForLog(respBody, 200)) resp = &http.Response{ @@ -155,6 +167,16 @@ func shouldRetryAntigravityError(statusCode int) bool { } } +// isURLLevelRateLimit 判断是否为 URL 级别的限流(应切换 URL 重试) +// "Resource has been exhausted" 是 URL/节点级别限流,切换 URL 可能成功 +// "exhausted your capacity on this model" 是账户/模型配额限流,切换 URL 无效 +func isURLLevelRateLimit(body []byte) bool { + // 快速检查:包含 "Resource has been exhausted" 且不包含 "capacity on this model" + bodyStr := string(body) + return strings.Contains(bodyStr, "Resource has been exhausted") && + !strings.Contains(bodyStr, "capacity on this model") +} + // isAntigravityConnectionError 判断是否为连接错误(网络超时、DNS 失败、连接拒绝) func isAntigravityConnectionError(err error) bool { if err == nil { From bc1d7edc58a09b0ef0abb5297a679eadeb6d74a4 Mon Sep 17 00:00:00 2001 From: ianshaw Date: Sat, 17 Jan 2026 17:54:33 +0800 Subject: [PATCH 030/147] =?UTF-8?q?fix(ops):=20=E7=BB=9F=E4=B8=80=20reques?= =?UTF-8?q?t-errors=20=E5=92=8C=20SLA=20=E7=9A=84=E9=94=99=E8=AF=AF?= =?UTF-8?q?=E5=88=86=E7=B1=BB=E9=80=BB=E8=BE=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 修复 request-errors 接口与 Dashboard Overview SLA 计算不一致的问题: - errors 视图现在只排除业务限制错误(余额不足、并发限制等) - 上游 429/529 错误现在包含在 errors 视图中,与 SLA 计算保持一致 - excluded 视图现在只显示业务限制错误 这确保了 request-errors 接口和 Dashboard 的 error_count_sla 使用相同的过滤逻辑。 --- backend/internal/repository/ops_repo.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/backend/internal/repository/ops_repo.go b/backend/internal/repository/ops_repo.go index 613c5bd5..b04154b7 100644 --- a/backend/internal/repository/ops_repo.go +++ b/backend/internal/repository/ops_repo.go @@ -992,7 +992,8 @@ func buildOpsErrorLogsWhere(filter *service.OpsErrorLogFilter) (string, []any) { } // View filter: errors vs excluded vs all. - // Excluded = upstream 429/529 and business-limited (quota/concurrency/billing) errors. + // Excluded = business-limited errors (quota/concurrency/billing). + // Upstream 429/529 are included in errors view to match SLA calculation. view := "" if filter != nil { view = strings.ToLower(strings.TrimSpace(filter.View)) @@ -1000,15 +1001,13 @@ func buildOpsErrorLogsWhere(filter *service.OpsErrorLogFilter) (string, []any) { switch view { case "", "errors": clauses = append(clauses, "COALESCE(is_business_limited,false) = false") - clauses = append(clauses, "COALESCE(upstream_status_code, status_code, 0) NOT IN (429, 529)") case "excluded": - clauses = append(clauses, "(COALESCE(is_business_limited,false) = true OR COALESCE(upstream_status_code, status_code, 0) IN (429, 529))") + clauses = append(clauses, "COALESCE(is_business_limited,false) = true") case "all": // no-op default: // treat unknown as default 'errors' clauses = append(clauses, "COALESCE(is_business_limited,false) = false") - clauses = append(clauses, "COALESCE(upstream_status_code, status_code, 0) NOT IN (429, 529)") } if len(filter.StatusCodes) > 0 { args = append(args, pq.Array(filter.StatusCodes)) From 14a3694a9af4032b74c830c7e89afe121b731c63 Mon Sep 17 00:00:00 2001 From: song Date: Sat, 17 Jan 2026 18:03:45 +0800 Subject: [PATCH 031/147] chore: set antigravity fallback cooldown default to 1 --- backend/internal/config/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index 3bd72608..85face75 100644 --- a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -772,7 +772,7 @@ func setDefaults() { viper.SetDefault("gateway.failover_on_400", false) viper.SetDefault("gateway.max_account_switches", 10) viper.SetDefault("gateway.max_account_switches_gemini", 3) - viper.SetDefault("gateway.antigravity_fallback_cooldown_minutes", 5) + viper.SetDefault("gateway.antigravity_fallback_cooldown_minutes", 1) viper.SetDefault("gateway.max_body_size", int64(100*1024*1024)) viper.SetDefault("gateway.connection_pool_isolation", ConnectionPoolIsolationAccountProxy) // HTTP 上游连接池配置(针对 5000+ 并发用户优化) From 9078b17a41ef717c99dfcde899d80b23507a3a2a Mon Sep 17 00:00:00 2001 From: song Date: Sat, 17 Jan 2026 18:15:45 +0800 Subject: [PATCH 032/147] test: add antigravity rate limit coverage --- .../service/antigravity_rate_limit_test.go | 186 ++++++++++++++++++ 1 file changed, 186 insertions(+) create mode 100644 backend/internal/service/antigravity_rate_limit_test.go diff --git a/backend/internal/service/antigravity_rate_limit_test.go b/backend/internal/service/antigravity_rate_limit_test.go new file mode 100644 index 00000000..bf02364b --- /dev/null +++ b/backend/internal/service/antigravity_rate_limit_test.go @@ -0,0 +1,186 @@ +//go:build unit + +package service + +import ( + "context" + "fmt" + "io" + "net/http" + "strings" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/antigravity" + "github.com/stretchr/testify/require" +) + +type stubAntigravityUpstream struct { + firstBase string + secondBase string + calls []string +} + +func (s *stubAntigravityUpstream) Do(req *http.Request, proxyURL string, accountID int64, accountConcurrency int) (*http.Response, error) { + url := req.URL.String() + s.calls = append(s.calls, url) + if strings.HasPrefix(url, s.firstBase) { + return &http.Response{ + StatusCode: http.StatusTooManyRequests, + Header: http.Header{}, + Body: io.NopCloser(strings.NewReader(`{"error":{"message":"Resource has been exhausted"}}`)), + }, nil + } + return &http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{}, + Body: io.NopCloser(strings.NewReader("ok")), + }, nil +} + +type scopeLimitCall struct { + accountID int64 + scope AntigravityQuotaScope + resetAt time.Time +} + +type rateLimitCall struct { + accountID int64 + resetAt time.Time +} + +type stubAntigravityAccountRepo struct { + AccountRepository + scopeCalls []scopeLimitCall + rateCalls []rateLimitCall +} + +func (s *stubAntigravityAccountRepo) SetAntigravityQuotaScopeLimit(ctx context.Context, id int64, scope AntigravityQuotaScope, resetAt time.Time) error { + s.scopeCalls = append(s.scopeCalls, scopeLimitCall{accountID: id, scope: scope, resetAt: resetAt}) + return nil +} + +func (s *stubAntigravityAccountRepo) SetRateLimited(ctx context.Context, id int64, resetAt time.Time) error { + s.rateCalls = append(s.rateCalls, rateLimitCall{accountID: id, resetAt: resetAt}) + return nil +} + +func TestAntigravityRetryLoop_URLFallback_UsesLatestSuccess(t *testing.T) { + oldBaseURLs := append([]string(nil), antigravity.BaseURLs...) + oldAvailability := antigravity.DefaultURLAvailability + defer func() { + antigravity.BaseURLs = oldBaseURLs + antigravity.DefaultURLAvailability = oldAvailability + }() + + base1 := "https://ag-1.test" + base2 := "https://ag-2.test" + antigravity.BaseURLs = []string{base1, base2} + antigravity.DefaultURLAvailability = antigravity.NewURLAvailability(time.Minute) + + upstream := &stubAntigravityUpstream{firstBase: base1, secondBase: base2} + account := &Account{ + ID: 1, + Name: "acc-1", + Platform: PlatformAntigravity, + Schedulable: true, + Status: StatusActive, + Concurrency: 1, + } + + var handleErrorCalled bool + result, err := antigravityRetryLoop(antigravityRetryLoopParams{ + prefix: "[test]", + ctx: context.Background(), + account: account, + proxyURL: "", + accessToken: "token", + action: "generateContent", + body: []byte(`{"input":"test"}`), + quotaScope: AntigravityQuotaScopeClaude, + httpUpstream: upstream, + handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope) { + handleErrorCalled = true + }, + }) + + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.resp) + defer func() { _ = result.resp.Body.Close() }() + require.Equal(t, http.StatusOK, result.resp.StatusCode) + require.False(t, handleErrorCalled) + require.Len(t, upstream.calls, 2) + require.True(t, strings.HasPrefix(upstream.calls[0], base1)) + require.True(t, strings.HasPrefix(upstream.calls[1], base2)) + + available := antigravity.DefaultURLAvailability.GetAvailableURLs() + require.NotEmpty(t, available) + require.Equal(t, base2, available[0]) +} + +func TestAntigravityHandleUpstreamError_UsesScopeLimitWhenEnabled(t *testing.T) { + t.Setenv(antigravityScopeRateLimitEnv, "true") + repo := &stubAntigravityAccountRepo{} + svc := &AntigravityGatewayService{accountRepo: repo} + account := &Account{ID: 9, Name: "acc-9", Platform: PlatformAntigravity} + + body := buildGeminiRateLimitBody("3s") + svc.handleUpstreamError(context.Background(), "[test]", account, http.StatusTooManyRequests, http.Header{}, body, AntigravityQuotaScopeClaude) + + require.Len(t, repo.scopeCalls, 1) + require.Empty(t, repo.rateCalls) + call := repo.scopeCalls[0] + require.Equal(t, account.ID, call.accountID) + require.Equal(t, AntigravityQuotaScopeClaude, call.scope) + require.WithinDuration(t, time.Now().Add(3*time.Second), call.resetAt, 2*time.Second) +} + +func TestAntigravityHandleUpstreamError_UsesAccountLimitWhenScopeDisabled(t *testing.T) { + t.Setenv(antigravityScopeRateLimitEnv, "false") + repo := &stubAntigravityAccountRepo{} + svc := &AntigravityGatewayService{accountRepo: repo} + account := &Account{ID: 10, Name: "acc-10", Platform: PlatformAntigravity} + + body := buildGeminiRateLimitBody("2s") + svc.handleUpstreamError(context.Background(), "[test]", account, http.StatusTooManyRequests, http.Header{}, body, AntigravityQuotaScopeClaude) + + require.Len(t, repo.rateCalls, 1) + require.Empty(t, repo.scopeCalls) + call := repo.rateCalls[0] + require.Equal(t, account.ID, call.accountID) + require.WithinDuration(t, time.Now().Add(2*time.Second), call.resetAt, 2*time.Second) +} + +func TestAccountIsSchedulableForModel_AntigravityRateLimits(t *testing.T) { + now := time.Now() + future := now.Add(10 * time.Minute) + + account := &Account{ + ID: 1, + Name: "acc", + Platform: PlatformAntigravity, + Status: StatusActive, + Schedulable: true, + } + + account.RateLimitResetAt = &future + require.False(t, account.IsSchedulableForModel("claude-sonnet-4-5")) + require.False(t, account.IsSchedulableForModel("gemini-3-flash")) + + account.RateLimitResetAt = nil + account.Extra = map[string]any{ + antigravityQuotaScopesKey: map[string]any{ + "claude": map[string]any{ + "rate_limit_reset_at": future.Format(time.RFC3339), + }, + }, + } + + require.False(t, account.IsSchedulableForModel("claude-sonnet-4-5")) + require.True(t, account.IsSchedulableForModel("gemini-3-flash")) +} + +func buildGeminiRateLimitBody(delay string) []byte { + return []byte(fmt.Sprintf(`{"error":{"message":"too many requests","details":[{"metadata":{"quotaResetDelay":%q}}]}}`, delay)) +} From a7a0017aa84e47229a17ca8fb7d54c7c40a56564 Mon Sep 17 00:00:00 2001 From: song Date: Sat, 17 Jan 2026 18:22:43 +0800 Subject: [PATCH 033/147] chore: gofmt antigravity gateway service --- .../service/antigravity_gateway_service.go | 66 +++++++++---------- 1 file changed, 33 insertions(+), 33 deletions(-) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 60e81158..40b8e17f 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -33,18 +33,18 @@ const antigravityScopeRateLimitEnv = "GATEWAY_ANTIGRAVITY_429_SCOPE_LIMIT" // antigravityRetryLoopParams 重试循环的参数 type antigravityRetryLoopParams struct { - ctx context.Context - prefix string - account *Account - proxyURL string - accessToken string - action string - body []byte - quotaScope AntigravityQuotaScope - c *gin.Context - httpUpstream HTTPUpstream + ctx context.Context + prefix string + account *Account + proxyURL string + accessToken string + action string + body []byte + quotaScope AntigravityQuotaScope + c *gin.Context + httpUpstream HTTPUpstream settingService *SettingService - handleError func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope) + handleError func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope) } // antigravityRetryLoopResult 重试循环的结果 @@ -769,18 +769,18 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, // 执行带重试的请求 result, err := antigravityRetryLoop(antigravityRetryLoopParams{ - ctx: ctx, - prefix: prefix, - account: account, - proxyURL: proxyURL, - accessToken: accessToken, - action: action, - body: geminiBody, - quotaScope: quotaScope, - c: c, - httpUpstream: s.httpUpstream, + ctx: ctx, + prefix: prefix, + account: account, + proxyURL: proxyURL, + accessToken: accessToken, + action: action, + body: geminiBody, + quotaScope: quotaScope, + c: c, + httpUpstream: s.httpUpstream, settingService: s.settingService, - handleError: s.handleUpstreamError, + handleError: s.handleUpstreamError, }) if err != nil { return nil, s.writeClaudeError(c, http.StatusBadGateway, "upstream_error", "Upstream request failed after retries") @@ -1459,18 +1459,18 @@ func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Co // 执行带重试的请求 result, err := antigravityRetryLoop(antigravityRetryLoopParams{ - ctx: ctx, - prefix: prefix, - account: account, - proxyURL: proxyURL, - accessToken: accessToken, - action: upstreamAction, - body: wrappedBody, - quotaScope: quotaScope, - c: c, - httpUpstream: s.httpUpstream, + ctx: ctx, + prefix: prefix, + account: account, + proxyURL: proxyURL, + accessToken: accessToken, + action: upstreamAction, + body: wrappedBody, + quotaScope: quotaScope, + c: c, + httpUpstream: s.httpUpstream, settingService: s.settingService, - handleError: s.handleUpstreamError, + handleError: s.handleUpstreamError, }) if err != nil { return nil, s.writeGoogleError(c, http.StatusBadGateway, "Upstream request failed after retries") From 5e9f5efbe320d02c349dd2e91f407d3873bdfb23 Mon Sep 17 00:00:00 2001 From: song Date: Sat, 17 Jan 2026 18:22:53 +0800 Subject: [PATCH 034/147] chore: log antigravity signature retry 429 --- backend/internal/service/antigravity_gateway_service.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 40b8e17f..72ad7180 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -871,6 +871,13 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, retryBody, _ := io.ReadAll(io.LimitReader(retryResp.Body, 2<<20)) _ = retryResp.Body.Close() + if retryResp.StatusCode == http.StatusTooManyRequests { + retryBaseURL := "" + if retryReq.URL != nil { + retryBaseURL = retryReq.URL.Scheme + "://" + retryReq.URL.Host + } + log.Printf("%s status=429 rate_limited base_url=%s retry_stage=%s body=%s", prefix, retryBaseURL, stage.name, truncateForLog(retryBody, 200)) + } kind := "signature_retry" if strings.TrimSpace(stage.name) != "" { kind = "signature_retry_" + strings.ReplaceAll(stage.name, "+", "_") From 5427a9e4224ed090c0b39bdfdc482b69963a8d57 Mon Sep 17 00:00:00 2001 From: song Date: Sat, 17 Jan 2026 20:41:06 +0800 Subject: [PATCH 035/147] =?UTF-8?q?Revert=20"fix(antigravity):=20Claude=20?= =?UTF-8?q?=E6=A8=A1=E5=9E=8B=E9=80=8F=E4=BC=A0=20tool=5Fuse=20=E7=9A=84?= =?UTF-8?q?=20signature"?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 81b865b89dedb30a7efe98d6b4a4e268f9b99d60. --- backend/internal/pkg/antigravity/request_transformer.go | 7 ++----- .../internal/pkg/antigravity/request_transformer_test.go | 8 ++++---- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/backend/internal/pkg/antigravity/request_transformer.go b/backend/internal/pkg/antigravity/request_transformer.go index 9b703187..adafa196 100644 --- a/backend/internal/pkg/antigravity/request_transformer.go +++ b/backend/internal/pkg/antigravity/request_transformer.go @@ -389,13 +389,10 @@ func buildParts(content json.RawMessage, toolIDToName map[string]string, allowDu ID: block.ID, }, } - // tool_use 的 signature 处理: - // - Gemini 模型:使用 dummy signature(跳过 thought_signature 校验) - // - Claude 模型:透传上游返回的真实 signature(Vertex/Google 需要完整签名链路) + // 只有 Gemini 模型使用 dummy signature + // Claude 模型不设置 signature(避免验证问题) if allowDummyThought { part.ThoughtSignature = dummyThoughtSignature - } else if block.Signature != "" && block.Signature != dummyThoughtSignature { - part.ThoughtSignature = block.Signature } parts = append(parts, part) diff --git a/backend/internal/pkg/antigravity/request_transformer_test.go b/backend/internal/pkg/antigravity/request_transformer_test.go index 60ee6f63..eca3107e 100644 --- a/backend/internal/pkg/antigravity/request_transformer_test.go +++ b/backend/internal/pkg/antigravity/request_transformer_test.go @@ -114,7 +114,7 @@ func TestBuildParts_ToolUseSignatureHandling(t *testing.T) { } }) - t.Run("Claude model - preserve valid signature for tool_use", func(t *testing.T) { + t.Run("Claude model - no signature for tool_use", func(t *testing.T) { toolIDToName := make(map[string]string) parts, _, err := buildParts(json.RawMessage(content), toolIDToName, false) if err != nil { @@ -123,9 +123,9 @@ func TestBuildParts_ToolUseSignatureHandling(t *testing.T) { if len(parts) != 1 || parts[0].FunctionCall == nil { t.Fatalf("expected 1 functionCall part, got %+v", parts) } - // Claude 模型应透传有效的 signature(Vertex/Google 需要完整签名链路) - if parts[0].ThoughtSignature != "sig_tool_abc" { - t.Fatalf("expected preserved tool signature %q, got %q", "sig_tool_abc", parts[0].ThoughtSignature) + // Claude 模型不设置 signature + if parts[0].ThoughtSignature != "" { + t.Fatalf("expected no tool signature for Claude, got %q", parts[0].ThoughtSignature) } }) } From 0ce8666cc0193d0c08cb907f10d163b786a756b3 Mon Sep 17 00:00:00 2001 From: song Date: Sat, 17 Jan 2026 21:09:59 +0800 Subject: [PATCH 036/147] =?UTF-8?q?Revert=20"Revert=20"fix(antigravity):?= =?UTF-8?q?=20Claude=20=E6=A8=A1=E5=9E=8B=E9=80=8F=E4=BC=A0=20tool=5Fuse?= =?UTF-8?q?=20=E7=9A=84=20signature""?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 5427a9e4224ed090c0b39bdfdc482b69963a8d57. --- backend/internal/pkg/antigravity/request_transformer.go | 7 +++++-- .../internal/pkg/antigravity/request_transformer_test.go | 8 ++++---- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/backend/internal/pkg/antigravity/request_transformer.go b/backend/internal/pkg/antigravity/request_transformer.go index adafa196..9b703187 100644 --- a/backend/internal/pkg/antigravity/request_transformer.go +++ b/backend/internal/pkg/antigravity/request_transformer.go @@ -389,10 +389,13 @@ func buildParts(content json.RawMessage, toolIDToName map[string]string, allowDu ID: block.ID, }, } - // 只有 Gemini 模型使用 dummy signature - // Claude 模型不设置 signature(避免验证问题) + // tool_use 的 signature 处理: + // - Gemini 模型:使用 dummy signature(跳过 thought_signature 校验) + // - Claude 模型:透传上游返回的真实 signature(Vertex/Google 需要完整签名链路) if allowDummyThought { part.ThoughtSignature = dummyThoughtSignature + } else if block.Signature != "" && block.Signature != dummyThoughtSignature { + part.ThoughtSignature = block.Signature } parts = append(parts, part) diff --git a/backend/internal/pkg/antigravity/request_transformer_test.go b/backend/internal/pkg/antigravity/request_transformer_test.go index eca3107e..60ee6f63 100644 --- a/backend/internal/pkg/antigravity/request_transformer_test.go +++ b/backend/internal/pkg/antigravity/request_transformer_test.go @@ -114,7 +114,7 @@ func TestBuildParts_ToolUseSignatureHandling(t *testing.T) { } }) - t.Run("Claude model - no signature for tool_use", func(t *testing.T) { + t.Run("Claude model - preserve valid signature for tool_use", func(t *testing.T) { toolIDToName := make(map[string]string) parts, _, err := buildParts(json.RawMessage(content), toolIDToName, false) if err != nil { @@ -123,9 +123,9 @@ func TestBuildParts_ToolUseSignatureHandling(t *testing.T) { if len(parts) != 1 || parts[0].FunctionCall == nil { t.Fatalf("expected 1 functionCall part, got %+v", parts) } - // Claude 模型不设置 signature - if parts[0].ThoughtSignature != "" { - t.Fatalf("expected no tool signature for Claude, got %q", parts[0].ThoughtSignature) + // Claude 模型应透传有效的 signature(Vertex/Google 需要完整签名链路) + if parts[0].ThoughtSignature != "sig_tool_abc" { + t.Fatalf("expected preserved tool signature %q, got %q", "sig_tool_abc", parts[0].ThoughtSignature) } }) } From f22bc59fe37c9708c5751e6aace6913995d6f251 Mon Sep 17 00:00:00 2001 From: song Date: Sat, 17 Jan 2026 21:15:33 +0800 Subject: [PATCH 037/147] fix(antigravity): route signature retry through url fallback --- .../service/antigravity_gateway_service.go | 24 +++++++++++++------ 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 72ad7180..e6401891 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -844,11 +844,20 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, if txErr != nil { continue } - retryReq, buildErr := antigravity.NewAPIRequest(ctx, action, accessToken, retryGeminiBody) - if buildErr != nil { - continue - } - retryResp, retryErr := s.httpUpstream.Do(retryReq, proxyURL, account.ID, account.Concurrency) + retryResult, retryErr := antigravityRetryLoop(antigravityRetryLoopParams{ + ctx: ctx, + prefix: prefix, + account: account, + proxyURL: proxyURL, + accessToken: accessToken, + action: action, + body: retryGeminiBody, + quotaScope: quotaScope, + c: c, + httpUpstream: s.httpUpstream, + settingService: s.settingService, + handleError: s.handleUpstreamError, + }) if retryErr != nil { appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ Platform: account.Platform, @@ -862,6 +871,7 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, continue } + retryResp := retryResult.resp if retryResp.StatusCode < 400 { _ = resp.Body.Close() resp = retryResp @@ -873,8 +883,8 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, _ = retryResp.Body.Close() if retryResp.StatusCode == http.StatusTooManyRequests { retryBaseURL := "" - if retryReq.URL != nil { - retryBaseURL = retryReq.URL.Scheme + "://" + retryReq.URL.Host + if retryResp.Request != nil && retryResp.Request.URL != nil { + retryBaseURL = retryResp.Request.URL.Scheme + "://" + retryResp.Request.URL.Host } log.Printf("%s status=429 rate_limited base_url=%s retry_stage=%s body=%s", prefix, retryBaseURL, stage.name, truncateForLog(retryBody, 200)) } From 07ba64c6662a2a53e223b1e3d61cf0858a2c3001 Mon Sep 17 00:00:00 2001 From: song Date: Sat, 17 Jan 2026 21:37:32 +0800 Subject: [PATCH 038/147] fix(antigravity): handle url-level 429 without failover --- .../service/antigravity_gateway_service.go | 34 +++++++++++++++---- 1 file changed, 27 insertions(+), 7 deletions(-) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index e6401891..93383ab5 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -129,7 +129,7 @@ urlFallbackLoop: _ = resp.Body.Close() // "Resource has been exhausted" 是 URL 级别限流,切换 URL - if isURLLevelRateLimit(respBody) && urlIdx < len(availableURLs)-1 { + if isURLLevelRateLimit(respBody) { upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(respBody)) upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) appendOpsUpstreamError(p.c, OpsUpstreamErrorEvent{ @@ -142,9 +142,18 @@ urlFallbackLoop: Message: upstreamMsg, Detail: getUpstreamDetail(respBody), }) - antigravity.DefaultURLAvailability.MarkUnavailable(baseURL) - log.Printf("%s URL fallback (HTTP 429): %s -> %s body=%s", p.prefix, baseURL, availableURLs[urlIdx+1], truncateForLog(respBody, 200)) - continue urlFallbackLoop + if urlIdx < len(availableURLs)-1 { + log.Printf("%s URL fallback (HTTP 429): %s -> %s body=%s", p.prefix, baseURL, availableURLs[urlIdx+1], truncateForLog(respBody, 200)) + continue urlFallbackLoop + } + log.Printf("%s status=429 url_rate_limited base_url=%s body=%s", p.prefix, baseURL, truncateForLog(respBody, 200)) + resp = &http.Response{ + StatusCode: resp.StatusCode, + Header: resp.Header.Clone(), + Body: io.NopCloser(bytes.NewReader(respBody)), + Request: resp.Request, + } + break urlFallbackLoop } // 账户/模型配额限流,重试 3 次(指数退避) @@ -932,9 +941,15 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, // 处理错误响应(重试后仍失败或不触发重试) if resp.StatusCode >= 400 { - s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) + urlLevelRateLimit := resp.StatusCode == http.StatusTooManyRequests && isURLLevelRateLimit(respBody) + if !urlLevelRateLimit { + s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) + } if s.shouldFailoverUpstreamError(resp.StatusCode) { + if urlLevelRateLimit { + return nil, s.writeMappedClaudeError(c, account, resp.StatusCode, resp.Header.Get("x-request-id"), respBody) + } upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(respBody)) upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) logBody := s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBody @@ -1534,8 +1549,6 @@ func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Co goto handleSuccess } - s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) - requestID := resp.Header.Get("x-request-id") if requestID != "" { c.Header("x-request-id", requestID) @@ -1546,6 +1559,10 @@ func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Co if unwrapErr != nil || len(unwrappedForOps) == 0 { unwrappedForOps = respBody } + urlLevelRateLimit := resp.StatusCode == http.StatusTooManyRequests && isURLLevelRateLimit(unwrappedForOps) + if !urlLevelRateLimit { + s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) + } upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(unwrappedForOps)) upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) @@ -1563,6 +1580,9 @@ func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Co setOpsUpstreamError(c, resp.StatusCode, upstreamMsg, upstreamDetail) if s.shouldFailoverUpstreamError(resp.StatusCode) { + if urlLevelRateLimit { + return nil, s.writeGoogleError(c, resp.StatusCode, upstreamMsg) + } appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ Platform: account.Platform, AccountID: account.ID, From 22eb72e0f9a62b396f7572b067aa533596bdb5e1 Mon Sep 17 00:00:00 2001 From: song Date: Sat, 17 Jan 2026 21:50:09 +0800 Subject: [PATCH 039/147] fix(antigravity): restore url fallback behavior --- .../service/antigravity_gateway_service.go | 72 +++---------------- 1 file changed, 11 insertions(+), 61 deletions(-) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 93383ab5..a66a1df8 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -129,31 +129,10 @@ urlFallbackLoop: _ = resp.Body.Close() // "Resource has been exhausted" 是 URL 级别限流,切换 URL - if isURLLevelRateLimit(respBody) { - upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(respBody)) - upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) - appendOpsUpstreamError(p.c, OpsUpstreamErrorEvent{ - Platform: p.account.Platform, - AccountID: p.account.ID, - AccountName: p.account.Name, - UpstreamStatusCode: resp.StatusCode, - UpstreamRequestID: resp.Header.Get("x-request-id"), - Kind: "retry", - Message: upstreamMsg, - Detail: getUpstreamDetail(respBody), - }) - if urlIdx < len(availableURLs)-1 { - log.Printf("%s URL fallback (HTTP 429): %s -> %s body=%s", p.prefix, baseURL, availableURLs[urlIdx+1], truncateForLog(respBody, 200)) - continue urlFallbackLoop - } - log.Printf("%s status=429 url_rate_limited base_url=%s body=%s", p.prefix, baseURL, truncateForLog(respBody, 200)) - resp = &http.Response{ - StatusCode: resp.StatusCode, - Header: resp.Header.Clone(), - Body: io.NopCloser(bytes.NewReader(respBody)), - Request: resp.Request, - } - break urlFallbackLoop + if isURLLevelRateLimit(respBody) && urlIdx < len(availableURLs)-1 { + antigravity.DefaultURLAvailability.MarkUnavailable(baseURL) + log.Printf("%s URL fallback (429): %s -> %s", p.prefix, baseURL, availableURLs[urlIdx+1]) + continue urlFallbackLoop } // 账户/模型配额限流,重试 3 次(指数退避) @@ -853,20 +832,11 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, if txErr != nil { continue } - retryResult, retryErr := antigravityRetryLoop(antigravityRetryLoopParams{ - ctx: ctx, - prefix: prefix, - account: account, - proxyURL: proxyURL, - accessToken: accessToken, - action: action, - body: retryGeminiBody, - quotaScope: quotaScope, - c: c, - httpUpstream: s.httpUpstream, - settingService: s.settingService, - handleError: s.handleUpstreamError, - }) + retryReq, buildErr := antigravity.NewAPIRequest(ctx, action, accessToken, retryGeminiBody) + if buildErr != nil { + continue + } + retryResp, retryErr := s.httpUpstream.Do(retryReq, proxyURL, account.ID, account.Concurrency) if retryErr != nil { appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ Platform: account.Platform, @@ -880,7 +850,6 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, continue } - retryResp := retryResult.resp if retryResp.StatusCode < 400 { _ = resp.Body.Close() resp = retryResp @@ -890,13 +859,6 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, retryBody, _ := io.ReadAll(io.LimitReader(retryResp.Body, 2<<20)) _ = retryResp.Body.Close() - if retryResp.StatusCode == http.StatusTooManyRequests { - retryBaseURL := "" - if retryResp.Request != nil && retryResp.Request.URL != nil { - retryBaseURL = retryResp.Request.URL.Scheme + "://" + retryResp.Request.URL.Host - } - log.Printf("%s status=429 rate_limited base_url=%s retry_stage=%s body=%s", prefix, retryBaseURL, stage.name, truncateForLog(retryBody, 200)) - } kind := "signature_retry" if strings.TrimSpace(stage.name) != "" { kind = "signature_retry_" + strings.ReplaceAll(stage.name, "+", "_") @@ -941,15 +903,9 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, // 处理错误响应(重试后仍失败或不触发重试) if resp.StatusCode >= 400 { - urlLevelRateLimit := resp.StatusCode == http.StatusTooManyRequests && isURLLevelRateLimit(respBody) - if !urlLevelRateLimit { - s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) - } + s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) if s.shouldFailoverUpstreamError(resp.StatusCode) { - if urlLevelRateLimit { - return nil, s.writeMappedClaudeError(c, account, resp.StatusCode, resp.Header.Get("x-request-id"), respBody) - } upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(respBody)) upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) logBody := s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBody @@ -1559,10 +1515,7 @@ func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Co if unwrapErr != nil || len(unwrappedForOps) == 0 { unwrappedForOps = respBody } - urlLevelRateLimit := resp.StatusCode == http.StatusTooManyRequests && isURLLevelRateLimit(unwrappedForOps) - if !urlLevelRateLimit { - s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) - } + s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(unwrappedForOps)) upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) @@ -1580,9 +1533,6 @@ func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Co setOpsUpstreamError(c, resp.StatusCode, upstreamMsg, upstreamDetail) if s.shouldFailoverUpstreamError(resp.StatusCode) { - if urlLevelRateLimit { - return nil, s.writeGoogleError(c, resp.StatusCode, upstreamMsg) - } appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ Platform: account.Platform, AccountID: account.ID, From ec916a31975228ab11af54431a79aa8976c79916 Mon Sep 17 00:00:00 2001 From: song Date: Sat, 17 Jan 2026 21:56:57 +0800 Subject: [PATCH 040/147] fix(antigravity): remove signature retry --- .../service/antigravity_gateway_service.go | 122 ------------------ 1 file changed, 122 deletions(-) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index a66a1df8..468d94c7 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -779,128 +779,6 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, if resp.StatusCode >= 400 { respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) - // 优先检测 thinking block 的 signature 相关错误(400)并重试一次: - // Antigravity /v1internal 链路在部分场景会对 thought/thinking signature 做严格校验, - // 当历史消息携带的 signature 不合法时会直接 400;去除 thinking 后可继续完成请求。 - if resp.StatusCode == http.StatusBadRequest && isSignatureRelatedError(respBody) { - upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(respBody)) - upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) - logBody := s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBody - maxBytes := 2048 - if s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes > 0 { - maxBytes = s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes - } - upstreamDetail := "" - if logBody { - upstreamDetail = truncateString(string(respBody), maxBytes) - } - appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ - Platform: account.Platform, - AccountID: account.ID, - AccountName: account.Name, - UpstreamStatusCode: resp.StatusCode, - UpstreamRequestID: resp.Header.Get("x-request-id"), - Kind: "signature_error", - Message: upstreamMsg, - Detail: upstreamDetail, - }) - - // Conservative two-stage fallback: - // 1) Disable top-level thinking + thinking->text - // 2) Only if still signature-related 400: also downgrade tool_use/tool_result to text. - - retryStages := []struct { - name string - strip func(*antigravity.ClaudeRequest) (bool, error) - }{ - {name: "thinking-only", strip: stripThinkingFromClaudeRequest}, - {name: "thinking+tools", strip: stripSignatureSensitiveBlocksFromClaudeRequest}, - } - - for _, stage := range retryStages { - retryClaudeReq := claudeReq - retryClaudeReq.Messages = append([]antigravity.ClaudeMessage(nil), claudeReq.Messages...) - - stripped, stripErr := stage.strip(&retryClaudeReq) - if stripErr != nil || !stripped { - continue - } - - log.Printf("Antigravity account %d: detected signature-related 400, retrying once (%s)", account.ID, stage.name) - - retryGeminiBody, txErr := antigravity.TransformClaudeToGeminiWithOptions(&retryClaudeReq, projectID, mappedModel, s.getClaudeTransformOptions(ctx)) - if txErr != nil { - continue - } - retryReq, buildErr := antigravity.NewAPIRequest(ctx, action, accessToken, retryGeminiBody) - if buildErr != nil { - continue - } - retryResp, retryErr := s.httpUpstream.Do(retryReq, proxyURL, account.ID, account.Concurrency) - if retryErr != nil { - appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ - Platform: account.Platform, - AccountID: account.ID, - AccountName: account.Name, - UpstreamStatusCode: 0, - Kind: "signature_retry_request_error", - Message: sanitizeUpstreamErrorMessage(retryErr.Error()), - }) - log.Printf("Antigravity account %d: signature retry request failed (%s): %v", account.ID, stage.name, retryErr) - continue - } - - if retryResp.StatusCode < 400 { - _ = resp.Body.Close() - resp = retryResp - respBody = nil - break - } - - retryBody, _ := io.ReadAll(io.LimitReader(retryResp.Body, 2<<20)) - _ = retryResp.Body.Close() - kind := "signature_retry" - if strings.TrimSpace(stage.name) != "" { - kind = "signature_retry_" + strings.ReplaceAll(stage.name, "+", "_") - } - retryUpstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(retryBody)) - retryUpstreamMsg = sanitizeUpstreamErrorMessage(retryUpstreamMsg) - retryUpstreamDetail := "" - if logBody { - retryUpstreamDetail = truncateString(string(retryBody), maxBytes) - } - appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ - Platform: account.Platform, - AccountID: account.ID, - AccountName: account.Name, - UpstreamStatusCode: retryResp.StatusCode, - UpstreamRequestID: retryResp.Header.Get("x-request-id"), - Kind: kind, - Message: retryUpstreamMsg, - Detail: retryUpstreamDetail, - }) - - // If this stage fixed the signature issue, we stop; otherwise we may try the next stage. - if retryResp.StatusCode != http.StatusBadRequest || !isSignatureRelatedError(retryBody) { - respBody = retryBody - resp = &http.Response{ - StatusCode: retryResp.StatusCode, - Header: retryResp.Header.Clone(), - Body: io.NopCloser(bytes.NewReader(retryBody)), - } - break - } - - // Still signature-related; capture context and allow next stage. - respBody = retryBody - resp = &http.Response{ - StatusCode: retryResp.StatusCode, - Header: retryResp.Header.Clone(), - Body: io.NopCloser(bytes.NewReader(retryBody)), - } - } - } - // 处理错误响应(重试后仍失败或不触发重试) if resp.StatusCode >= 400 { s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) From 217b3b59c0d8c33cd28cddbc5168edc7954975ca Mon Sep 17 00:00:00 2001 From: song Date: Sat, 17 Jan 2026 21:59:32 +0800 Subject: [PATCH 041/147] fix(antigravity): drop MarkUnavailable --- backend/internal/pkg/antigravity/client.go | 4 ---- backend/internal/service/antigravity_gateway_service.go | 4 ---- 2 files changed, 8 deletions(-) diff --git a/backend/internal/pkg/antigravity/client.go b/backend/internal/pkg/antigravity/client.go index fd6cac58..77d3dc9b 100644 --- a/backend/internal/pkg/antigravity/client.go +++ b/backend/internal/pkg/antigravity/client.go @@ -325,7 +325,6 @@ func (c *Client) LoadCodeAssist(ctx context.Context, accessToken string) (*LoadC if err != nil { lastErr = fmt.Errorf("loadCodeAssist 请求失败: %w", err) if shouldFallbackToNextURL(err, 0) && urlIdx < len(availableURLs)-1 { - DefaultURLAvailability.MarkUnavailable(baseURL) log.Printf("[antigravity] loadCodeAssist URL fallback: %s -> %s", baseURL, availableURLs[urlIdx+1]) continue } @@ -340,7 +339,6 @@ func (c *Client) LoadCodeAssist(ctx context.Context, accessToken string) (*LoadC // 检查是否需要 URL 降级 if shouldFallbackToNextURL(nil, resp.StatusCode) && urlIdx < len(availableURLs)-1 { - DefaultURLAvailability.MarkUnavailable(baseURL) log.Printf("[antigravity] loadCodeAssist URL fallback (HTTP %d): %s -> %s", resp.StatusCode, baseURL, availableURLs[urlIdx+1]) continue } @@ -418,7 +416,6 @@ func (c *Client) FetchAvailableModels(ctx context.Context, accessToken, projectI if err != nil { lastErr = fmt.Errorf("fetchAvailableModels 请求失败: %w", err) if shouldFallbackToNextURL(err, 0) && urlIdx < len(availableURLs)-1 { - DefaultURLAvailability.MarkUnavailable(baseURL) log.Printf("[antigravity] fetchAvailableModels URL fallback: %s -> %s", baseURL, availableURLs[urlIdx+1]) continue } @@ -433,7 +430,6 @@ func (c *Client) FetchAvailableModels(ctx context.Context, accessToken, projectI // 检查是否需要 URL 降级 if shouldFallbackToNextURL(nil, resp.StatusCode) && urlIdx < len(availableURLs)-1 { - DefaultURLAvailability.MarkUnavailable(baseURL) log.Printf("[antigravity] fetchAvailableModels URL fallback (HTTP %d): %s -> %s", resp.StatusCode, baseURL, availableURLs[urlIdx+1]) continue } diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 468d94c7..8f4d2bfd 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -106,7 +106,6 @@ urlFallbackLoop: Message: safeErr, }) if shouldAntigravityFallbackToNextURL(err, 0) && urlIdx < len(availableURLs)-1 { - antigravity.DefaultURLAvailability.MarkUnavailable(baseURL) log.Printf("%s URL fallback (connection error): %s -> %s", p.prefix, baseURL, availableURLs[urlIdx+1]) continue urlFallbackLoop } @@ -130,7 +129,6 @@ urlFallbackLoop: // "Resource has been exhausted" 是 URL 级别限流,切换 URL if isURLLevelRateLimit(respBody) && urlIdx < len(availableURLs)-1 { - antigravity.DefaultURLAvailability.MarkUnavailable(baseURL) log.Printf("%s URL fallback (429): %s -> %s", p.prefix, baseURL, availableURLs[urlIdx+1]) continue urlFallbackLoop } @@ -442,7 +440,6 @@ func (s *AntigravityGatewayService) TestConnection(ctx context.Context, account if err != nil { lastErr = fmt.Errorf("请求失败: %w", err) if shouldAntigravityFallbackToNextURL(err, 0) && urlIdx < len(availableURLs)-1 { - antigravity.DefaultURLAvailability.MarkUnavailable(baseURL) log.Printf("[antigravity-Test] URL fallback: %s -> %s", baseURL, availableURLs[urlIdx+1]) continue } @@ -458,7 +455,6 @@ func (s *AntigravityGatewayService) TestConnection(ctx context.Context, account // 检查是否需要 URL 降级 if shouldAntigravityFallbackToNextURL(nil, resp.StatusCode) && urlIdx < len(availableURLs)-1 { - antigravity.DefaultURLAvailability.MarkUnavailable(baseURL) log.Printf("[antigravity-Test] URL fallback (HTTP %d): %s -> %s", resp.StatusCode, baseURL, availableURLs[urlIdx+1]) continue } From 959f6c538a48546d4d25fdc77d59f21eab46ae90 Mon Sep 17 00:00:00 2001 From: song Date: Sat, 17 Jan 2026 22:21:48 +0800 Subject: [PATCH 042/147] fix(antigravity): remove thinking sanitation --- .../service/antigravity_gateway_service.go | 143 ------------------ 1 file changed, 143 deletions(-) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 8f4d2bfd..3b6ddcb1 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -730,9 +730,6 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, proxyURL = account.Proxy.URL() } - // Sanitize thinking blocks (clean cache_control and flatten history thinking) - sanitizeThinkingBlocks(&claudeReq) - // 获取转换选项 // Antigravity 上游要求必须包含身份提示词,否则会返回 429 transformOpts := s.getClaudeTransformOptions(ctx) @@ -744,9 +741,6 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, return nil, fmt.Errorf("transform request: %w", err) } - // Safety net: ensure no cache_control leaked into Gemini request - geminiBody = cleanCacheControlFromGeminiJSON(geminiBody) - // Antigravity 上游只支持流式请求,统一使用 streamGenerateContent // 如果客户端请求非流式,在响应处理阶段会收集完整流式响应后转换返回 action := "streamGenerateContent" @@ -887,143 +881,6 @@ func extractAntigravityErrorMessage(body []byte) string { return "" } -// cleanCacheControlFromGeminiJSON removes cache_control from Gemini JSON (emergency fix) -// This should not be needed if transformation is correct, but serves as a safety net -func cleanCacheControlFromGeminiJSON(body []byte) []byte { - // Try a more robust approach: parse and clean - var data map[string]any - if err := json.Unmarshal(body, &data); err != nil { - log.Printf("[Antigravity] Failed to parse Gemini JSON for cache_control cleaning: %v", err) - return body - } - - cleaned := removeCacheControlFromAny(data) - if !cleaned { - return body - } - - if result, err := json.Marshal(data); err == nil { - log.Printf("[Antigravity] Successfully cleaned cache_control from Gemini JSON") - return result - } - - return body -} - -// removeCacheControlFromAny recursively removes cache_control fields -func removeCacheControlFromAny(v any) bool { - cleaned := false - - switch val := v.(type) { - case map[string]any: - for k, child := range val { - if k == "cache_control" { - delete(val, k) - cleaned = true - } else if removeCacheControlFromAny(child) { - cleaned = true - } - } - case []any: - for _, item := range val { - if removeCacheControlFromAny(item) { - cleaned = true - } - } - } - - return cleaned -} - -// sanitizeThinkingBlocks cleans cache_control and flattens history thinking blocks -// Thinking blocks do NOT support cache_control field (Anthropic API/Vertex AI requirement) -// Additionally, history thinking blocks are flattened to text to avoid upstream validation errors -func sanitizeThinkingBlocks(req *antigravity.ClaudeRequest) { - if req == nil { - return - } - - log.Printf("[Antigravity] sanitizeThinkingBlocks: processing request with %d messages", len(req.Messages)) - - // Clean system blocks - if len(req.System) > 0 { - var systemBlocks []map[string]any - if err := json.Unmarshal(req.System, &systemBlocks); err == nil { - for i := range systemBlocks { - if blockType, _ := systemBlocks[i]["type"].(string); blockType == "thinking" || systemBlocks[i]["thinking"] != nil { - if removeCacheControlFromAny(systemBlocks[i]) { - log.Printf("[Antigravity] Deep cleaned cache_control from thinking block in system[%d]", i) - } - } - } - // Marshal back - if cleaned, err := json.Marshal(systemBlocks); err == nil { - req.System = cleaned - } - } - } - - // Clean message content blocks and flatten history - lastMsgIdx := len(req.Messages) - 1 - for msgIdx := range req.Messages { - raw := req.Messages[msgIdx].Content - if len(raw) == 0 { - continue - } - - // Try to parse as blocks array - var blocks []map[string]any - if err := json.Unmarshal(raw, &blocks); err != nil { - continue - } - - cleaned := false - for blockIdx := range blocks { - blockType, _ := blocks[blockIdx]["type"].(string) - - // Check for thinking blocks (typed or untyped) - if blockType == "thinking" || blocks[blockIdx]["thinking"] != nil { - // 1. Clean cache_control - if removeCacheControlFromAny(blocks[blockIdx]) { - log.Printf("[Antigravity] Deep cleaned cache_control from thinking block in messages[%d].content[%d]", msgIdx, blockIdx) - cleaned = true - } - - // 2. Flatten to text if it's a history message (not the last one) - if msgIdx < lastMsgIdx { - log.Printf("[Antigravity] Flattening history thinking block to text at messages[%d].content[%d]", msgIdx, blockIdx) - - // Extract thinking content - var textContent string - if t, ok := blocks[blockIdx]["thinking"].(string); ok { - textContent = t - } else { - // Fallback for non-string content (marshal it) - if b, err := json.Marshal(blocks[blockIdx]["thinking"]); err == nil { - textContent = string(b) - } - } - - // Convert to text block - blocks[blockIdx]["type"] = "text" - blocks[blockIdx]["text"] = textContent - delete(blocks[blockIdx], "thinking") - delete(blocks[blockIdx], "signature") - delete(blocks[blockIdx], "cache_control") // Ensure it's gone - cleaned = true - } - } - } - - // Marshal back if modified - if cleaned { - if marshaled, err := json.Marshal(blocks); err == nil { - req.Messages[msgIdx].Content = marshaled - } - } - } -} - // stripThinkingFromClaudeRequest converts thinking blocks to text blocks in a Claude Messages request. // This preserves the thinking content while avoiding signature validation errors. // Note: redacted_thinking blocks are removed because they cannot be converted to text. From 8b071cc665ed1f07e13df56ef6c08f18d14481a9 Mon Sep 17 00:00:00 2001 From: song Date: Sat, 17 Jan 2026 22:50:50 +0800 Subject: [PATCH 043/147] fix(antigravity): restore signature retry and base order --- backend/internal/pkg/antigravity/client.go | 14 +- .../service/antigravity_gateway_service.go | 139 ++++++++++++++++++ 2 files changed, 143 insertions(+), 10 deletions(-) diff --git a/backend/internal/pkg/antigravity/client.go b/backend/internal/pkg/antigravity/client.go index 77d3dc9b..a6279b11 100644 --- a/backend/internal/pkg/antigravity/client.go +++ b/backend/internal/pkg/antigravity/client.go @@ -303,11 +303,8 @@ func (c *Client) LoadCodeAssist(ctx context.Context, accessToken string) (*LoadC return nil, nil, fmt.Errorf("序列化请求失败: %w", err) } - // 获取可用的 URL 列表 - availableURLs := DefaultURLAvailability.GetAvailableURLs() - if len(availableURLs) == 0 { - availableURLs = BaseURLs // 所有 URL 都不可用时,重试所有 - } + // 固定顺序:prod -> daily + availableURLs := BaseURLs var lastErr error for urlIdx, baseURL := range availableURLs { @@ -394,11 +391,8 @@ func (c *Client) FetchAvailableModels(ctx context.Context, accessToken, projectI return nil, nil, fmt.Errorf("序列化请求失败: %w", err) } - // 获取可用的 URL 列表 - availableURLs := DefaultURLAvailability.GetAvailableURLs() - if len(availableURLs) == 0 { - availableURLs = BaseURLs // 所有 URL 都不可用时,重试所有 - } + // 固定顺序:prod -> daily + availableURLs := BaseURLs var lastErr error for urlIdx, baseURL := range availableURLs { diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 3b6ddcb1..043f338d 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -769,6 +769,145 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, if resp.StatusCode >= 400 { respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + // 优先检测 thinking block 的 signature 相关错误(400)并重试一次: + // Antigravity /v1internal 链路在部分场景会对 thought/thinking signature 做严格校验, + // 当历史消息携带的 signature 不合法时会直接 400;去除 thinking 后可继续完成请求。 + if resp.StatusCode == http.StatusBadRequest && isSignatureRelatedError(respBody) { + upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + logBody := s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBody + maxBytes := 2048 + if s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes > 0 { + maxBytes = s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + } + upstreamDetail := "" + if logBody { + upstreamDetail = truncateString(string(respBody), maxBytes) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: resp.Header.Get("x-request-id"), + Kind: "signature_error", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + + // Conservative two-stage fallback: + // 1) Disable top-level thinking + thinking->text + // 2) Only if still signature-related 400: also downgrade tool_use/tool_result to text. + + retryStages := []struct { + name string + strip func(*antigravity.ClaudeRequest) (bool, error) + }{ + {name: "thinking-only", strip: stripThinkingFromClaudeRequest}, + {name: "thinking+tools", strip: stripSignatureSensitiveBlocksFromClaudeRequest}, + } + + for _, stage := range retryStages { + retryClaudeReq := claudeReq + retryClaudeReq.Messages = append([]antigravity.ClaudeMessage(nil), claudeReq.Messages...) + + stripped, stripErr := stage.strip(&retryClaudeReq) + if stripErr != nil || !stripped { + continue + } + + log.Printf("Antigravity account %d: detected signature-related 400, retrying once (%s)", account.ID, stage.name) + + retryGeminiBody, txErr := antigravity.TransformClaudeToGeminiWithOptions(&retryClaudeReq, projectID, mappedModel, s.getClaudeTransformOptions(ctx)) + if txErr != nil { + continue + } + retryResult, retryErr := antigravityRetryLoop(antigravityRetryLoopParams{ + ctx: ctx, + prefix: prefix, + account: account, + proxyURL: proxyURL, + accessToken: accessToken, + action: action, + body: retryGeminiBody, + quotaScope: quotaScope, + c: c, + httpUpstream: s.httpUpstream, + settingService: s.settingService, + handleError: s.handleUpstreamError, + }) + if retryErr != nil { + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: 0, + Kind: "signature_retry_request_error", + Message: sanitizeUpstreamErrorMessage(retryErr.Error()), + }) + log.Printf("Antigravity account %d: signature retry request failed (%s): %v", account.ID, stage.name, retryErr) + continue + } + + retryResp := retryResult.resp + if retryResp.StatusCode < 400 { + _ = resp.Body.Close() + resp = retryResp + respBody = nil + break + } + + retryBody, _ := io.ReadAll(io.LimitReader(retryResp.Body, 2<<20)) + _ = retryResp.Body.Close() + if retryResp.StatusCode == http.StatusTooManyRequests { + retryBaseURL := "" + if retryResp.Request != nil && retryResp.Request.URL != nil { + retryBaseURL = retryResp.Request.URL.Scheme + "://" + retryResp.Request.URL.Host + } + log.Printf("%s status=429 rate_limited base_url=%s retry_stage=%s body=%s", prefix, retryBaseURL, stage.name, truncateForLog(retryBody, 200)) + } + kind := "signature_retry" + if strings.TrimSpace(stage.name) != "" { + kind = "signature_retry_" + strings.ReplaceAll(stage.name, "+", "_") + } + retryUpstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(retryBody)) + retryUpstreamMsg = sanitizeUpstreamErrorMessage(retryUpstreamMsg) + retryUpstreamDetail := "" + if logBody { + retryUpstreamDetail = truncateString(string(retryBody), maxBytes) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: retryResp.StatusCode, + UpstreamRequestID: retryResp.Header.Get("x-request-id"), + Kind: kind, + Message: retryUpstreamMsg, + Detail: retryUpstreamDetail, + }) + + // If this stage fixed the signature issue, we stop; otherwise we may try the next stage. + if retryResp.StatusCode != http.StatusBadRequest || !isSignatureRelatedError(retryBody) { + respBody = retryBody + resp = &http.Response{ + StatusCode: retryResp.StatusCode, + Header: retryResp.Header.Clone(), + Body: io.NopCloser(bytes.NewReader(retryBody)), + } + break + } + + // Still signature-related; capture context and allow next stage. + respBody = retryBody + resp = &http.Response{ + StatusCode: retryResp.StatusCode, + Header: retryResp.Header.Clone(), + Body: io.NopCloser(bytes.NewReader(retryBody)), + } + } + } + // 处理错误响应(重试后仍失败或不触发重试) if resp.StatusCode >= 400 { s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) From 694131543279fca2cb395cf3dcaf0dc6dbf3b3a2 Mon Sep 17 00:00:00 2001 From: song Date: Sun, 18 Jan 2026 01:09:40 +0800 Subject: [PATCH 044/147] feat: add antigravity web search support --- .../internal/pkg/antigravity/gemini_types.go | 24 ++++- .../pkg/antigravity/request_transformer.go | 88 +++++++++++++------ .../pkg/antigravity/response_transformer.go | 57 ++++++++++++ .../pkg/antigravity/stream_transformer.go | 37 ++++++++ 4 files changed, 178 insertions(+), 28 deletions(-) diff --git a/backend/internal/pkg/antigravity/gemini_types.go b/backend/internal/pkg/antigravity/gemini_types.go index f688332f..ad873901 100644 --- a/backend/internal/pkg/antigravity/gemini_types.go +++ b/backend/internal/pkg/antigravity/gemini_types.go @@ -143,9 +143,10 @@ type GeminiResponse struct { // GeminiCandidate Gemini 候选响应 type GeminiCandidate struct { - Content *GeminiContent `json:"content,omitempty"` - FinishReason string `json:"finishReason,omitempty"` - Index int `json:"index,omitempty"` + Content *GeminiContent `json:"content,omitempty"` + FinishReason string `json:"finishReason,omitempty"` + Index int `json:"index,omitempty"` + GroundingMetadata *GeminiGroundingMetadata `json:"groundingMetadata,omitempty"` } // GeminiUsageMetadata Gemini 用量元数据 @@ -156,6 +157,23 @@ type GeminiUsageMetadata struct { TotalTokenCount int `json:"totalTokenCount,omitempty"` } +// GeminiGroundingMetadata Gemini grounding 元数据(Web Search) +type GeminiGroundingMetadata struct { + WebSearchQueries []string `json:"webSearchQueries,omitempty"` + GroundingChunks []GeminiGroundingChunk `json:"groundingChunks,omitempty"` +} + +// GeminiGroundingChunk Gemini grounding chunk +type GeminiGroundingChunk struct { + Web *GeminiGroundingWeb `json:"web,omitempty"` +} + +// GeminiGroundingWeb Gemini grounding web 信息 +type GeminiGroundingWeb struct { + Title string `json:"title,omitempty"` + URI string `json:"uri,omitempty"` +} + // DefaultSafetySettings 默认安全设置(关闭所有过滤) var DefaultSafetySettings = []GeminiSafetySetting{ {Category: "HARM_CATEGORY_HARASSMENT", Threshold: "OFF"}, diff --git a/backend/internal/pkg/antigravity/request_transformer.go b/backend/internal/pkg/antigravity/request_transformer.go index 9b703187..637a4ea8 100644 --- a/backend/internal/pkg/antigravity/request_transformer.go +++ b/backend/internal/pkg/antigravity/request_transformer.go @@ -54,6 +54,9 @@ func DefaultTransformOptions() TransformOptions { } } +// webSearchFallbackModel web_search 请求使用的降级模型 +const webSearchFallbackModel = "gemini-2.5-flash" + // TransformClaudeToGemini 将 Claude 请求转换为 v1internal Gemini 格式 func TransformClaudeToGemini(claudeReq *ClaudeRequest, projectID, mappedModel string) ([]byte, error) { return TransformClaudeToGeminiWithOptions(claudeReq, projectID, mappedModel, DefaultTransformOptions()) @@ -64,12 +67,23 @@ func TransformClaudeToGeminiWithOptions(claudeReq *ClaudeRequest, projectID, map // 用于存储 tool_use id -> name 映射 toolIDToName := make(map[string]string) + // 检测是否有 web_search 工具 + hasWebSearchTool := hasWebSearchTool(claudeReq.Tools) + requestType := "agent" + targetModel := mappedModel + if hasWebSearchTool { + requestType = "web_search" + if targetModel != webSearchFallbackModel { + targetModel = webSearchFallbackModel + } + } + // 检测是否启用 thinking isThinkingEnabled := claudeReq.Thinking != nil && claudeReq.Thinking.Type == "enabled" // 只有 Gemini 模型支持 dummy thought workaround // Claude 模型通过 Vertex/Google API 需要有效的 thought signatures - allowDummyThought := strings.HasPrefix(mappedModel, "gemini-") + allowDummyThought := strings.HasPrefix(targetModel, "gemini-") // 1. 构建 contents contents, strippedThinking, err := buildContents(claudeReq.Messages, toolIDToName, isThinkingEnabled, allowDummyThought) @@ -89,6 +103,11 @@ func TransformClaudeToGeminiWithOptions(claudeReq *ClaudeRequest, projectID, map reqCopy.Thinking = nil reqForConfig = &reqCopy } + if targetModel != "" && targetModel != reqForConfig.Model { + reqCopy := *reqForConfig + reqCopy.Model = targetModel + reqForConfig = &reqCopy + } generationConfig := buildGenerationConfig(reqForConfig) // 4. 构建 tools @@ -127,8 +146,8 @@ func TransformClaudeToGeminiWithOptions(claudeReq *ClaudeRequest, projectID, map Project: projectID, RequestID: "agent-" + uuid.New().String(), UserAgent: "antigravity", // 固定值,与官方客户端一致 - RequestType: "agent", - Model: mappedModel, + RequestType: requestType, + Model: targetModel, Request: innerRequest, } @@ -513,37 +532,43 @@ func buildGenerationConfig(req *ClaudeRequest) *GeminiGenerationConfig { return config } +func hasWebSearchTool(tools []ClaudeTool) bool { + for _, tool := range tools { + if isWebSearchTool(tool) { + return true + } + } + return false +} + +func isWebSearchTool(tool ClaudeTool) bool { + if strings.HasPrefix(tool.Type, "web_search") || tool.Type == "google_search" { + return true + } + + name := strings.TrimSpace(tool.Name) + switch name { + case "web_search", "google_search", "web_search_20250305": + return true + default: + return false + } +} + // buildTools 构建 tools func buildTools(tools []ClaudeTool) []GeminiToolDeclaration { if len(tools) == 0 { return nil } - // 检查是否有 web_search 工具 - hasWebSearch := false - for _, tool := range tools { - if tool.Name == "web_search" { - hasWebSearch = true - break - } - } - - if hasWebSearch { - // Web Search 工具映射 - return []GeminiToolDeclaration{{ - GoogleSearch: &GeminiGoogleSearch{ - EnhancedContent: &GeminiEnhancedContent{ - ImageSearch: &GeminiImageSearch{ - MaxResultCount: 5, - }, - }, - }, - }} - } + hasWebSearch := hasWebSearchTool(tools) // 普通工具 var funcDecls []GeminiFunctionDecl for _, tool := range tools { + if isWebSearchTool(tool) { + continue + } // 跳过无效工具名称 if strings.TrimSpace(tool.Name) == "" { log.Printf("Warning: skipping tool with empty name") @@ -586,7 +611,20 @@ func buildTools(tools []ClaudeTool) []GeminiToolDeclaration { } if len(funcDecls) == 0 { - return nil + if !hasWebSearch { + return nil + } + + // Web Search 工具映射 + return []GeminiToolDeclaration{{ + GoogleSearch: &GeminiGoogleSearch{ + EnhancedContent: &GeminiEnhancedContent{ + ImageSearch: &GeminiImageSearch{ + MaxResultCount: 5, + }, + }, + }, + }} } return []GeminiToolDeclaration{{ diff --git a/backend/internal/pkg/antigravity/response_transformer.go b/backend/internal/pkg/antigravity/response_transformer.go index cd7f5f80..b99e6b3d 100644 --- a/backend/internal/pkg/antigravity/response_transformer.go +++ b/backend/internal/pkg/antigravity/response_transformer.go @@ -3,6 +3,7 @@ package antigravity import ( "encoding/json" "fmt" + "strings" ) // TransformGeminiToClaude 将 Gemini 响应转换为 Claude 格式(非流式) @@ -63,6 +64,12 @@ func (p *NonStreamingProcessor) Process(geminiResp *GeminiResponse, responseID, p.processPart(&part) } + if len(geminiResp.Candidates) > 0 { + if grounding := geminiResp.Candidates[0].GroundingMetadata; grounding != nil { + p.processGrounding(grounding) + } + } + // 刷新剩余内容 p.flushThinking() p.flushText() @@ -190,6 +197,18 @@ func (p *NonStreamingProcessor) processPart(part *GeminiPart) { } } +func (p *NonStreamingProcessor) processGrounding(grounding *GeminiGroundingMetadata) { + groundingText := buildGroundingText(grounding) + if groundingText == "" { + return + } + + p.flushThinking() + p.flushText() + p.textBuilder += groundingText + p.flushText() +} + // flushText 刷新 text builder func (p *NonStreamingProcessor) flushText() { if p.textBuilder == "" { @@ -262,6 +281,44 @@ func (p *NonStreamingProcessor) buildResponse(geminiResp *GeminiResponse, respon } } +func buildGroundingText(grounding *GeminiGroundingMetadata) string { + if grounding == nil { + return "" + } + + var builder strings.Builder + + if len(grounding.WebSearchQueries) > 0 { + builder.WriteString("\n\n---\nWeb search queries: ") + builder.WriteString(strings.Join(grounding.WebSearchQueries, ", ")) + } + + if len(grounding.GroundingChunks) > 0 { + var links []string + for i, chunk := range grounding.GroundingChunks { + if chunk.Web == nil { + continue + } + title := strings.TrimSpace(chunk.Web.Title) + if title == "" { + title = "Source" + } + uri := strings.TrimSpace(chunk.Web.URI) + if uri == "" { + uri = "#" + } + links = append(links, fmt.Sprintf("[%d] [%s](%s)", i+1, title, uri)) + } + + if len(links) > 0 { + builder.WriteString("\n\nSources:\n") + builder.WriteString(strings.Join(links, "\n")) + } + } + + return builder.String() +} + // generateRandomID 生成随机 ID func generateRandomID() string { const chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" diff --git a/backend/internal/pkg/antigravity/stream_transformer.go b/backend/internal/pkg/antigravity/stream_transformer.go index 9fe68a11..da0c6f97 100644 --- a/backend/internal/pkg/antigravity/stream_transformer.go +++ b/backend/internal/pkg/antigravity/stream_transformer.go @@ -27,6 +27,8 @@ type StreamingProcessor struct { pendingSignature string trailingSignature string originalModel string + webSearchQueries []string + groundingChunks []GeminiGroundingChunk // 累计 usage inputTokens int @@ -93,6 +95,10 @@ func (p *StreamingProcessor) ProcessLine(line string) []byte { } } + if len(geminiResp.Candidates) > 0 { + p.captureGrounding(geminiResp.Candidates[0].GroundingMetadata) + } + // 检查是否结束 if len(geminiResp.Candidates) > 0 { finishReason := geminiResp.Candidates[0].FinishReason @@ -200,6 +206,20 @@ func (p *StreamingProcessor) processPart(part *GeminiPart) []byte { return result.Bytes() } +func (p *StreamingProcessor) captureGrounding(grounding *GeminiGroundingMetadata) { + if grounding == nil { + return + } + + if len(grounding.WebSearchQueries) > 0 && len(p.webSearchQueries) == 0 { + p.webSearchQueries = append([]string(nil), grounding.WebSearchQueries...) + } + + if len(grounding.GroundingChunks) > 0 && len(p.groundingChunks) == 0 { + p.groundingChunks = append([]GeminiGroundingChunk(nil), grounding.GroundingChunks...) + } +} + // processThinking 处理 thinking func (p *StreamingProcessor) processThinking(text, signature string) []byte { var result bytes.Buffer @@ -417,6 +437,23 @@ func (p *StreamingProcessor) emitFinish(finishReason string) []byte { p.trailingSignature = "" } + if len(p.webSearchQueries) > 0 || len(p.groundingChunks) > 0 { + groundingText := buildGroundingText(&GeminiGroundingMetadata{ + WebSearchQueries: p.webSearchQueries, + GroundingChunks: p.groundingChunks, + }) + if groundingText != "" { + _, _ = result.Write(p.startBlock(BlockTypeText, map[string]any{ + "type": "text", + "text": "", + })) + _, _ = result.Write(p.emitDelta("text_delta", map[string]any{ + "text": groundingText, + })) + _, _ = result.Write(p.endBlock()) + } + } + // 确定 stop_reason stopReason := "end_turn" if p.usedTool { From c115c9e04896b6f5b72241a818a974438dfdd121 Mon Sep 17 00:00:00 2001 From: song Date: Sun, 18 Jan 2026 01:22:40 +0800 Subject: [PATCH 045/147] fix: address lint errors --- backend/internal/pkg/antigravity/gemini_types.go | 8 ++++---- backend/internal/pkg/antigravity/response_transformer.go | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/backend/internal/pkg/antigravity/gemini_types.go b/backend/internal/pkg/antigravity/gemini_types.go index ad873901..c1cc998c 100644 --- a/backend/internal/pkg/antigravity/gemini_types.go +++ b/backend/internal/pkg/antigravity/gemini_types.go @@ -143,10 +143,10 @@ type GeminiResponse struct { // GeminiCandidate Gemini 候选响应 type GeminiCandidate struct { - Content *GeminiContent `json:"content,omitempty"` - FinishReason string `json:"finishReason,omitempty"` - Index int `json:"index,omitempty"` - GroundingMetadata *GeminiGroundingMetadata `json:"groundingMetadata,omitempty"` + Content *GeminiContent `json:"content,omitempty"` + FinishReason string `json:"finishReason,omitempty"` + Index int `json:"index,omitempty"` + GroundingMetadata *GeminiGroundingMetadata `json:"groundingMetadata,omitempty"` } // GeminiUsageMetadata Gemini 用量元数据 diff --git a/backend/internal/pkg/antigravity/response_transformer.go b/backend/internal/pkg/antigravity/response_transformer.go index b99e6b3d..04424c03 100644 --- a/backend/internal/pkg/antigravity/response_transformer.go +++ b/backend/internal/pkg/antigravity/response_transformer.go @@ -289,8 +289,8 @@ func buildGroundingText(grounding *GeminiGroundingMetadata) string { var builder strings.Builder if len(grounding.WebSearchQueries) > 0 { - builder.WriteString("\n\n---\nWeb search queries: ") - builder.WriteString(strings.Join(grounding.WebSearchQueries, ", ")) + _, _ = builder.WriteString("\n\n---\nWeb search queries: ") + _, _ = builder.WriteString(strings.Join(grounding.WebSearchQueries, ", ")) } if len(grounding.GroundingChunks) > 0 { @@ -311,8 +311,8 @@ func buildGroundingText(grounding *GeminiGroundingMetadata) string { } if len(links) > 0 { - builder.WriteString("\n\nSources:\n") - builder.WriteString(strings.Join(links, "\n")) + _, _ = builder.WriteString("\n\nSources:\n") + _, _ = builder.WriteString(strings.Join(links, "\n")) } } From ef5a41057fa7127aba012f5bbdd044ea11dc0b05 Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Sun, 18 Jan 2026 10:52:18 +0800 Subject: [PATCH 046/147] =?UTF-8?q?feat(usage):=20=E6=B7=BB=E5=8A=A0?= =?UTF-8?q?=E6=B8=85=E7=90=86=E4=BB=BB=E5=8A=A1=E4=B8=8E=E7=BB=9F=E8=AE=A1?= =?UTF-8?q?=E8=BF=87=E6=BB=A4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/cmd/server/wire.go | 7 + backend/cmd/server/wire_gen.go | 13 +- backend/go.mod | 1 + backend/go.sum | 1 + backend/internal/config/config.go | 49 ++ backend/internal/config/config_test.go | 570 ++++++++++++++++++ .../admin/admin_basic_handlers_test.go | 262 ++++++++ .../handler/admin/admin_helpers_test.go | 134 ++++ .../handler/admin/admin_service_stub_test.go | 290 +++++++++ .../handler/admin/dashboard_handler.go | 28 +- .../admin/usage_cleanup_handler_test.go | 377 ++++++++++++ .../internal/handler/admin/usage_handler.go | 192 +++++- backend/internal/handler/dto/mappers.go | 30 + backend/internal/handler/dto/types.go | 27 + .../repository/dashboard_aggregation_repo.go | 69 +++ .../internal/repository/usage_cleanup_repo.go | 363 +++++++++++ .../repository/usage_cleanup_repo_test.go | 440 ++++++++++++++ backend/internal/repository/usage_log_repo.go | 14 +- .../usage_log_repo_integration_test.go | 14 +- backend/internal/repository/wire.go | 1 + backend/internal/server/api_contract_test.go | 4 +- backend/internal/server/routes/admin.go | 3 + .../internal/service/account_usage_service.go | 8 +- .../service/dashboard_aggregation_service.go | 59 +- .../dashboard_aggregation_service_test.go | 4 + backend/internal/service/dashboard_service.go | 8 +- .../service/dashboard_service_test.go | 4 + backend/internal/service/ratelimit_service.go | 4 +- backend/internal/service/usage_cleanup.go | 74 +++ .../internal/service/usage_cleanup_service.go | 400 ++++++++++++ .../service/usage_cleanup_service_test.go | 420 +++++++++++++ backend/internal/service/wire.go | 8 + .../042_add_usage_cleanup_tasks.sql | 21 + .../043_add_usage_cleanup_cancel_audit.sql | 10 + config.yaml | 21 + deploy/config.example.yaml | 21 + frontend/src/api/admin/dashboard.ts | 2 + frontend/src/api/admin/usage.ts | 82 ++- .../admin/usage/UsageCleanupDialog.vue | 339 +++++++++++ .../components/admin/usage/UsageFilters.vue | 25 +- frontend/src/i18n/locales/en.ts | 38 +- frontend/src/i18n/locales/zh.ts | 38 +- frontend/src/types/index.ts | 29 + frontend/src/views/admin/UsageView.vue | 20 +- 44 files changed, 4478 insertions(+), 46 deletions(-) create mode 100644 backend/internal/handler/admin/admin_basic_handlers_test.go create mode 100644 backend/internal/handler/admin/admin_helpers_test.go create mode 100644 backend/internal/handler/admin/admin_service_stub_test.go create mode 100644 backend/internal/handler/admin/usage_cleanup_handler_test.go create mode 100644 backend/internal/repository/usage_cleanup_repo.go create mode 100644 backend/internal/repository/usage_cleanup_repo_test.go create mode 100644 backend/internal/service/usage_cleanup.go create mode 100644 backend/internal/service/usage_cleanup_service.go create mode 100644 backend/internal/service/usage_cleanup_service_test.go create mode 100644 backend/migrations/042_add_usage_cleanup_tasks.sql create mode 100644 backend/migrations/043_add_usage_cleanup_cancel_audit.sql create mode 100644 frontend/src/components/admin/usage/UsageCleanupDialog.vue diff --git a/backend/cmd/server/wire.go b/backend/cmd/server/wire.go index 0a5f9744..5ef04a66 100644 --- a/backend/cmd/server/wire.go +++ b/backend/cmd/server/wire.go @@ -70,6 +70,7 @@ func provideCleanup( schedulerSnapshot *service.SchedulerSnapshotService, tokenRefresh *service.TokenRefreshService, accountExpiry *service.AccountExpiryService, + usageCleanup *service.UsageCleanupService, pricing *service.PricingService, emailQueue *service.EmailQueueService, billingCache *service.BillingCacheService, @@ -123,6 +124,12 @@ func provideCleanup( } return nil }}, + {"UsageCleanupService", func() error { + if usageCleanup != nil { + usageCleanup.Stop() + } + return nil + }}, {"TokenRefreshService", func() error { tokenRefresh.Stop() return nil diff --git a/backend/cmd/server/wire_gen.go b/backend/cmd/server/wire_gen.go index 27404b02..509cf13a 100644 --- a/backend/cmd/server/wire_gen.go +++ b/backend/cmd/server/wire_gen.go @@ -153,7 +153,9 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { updateService := service.ProvideUpdateService(updateCache, gitHubReleaseClient, serviceBuildInfo) systemHandler := handler.ProvideSystemHandler(updateService) adminSubscriptionHandler := admin.NewSubscriptionHandler(subscriptionService) - adminUsageHandler := admin.NewUsageHandler(usageService, apiKeyService, adminService) + usageCleanupRepository := repository.NewUsageCleanupRepository(db) + usageCleanupService := service.ProvideUsageCleanupService(usageCleanupRepository, timingWheelService, dashboardAggregationService, configConfig) + adminUsageHandler := admin.NewUsageHandler(usageService, apiKeyService, adminService, usageCleanupService) userAttributeDefinitionRepository := repository.NewUserAttributeDefinitionRepository(client) userAttributeValueRepository := repository.NewUserAttributeValueRepository(client) userAttributeService := service.NewUserAttributeService(userAttributeDefinitionRepository, userAttributeValueRepository) @@ -175,7 +177,7 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { opsScheduledReportService := service.ProvideOpsScheduledReportService(opsService, userService, emailService, redisClient, configConfig) tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, compositeTokenCacheInvalidator, configConfig) accountExpiryService := service.ProvideAccountExpiryService(accountRepository) - v := provideCleanup(client, redisClient, opsMetricsCollector, opsAggregationService, opsAlertEvaluatorService, opsCleanupService, opsScheduledReportService, schedulerSnapshotService, tokenRefreshService, accountExpiryService, pricingService, emailQueueService, billingCacheService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService) + v := provideCleanup(client, redisClient, opsMetricsCollector, opsAggregationService, opsAlertEvaluatorService, opsCleanupService, opsScheduledReportService, schedulerSnapshotService, tokenRefreshService, accountExpiryService, usageCleanupService, pricingService, emailQueueService, billingCacheService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService) application := &Application{ Server: httpServer, Cleanup: v, @@ -208,6 +210,7 @@ func provideCleanup( schedulerSnapshot *service.SchedulerSnapshotService, tokenRefresh *service.TokenRefreshService, accountExpiry *service.AccountExpiryService, + usageCleanup *service.UsageCleanupService, pricing *service.PricingService, emailQueue *service.EmailQueueService, billingCache *service.BillingCacheService, @@ -260,6 +263,12 @@ func provideCleanup( } return nil }}, + {"UsageCleanupService", func() error { + if usageCleanup != nil { + usageCleanup.Stop() + } + return nil + }}, {"TokenRefreshService", func() error { tokenRefresh.Stop() return nil diff --git a/backend/go.mod b/backend/go.mod index 4ac6ba14..9ebae69e 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -31,6 +31,7 @@ require ( ariga.io/atlas v0.32.1-0.20250325101103-175b25e1c1b9 // indirect dario.cat/mergo v1.0.2 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/DATA-DOG/go-sqlmock v1.5.2 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/agext/levenshtein v1.2.3 // indirect github.com/andybalholm/brotli v1.2.0 // indirect diff --git a/backend/go.sum b/backend/go.sum index 415e73a7..4496603d 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -141,6 +141,7 @@ github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk= github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index 5dc6ad19..d616e44b 100644 --- a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -55,6 +55,7 @@ type Config struct { APIKeyAuth APIKeyAuthCacheConfig `mapstructure:"api_key_auth_cache"` Dashboard DashboardCacheConfig `mapstructure:"dashboard_cache"` DashboardAgg DashboardAggregationConfig `mapstructure:"dashboard_aggregation"` + UsageCleanup UsageCleanupConfig `mapstructure:"usage_cleanup"` Concurrency ConcurrencyConfig `mapstructure:"concurrency"` TokenRefresh TokenRefreshConfig `mapstructure:"token_refresh"` RunMode string `mapstructure:"run_mode" yaml:"run_mode"` @@ -489,6 +490,20 @@ type DashboardAggregationRetentionConfig struct { DailyDays int `mapstructure:"daily_days"` } +// UsageCleanupConfig 使用记录清理任务配置 +type UsageCleanupConfig struct { + // Enabled: 是否启用清理任务执行器 + Enabled bool `mapstructure:"enabled"` + // MaxRangeDays: 单次任务允许的最大时间跨度(天) + MaxRangeDays int `mapstructure:"max_range_days"` + // BatchSize: 单批删除数量 + BatchSize int `mapstructure:"batch_size"` + // WorkerIntervalSeconds: 后台任务轮询间隔(秒) + WorkerIntervalSeconds int `mapstructure:"worker_interval_seconds"` + // TaskTimeoutSeconds: 单次任务最大执行时长(秒) + TaskTimeoutSeconds int `mapstructure:"task_timeout_seconds"` +} + func NormalizeRunMode(value string) string { normalized := strings.ToLower(strings.TrimSpace(value)) switch normalized { @@ -749,6 +764,13 @@ func setDefaults() { viper.SetDefault("dashboard_aggregation.retention.daily_days", 730) viper.SetDefault("dashboard_aggregation.recompute_days", 2) + // Usage cleanup task + viper.SetDefault("usage_cleanup.enabled", true) + viper.SetDefault("usage_cleanup.max_range_days", 31) + viper.SetDefault("usage_cleanup.batch_size", 5000) + viper.SetDefault("usage_cleanup.worker_interval_seconds", 10) + viper.SetDefault("usage_cleanup.task_timeout_seconds", 1800) + // Gateway viper.SetDefault("gateway.response_header_timeout", 600) // 600秒(10分钟)等待上游响应头,LLM高负载时可能排队较久 viper.SetDefault("gateway.log_upstream_error_body", true) @@ -985,6 +1007,33 @@ func (c *Config) Validate() error { return fmt.Errorf("dashboard_aggregation.recompute_days must be non-negative") } } + if c.UsageCleanup.Enabled { + if c.UsageCleanup.MaxRangeDays <= 0 { + return fmt.Errorf("usage_cleanup.max_range_days must be positive") + } + if c.UsageCleanup.BatchSize <= 0 { + return fmt.Errorf("usage_cleanup.batch_size must be positive") + } + if c.UsageCleanup.WorkerIntervalSeconds <= 0 { + return fmt.Errorf("usage_cleanup.worker_interval_seconds must be positive") + } + if c.UsageCleanup.TaskTimeoutSeconds <= 0 { + return fmt.Errorf("usage_cleanup.task_timeout_seconds must be positive") + } + } else { + if c.UsageCleanup.MaxRangeDays < 0 { + return fmt.Errorf("usage_cleanup.max_range_days must be non-negative") + } + if c.UsageCleanup.BatchSize < 0 { + return fmt.Errorf("usage_cleanup.batch_size must be non-negative") + } + if c.UsageCleanup.WorkerIntervalSeconds < 0 { + return fmt.Errorf("usage_cleanup.worker_interval_seconds must be non-negative") + } + if c.UsageCleanup.TaskTimeoutSeconds < 0 { + return fmt.Errorf("usage_cleanup.task_timeout_seconds must be non-negative") + } + } if c.Gateway.MaxBodySize <= 0 { return fmt.Errorf("gateway.max_body_size must be positive") } diff --git a/backend/internal/config/config_test.go b/backend/internal/config/config_test.go index 4637989e..f734619f 100644 --- a/backend/internal/config/config_test.go +++ b/backend/internal/config/config_test.go @@ -280,3 +280,573 @@ func TestValidateDashboardAggregationBackfillMaxDays(t *testing.T) { t.Fatalf("Validate() expected backfill_max_days error, got: %v", err) } } + +func TestLoadDefaultUsageCleanupConfig(t *testing.T) { + viper.Reset() + + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + + if !cfg.UsageCleanup.Enabled { + t.Fatalf("UsageCleanup.Enabled = false, want true") + } + if cfg.UsageCleanup.MaxRangeDays != 31 { + t.Fatalf("UsageCleanup.MaxRangeDays = %d, want 31", cfg.UsageCleanup.MaxRangeDays) + } + if cfg.UsageCleanup.BatchSize != 5000 { + t.Fatalf("UsageCleanup.BatchSize = %d, want 5000", cfg.UsageCleanup.BatchSize) + } + if cfg.UsageCleanup.WorkerIntervalSeconds != 10 { + t.Fatalf("UsageCleanup.WorkerIntervalSeconds = %d, want 10", cfg.UsageCleanup.WorkerIntervalSeconds) + } + if cfg.UsageCleanup.TaskTimeoutSeconds != 1800 { + t.Fatalf("UsageCleanup.TaskTimeoutSeconds = %d, want 1800", cfg.UsageCleanup.TaskTimeoutSeconds) + } +} + +func TestValidateUsageCleanupConfigEnabled(t *testing.T) { + viper.Reset() + + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + + cfg.UsageCleanup.Enabled = true + cfg.UsageCleanup.MaxRangeDays = 0 + err = cfg.Validate() + if err == nil { + t.Fatalf("Validate() expected error for usage_cleanup.max_range_days, got nil") + } + if !strings.Contains(err.Error(), "usage_cleanup.max_range_days") { + t.Fatalf("Validate() expected max_range_days error, got: %v", err) + } +} + +func TestValidateUsageCleanupConfigDisabled(t *testing.T) { + viper.Reset() + + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + + cfg.UsageCleanup.Enabled = false + cfg.UsageCleanup.BatchSize = -1 + err = cfg.Validate() + if err == nil { + t.Fatalf("Validate() expected error for usage_cleanup.batch_size, got nil") + } + if !strings.Contains(err.Error(), "usage_cleanup.batch_size") { + t.Fatalf("Validate() expected batch_size error, got: %v", err) + } +} + +func TestConfigAddressHelpers(t *testing.T) { + server := ServerConfig{Host: "127.0.0.1", Port: 9000} + if server.Address() != "127.0.0.1:9000" { + t.Fatalf("ServerConfig.Address() = %q", server.Address()) + } + + dbCfg := DatabaseConfig{ + Host: "localhost", + Port: 5432, + User: "postgres", + Password: "", + DBName: "sub2api", + SSLMode: "disable", + } + if !strings.Contains(dbCfg.DSN(), "password=") { + } else { + t.Fatalf("DatabaseConfig.DSN() should not include password when empty") + } + + dbCfg.Password = "secret" + if !strings.Contains(dbCfg.DSN(), "password=secret") { + t.Fatalf("DatabaseConfig.DSN() missing password") + } + + dbCfg.Password = "" + if strings.Contains(dbCfg.DSNWithTimezone("UTC"), "password=") { + t.Fatalf("DatabaseConfig.DSNWithTimezone() should omit password when empty") + } + + if !strings.Contains(dbCfg.DSNWithTimezone(""), "TimeZone=Asia/Shanghai") { + t.Fatalf("DatabaseConfig.DSNWithTimezone() should use default timezone") + } + if !strings.Contains(dbCfg.DSNWithTimezone("UTC"), "TimeZone=UTC") { + t.Fatalf("DatabaseConfig.DSNWithTimezone() should use provided timezone") + } + + redis := RedisConfig{Host: "redis", Port: 6379} + if redis.Address() != "redis:6379" { + t.Fatalf("RedisConfig.Address() = %q", redis.Address()) + } +} + +func TestNormalizeStringSlice(t *testing.T) { + values := normalizeStringSlice([]string{" a ", "", "b", " ", "c"}) + if len(values) != 3 || values[0] != "a" || values[1] != "b" || values[2] != "c" { + t.Fatalf("normalizeStringSlice() unexpected result: %#v", values) + } + if normalizeStringSlice(nil) != nil { + t.Fatalf("normalizeStringSlice(nil) expected nil slice") + } +} + +func TestGetServerAddressFromEnv(t *testing.T) { + t.Setenv("SERVER_HOST", "127.0.0.1") + t.Setenv("SERVER_PORT", "9090") + + address := GetServerAddress() + if address != "127.0.0.1:9090" { + t.Fatalf("GetServerAddress() = %q", address) + } +} + +func TestValidateAbsoluteHTTPURL(t *testing.T) { + if err := ValidateAbsoluteHTTPURL("https://example.com/path"); err != nil { + t.Fatalf("ValidateAbsoluteHTTPURL valid url error: %v", err) + } + if err := ValidateAbsoluteHTTPURL(""); err == nil { + t.Fatalf("ValidateAbsoluteHTTPURL should reject empty url") + } + if err := ValidateAbsoluteHTTPURL("/relative"); err == nil { + t.Fatalf("ValidateAbsoluteHTTPURL should reject relative url") + } + if err := ValidateAbsoluteHTTPURL("ftp://example.com"); err == nil { + t.Fatalf("ValidateAbsoluteHTTPURL should reject ftp scheme") + } + if err := ValidateAbsoluteHTTPURL("https://example.com/#frag"); err == nil { + t.Fatalf("ValidateAbsoluteHTTPURL should reject fragment") + } +} + +func TestValidateFrontendRedirectURL(t *testing.T) { + if err := ValidateFrontendRedirectURL("/auth/callback"); err != nil { + t.Fatalf("ValidateFrontendRedirectURL relative error: %v", err) + } + if err := ValidateFrontendRedirectURL("https://example.com/auth"); err != nil { + t.Fatalf("ValidateFrontendRedirectURL absolute error: %v", err) + } + if err := ValidateFrontendRedirectURL("example.com/path"); err == nil { + t.Fatalf("ValidateFrontendRedirectURL should reject non-absolute url") + } + if err := ValidateFrontendRedirectURL("//evil.com"); err == nil { + t.Fatalf("ValidateFrontendRedirectURL should reject // prefix") + } + if err := ValidateFrontendRedirectURL("javascript:alert(1)"); err == nil { + t.Fatalf("ValidateFrontendRedirectURL should reject javascript scheme") + } +} + +func TestWarnIfInsecureURL(t *testing.T) { + warnIfInsecureURL("test", "http://example.com") + warnIfInsecureURL("test", "bad://url") +} + +func TestGenerateJWTSecretDefaultLength(t *testing.T) { + secret, err := generateJWTSecret(0) + if err != nil { + t.Fatalf("generateJWTSecret error: %v", err) + } + if len(secret) == 0 { + t.Fatalf("generateJWTSecret returned empty string") + } +} + +func TestValidateOpsCleanupScheduleRequired(t *testing.T) { + viper.Reset() + + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + cfg.Ops.Cleanup.Enabled = true + cfg.Ops.Cleanup.Schedule = "" + err = cfg.Validate() + if err == nil { + t.Fatalf("Validate() expected error for ops.cleanup.schedule") + } + if !strings.Contains(err.Error(), "ops.cleanup.schedule") { + t.Fatalf("Validate() expected ops.cleanup.schedule error, got: %v", err) + } +} + +func TestValidateConcurrencyPingInterval(t *testing.T) { + viper.Reset() + + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + cfg.Concurrency.PingInterval = 3 + err = cfg.Validate() + if err == nil { + t.Fatalf("Validate() expected error for concurrency.ping_interval") + } + if !strings.Contains(err.Error(), "concurrency.ping_interval") { + t.Fatalf("Validate() expected concurrency.ping_interval error, got: %v", err) + } +} + +func TestProvideConfig(t *testing.T) { + viper.Reset() + if _, err := ProvideConfig(); err != nil { + t.Fatalf("ProvideConfig() error: %v", err) + } +} + +func TestValidateConfigWithLinuxDoEnabled(t *testing.T) { + viper.Reset() + + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + + cfg.Security.CSP.Enabled = true + cfg.Security.CSP.Policy = "default-src 'self'" + + cfg.LinuxDo.Enabled = true + cfg.LinuxDo.ClientID = "client" + cfg.LinuxDo.ClientSecret = "secret" + cfg.LinuxDo.AuthorizeURL = "https://example.com/oauth2/authorize" + cfg.LinuxDo.TokenURL = "https://example.com/oauth2/token" + cfg.LinuxDo.UserInfoURL = "https://example.com/oauth2/userinfo" + cfg.LinuxDo.RedirectURL = "https://example.com/api/v1/auth/oauth/linuxdo/callback" + cfg.LinuxDo.FrontendRedirectURL = "/auth/linuxdo/callback" + cfg.LinuxDo.TokenAuthMethod = "client_secret_post" + + if err := cfg.Validate(); err != nil { + t.Fatalf("Validate() unexpected error: %v", err) + } +} + +func TestValidateJWTSecretStrength(t *testing.T) { + if !isWeakJWTSecret("change-me-in-production") { + t.Fatalf("isWeakJWTSecret should detect weak secret") + } + if isWeakJWTSecret("StrongSecretValue") { + t.Fatalf("isWeakJWTSecret should accept strong secret") + } +} + +func TestGenerateJWTSecretWithLength(t *testing.T) { + secret, err := generateJWTSecret(16) + if err != nil { + t.Fatalf("generateJWTSecret error: %v", err) + } + if len(secret) == 0 { + t.Fatalf("generateJWTSecret returned empty string") + } +} + +func TestValidateAbsoluteHTTPURLMissingHost(t *testing.T) { + if err := ValidateAbsoluteHTTPURL("https://"); err == nil { + t.Fatalf("ValidateAbsoluteHTTPURL should reject missing host") + } +} + +func TestValidateFrontendRedirectURLInvalidChars(t *testing.T) { + if err := ValidateFrontendRedirectURL("/auth/\ncallback"); err == nil { + t.Fatalf("ValidateFrontendRedirectURL should reject invalid chars") + } + if err := ValidateFrontendRedirectURL("http://"); err == nil { + t.Fatalf("ValidateFrontendRedirectURL should reject missing host") + } + if err := ValidateFrontendRedirectURL("mailto:user@example.com"); err == nil { + t.Fatalf("ValidateFrontendRedirectURL should reject mailto") + } +} + +func TestWarnIfInsecureURLHTTPS(t *testing.T) { + warnIfInsecureURL("secure", "https://example.com") +} + +func TestValidateConfigErrors(t *testing.T) { + buildValid := func(t *testing.T) *Config { + t.Helper() + viper.Reset() + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + return cfg + } + + cases := []struct { + name string + mutate func(*Config) + wantErr string + }{ + { + name: "jwt expire hour positive", + mutate: func(c *Config) { c.JWT.ExpireHour = 0 }, + wantErr: "jwt.expire_hour must be positive", + }, + { + name: "jwt expire hour max", + mutate: func(c *Config) { c.JWT.ExpireHour = 200 }, + wantErr: "jwt.expire_hour must be <= 168", + }, + { + name: "csp policy required", + mutate: func(c *Config) { c.Security.CSP.Enabled = true; c.Security.CSP.Policy = "" }, + wantErr: "security.csp.policy", + }, + { + name: "linuxdo client id required", + mutate: func(c *Config) { + c.LinuxDo.Enabled = true + c.LinuxDo.ClientID = "" + }, + wantErr: "linuxdo_connect.client_id", + }, + { + name: "linuxdo token auth method", + mutate: func(c *Config) { + c.LinuxDo.Enabled = true + c.LinuxDo.ClientID = "client" + c.LinuxDo.ClientSecret = "secret" + c.LinuxDo.AuthorizeURL = "https://example.com/authorize" + c.LinuxDo.TokenURL = "https://example.com/token" + c.LinuxDo.UserInfoURL = "https://example.com/userinfo" + c.LinuxDo.RedirectURL = "https://example.com/callback" + c.LinuxDo.FrontendRedirectURL = "/auth/callback" + c.LinuxDo.TokenAuthMethod = "invalid" + }, + wantErr: "linuxdo_connect.token_auth_method", + }, + { + name: "billing circuit breaker threshold", + mutate: func(c *Config) { c.Billing.CircuitBreaker.FailureThreshold = 0 }, + wantErr: "billing.circuit_breaker.failure_threshold", + }, + { + name: "billing circuit breaker reset", + mutate: func(c *Config) { c.Billing.CircuitBreaker.ResetTimeoutSeconds = 0 }, + wantErr: "billing.circuit_breaker.reset_timeout_seconds", + }, + { + name: "billing circuit breaker half open", + mutate: func(c *Config) { c.Billing.CircuitBreaker.HalfOpenRequests = 0 }, + wantErr: "billing.circuit_breaker.half_open_requests", + }, + { + name: "database max open conns", + mutate: func(c *Config) { c.Database.MaxOpenConns = 0 }, + wantErr: "database.max_open_conns", + }, + { + name: "database max lifetime", + mutate: func(c *Config) { c.Database.ConnMaxLifetimeMinutes = -1 }, + wantErr: "database.conn_max_lifetime_minutes", + }, + { + name: "database idle exceeds open", + mutate: func(c *Config) { c.Database.MaxIdleConns = c.Database.MaxOpenConns + 1 }, + wantErr: "database.max_idle_conns cannot exceed", + }, + { + name: "redis dial timeout", + mutate: func(c *Config) { c.Redis.DialTimeoutSeconds = 0 }, + wantErr: "redis.dial_timeout_seconds", + }, + { + name: "redis read timeout", + mutate: func(c *Config) { c.Redis.ReadTimeoutSeconds = 0 }, + wantErr: "redis.read_timeout_seconds", + }, + { + name: "redis write timeout", + mutate: func(c *Config) { c.Redis.WriteTimeoutSeconds = 0 }, + wantErr: "redis.write_timeout_seconds", + }, + { + name: "redis pool size", + mutate: func(c *Config) { c.Redis.PoolSize = 0 }, + wantErr: "redis.pool_size", + }, + { + name: "redis idle exceeds pool", + mutate: func(c *Config) { c.Redis.MinIdleConns = c.Redis.PoolSize + 1 }, + wantErr: "redis.min_idle_conns cannot exceed", + }, + { + name: "dashboard cache disabled negative", + mutate: func(c *Config) { c.Dashboard.Enabled = false; c.Dashboard.StatsTTLSeconds = -1 }, + wantErr: "dashboard_cache.stats_ttl_seconds", + }, + { + name: "dashboard cache fresh ttl positive", + mutate: func(c *Config) { c.Dashboard.Enabled = true; c.Dashboard.StatsFreshTTLSeconds = 0 }, + wantErr: "dashboard_cache.stats_fresh_ttl_seconds", + }, + { + name: "dashboard aggregation enabled interval", + mutate: func(c *Config) { c.DashboardAgg.Enabled = true; c.DashboardAgg.IntervalSeconds = 0 }, + wantErr: "dashboard_aggregation.interval_seconds", + }, + { + name: "dashboard aggregation backfill positive", + mutate: func(c *Config) { + c.DashboardAgg.Enabled = true + c.DashboardAgg.BackfillEnabled = true + c.DashboardAgg.BackfillMaxDays = 0 + }, + wantErr: "dashboard_aggregation.backfill_max_days", + }, + { + name: "dashboard aggregation retention", + mutate: func(c *Config) { c.DashboardAgg.Enabled = true; c.DashboardAgg.Retention.UsageLogsDays = 0 }, + wantErr: "dashboard_aggregation.retention.usage_logs_days", + }, + { + name: "dashboard aggregation disabled interval", + mutate: func(c *Config) { c.DashboardAgg.Enabled = false; c.DashboardAgg.IntervalSeconds = -1 }, + wantErr: "dashboard_aggregation.interval_seconds", + }, + { + name: "usage cleanup max range", + mutate: func(c *Config) { c.UsageCleanup.Enabled = true; c.UsageCleanup.MaxRangeDays = 0 }, + wantErr: "usage_cleanup.max_range_days", + }, + { + name: "usage cleanup worker interval", + mutate: func(c *Config) { c.UsageCleanup.Enabled = true; c.UsageCleanup.WorkerIntervalSeconds = 0 }, + wantErr: "usage_cleanup.worker_interval_seconds", + }, + { + name: "usage cleanup batch size", + mutate: func(c *Config) { c.UsageCleanup.Enabled = true; c.UsageCleanup.BatchSize = 0 }, + wantErr: "usage_cleanup.batch_size", + }, + { + name: "usage cleanup disabled negative", + mutate: func(c *Config) { c.UsageCleanup.Enabled = false; c.UsageCleanup.BatchSize = -1 }, + wantErr: "usage_cleanup.batch_size", + }, + { + name: "gateway max body size", + mutate: func(c *Config) { c.Gateway.MaxBodySize = 0 }, + wantErr: "gateway.max_body_size", + }, + { + name: "gateway max idle conns", + mutate: func(c *Config) { c.Gateway.MaxIdleConns = 0 }, + wantErr: "gateway.max_idle_conns", + }, + { + name: "gateway max idle conns per host", + mutate: func(c *Config) { c.Gateway.MaxIdleConnsPerHost = 0 }, + wantErr: "gateway.max_idle_conns_per_host", + }, + { + name: "gateway idle timeout", + mutate: func(c *Config) { c.Gateway.IdleConnTimeoutSeconds = 0 }, + wantErr: "gateway.idle_conn_timeout_seconds", + }, + { + name: "gateway max upstream clients", + mutate: func(c *Config) { c.Gateway.MaxUpstreamClients = 0 }, + wantErr: "gateway.max_upstream_clients", + }, + { + name: "gateway client idle ttl", + mutate: func(c *Config) { c.Gateway.ClientIdleTTLSeconds = 0 }, + wantErr: "gateway.client_idle_ttl_seconds", + }, + { + name: "gateway concurrency slot ttl", + mutate: func(c *Config) { c.Gateway.ConcurrencySlotTTLMinutes = 0 }, + wantErr: "gateway.concurrency_slot_ttl_minutes", + }, + { + name: "gateway max conns per host", + mutate: func(c *Config) { c.Gateway.MaxConnsPerHost = -1 }, + wantErr: "gateway.max_conns_per_host", + }, + { + name: "gateway connection isolation", + mutate: func(c *Config) { c.Gateway.ConnectionPoolIsolation = "invalid" }, + wantErr: "gateway.connection_pool_isolation", + }, + { + name: "gateway stream keepalive range", + mutate: func(c *Config) { c.Gateway.StreamKeepaliveInterval = 4 }, + wantErr: "gateway.stream_keepalive_interval", + }, + { + name: "gateway stream data interval range", + mutate: func(c *Config) { c.Gateway.StreamDataIntervalTimeout = 5 }, + wantErr: "gateway.stream_data_interval_timeout", + }, + { + name: "gateway stream data interval negative", + mutate: func(c *Config) { c.Gateway.StreamDataIntervalTimeout = -1 }, + wantErr: "gateway.stream_data_interval_timeout must be non-negative", + }, + { + name: "gateway max line size", + mutate: func(c *Config) { c.Gateway.MaxLineSize = 1024 }, + wantErr: "gateway.max_line_size must be at least", + }, + { + name: "gateway max line size negative", + mutate: func(c *Config) { c.Gateway.MaxLineSize = -1 }, + wantErr: "gateway.max_line_size must be non-negative", + }, + { + name: "gateway scheduling sticky waiting", + mutate: func(c *Config) { c.Gateway.Scheduling.StickySessionMaxWaiting = 0 }, + wantErr: "gateway.scheduling.sticky_session_max_waiting", + }, + { + name: "gateway scheduling outbox poll", + mutate: func(c *Config) { c.Gateway.Scheduling.OutboxPollIntervalSeconds = 0 }, + wantErr: "gateway.scheduling.outbox_poll_interval_seconds", + }, + { + name: "gateway scheduling outbox failures", + mutate: func(c *Config) { c.Gateway.Scheduling.OutboxLagRebuildFailures = 0 }, + wantErr: "gateway.scheduling.outbox_lag_rebuild_failures", + }, + { + name: "gateway outbox lag rebuild", + mutate: func(c *Config) { + c.Gateway.Scheduling.OutboxLagWarnSeconds = 10 + c.Gateway.Scheduling.OutboxLagRebuildSeconds = 5 + }, + wantErr: "gateway.scheduling.outbox_lag_rebuild_seconds", + }, + { + name: "ops metrics collector ttl", + mutate: func(c *Config) { c.Ops.MetricsCollectorCache.TTL = -1 }, + wantErr: "ops.metrics_collector_cache.ttl", + }, + { + name: "ops cleanup retention", + mutate: func(c *Config) { c.Ops.Cleanup.ErrorLogRetentionDays = -1 }, + wantErr: "ops.cleanup.error_log_retention_days", + }, + { + name: "ops cleanup minute retention", + mutate: func(c *Config) { c.Ops.Cleanup.MinuteMetricsRetentionDays = -1 }, + wantErr: "ops.cleanup.minute_metrics_retention_days", + }, + } + + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + cfg := buildValid(t) + tt.mutate(cfg) + err := cfg.Validate() + if err == nil || !strings.Contains(err.Error(), tt.wantErr) { + t.Fatalf("Validate() error = %v, want %q", err, tt.wantErr) + } + }) + } +} diff --git a/backend/internal/handler/admin/admin_basic_handlers_test.go b/backend/internal/handler/admin/admin_basic_handlers_test.go new file mode 100644 index 00000000..e0f731e1 --- /dev/null +++ b/backend/internal/handler/admin/admin_basic_handlers_test.go @@ -0,0 +1,262 @@ +package admin + +import ( + "bytes" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/require" +) + +func setupAdminRouter() (*gin.Engine, *stubAdminService) { + gin.SetMode(gin.TestMode) + router := gin.New() + adminSvc := newStubAdminService() + + userHandler := NewUserHandler(adminSvc) + groupHandler := NewGroupHandler(adminSvc) + proxyHandler := NewProxyHandler(adminSvc) + redeemHandler := NewRedeemHandler(adminSvc) + + router.GET("/api/v1/admin/users", userHandler.List) + router.GET("/api/v1/admin/users/:id", userHandler.GetByID) + router.POST("/api/v1/admin/users", userHandler.Create) + router.PUT("/api/v1/admin/users/:id", userHandler.Update) + router.DELETE("/api/v1/admin/users/:id", userHandler.Delete) + router.POST("/api/v1/admin/users/:id/balance", userHandler.UpdateBalance) + router.GET("/api/v1/admin/users/:id/api-keys", userHandler.GetUserAPIKeys) + router.GET("/api/v1/admin/users/:id/usage", userHandler.GetUserUsage) + + router.GET("/api/v1/admin/groups", groupHandler.List) + router.GET("/api/v1/admin/groups/all", groupHandler.GetAll) + router.GET("/api/v1/admin/groups/:id", groupHandler.GetByID) + router.POST("/api/v1/admin/groups", groupHandler.Create) + router.PUT("/api/v1/admin/groups/:id", groupHandler.Update) + router.DELETE("/api/v1/admin/groups/:id", groupHandler.Delete) + router.GET("/api/v1/admin/groups/:id/stats", groupHandler.GetStats) + router.GET("/api/v1/admin/groups/:id/api-keys", groupHandler.GetGroupAPIKeys) + + router.GET("/api/v1/admin/proxies", proxyHandler.List) + router.GET("/api/v1/admin/proxies/all", proxyHandler.GetAll) + router.GET("/api/v1/admin/proxies/:id", proxyHandler.GetByID) + router.POST("/api/v1/admin/proxies", proxyHandler.Create) + router.PUT("/api/v1/admin/proxies/:id", proxyHandler.Update) + router.DELETE("/api/v1/admin/proxies/:id", proxyHandler.Delete) + router.POST("/api/v1/admin/proxies/batch-delete", proxyHandler.BatchDelete) + router.POST("/api/v1/admin/proxies/:id/test", proxyHandler.Test) + router.GET("/api/v1/admin/proxies/:id/stats", proxyHandler.GetStats) + router.GET("/api/v1/admin/proxies/:id/accounts", proxyHandler.GetProxyAccounts) + + router.GET("/api/v1/admin/redeem-codes", redeemHandler.List) + router.GET("/api/v1/admin/redeem-codes/:id", redeemHandler.GetByID) + router.POST("/api/v1/admin/redeem-codes", redeemHandler.Generate) + router.DELETE("/api/v1/admin/redeem-codes/:id", redeemHandler.Delete) + router.POST("/api/v1/admin/redeem-codes/batch-delete", redeemHandler.BatchDelete) + router.POST("/api/v1/admin/redeem-codes/:id/expire", redeemHandler.Expire) + router.GET("/api/v1/admin/redeem-codes/:id/stats", redeemHandler.GetStats) + + return router, adminSvc +} + +func TestUserHandlerEndpoints(t *testing.T) { + router, _ := setupAdminRouter() + + rec := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/api/v1/admin/users?page=1&page_size=20", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodGet, "/api/v1/admin/users/1", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + createBody := map[string]any{"email": "new@example.com", "password": "pass123", "balance": 1, "concurrency": 2} + body, _ := json.Marshal(createBody) + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodPost, "/api/v1/admin/users", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + updateBody := map[string]any{"email": "updated@example.com"} + body, _ = json.Marshal(updateBody) + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodPut, "/api/v1/admin/users/1", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodDelete, "/api/v1/admin/users/1", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodPost, "/api/v1/admin/users/1/balance", bytes.NewBufferString(`{"balance":1,"operation":"add"}`)) + req.Header.Set("Content-Type", "application/json") + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodGet, "/api/v1/admin/users/1/api-keys", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodGet, "/api/v1/admin/users/1/usage?period=today", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) +} + +func TestGroupHandlerEndpoints(t *testing.T) { + router, _ := setupAdminRouter() + + rec := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/api/v1/admin/groups", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodGet, "/api/v1/admin/groups/all", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodGet, "/api/v1/admin/groups/2", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + body, _ := json.Marshal(map[string]any{"name": "new", "platform": "anthropic", "subscription_type": "standard"}) + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodPost, "/api/v1/admin/groups", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + body, _ = json.Marshal(map[string]any{"name": "update"}) + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodPut, "/api/v1/admin/groups/2", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodDelete, "/api/v1/admin/groups/2", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodGet, "/api/v1/admin/groups/2/stats", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodGet, "/api/v1/admin/groups/2/api-keys", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) +} + +func TestProxyHandlerEndpoints(t *testing.T) { + router, _ := setupAdminRouter() + + rec := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/api/v1/admin/proxies", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodGet, "/api/v1/admin/proxies/all", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodGet, "/api/v1/admin/proxies/4", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + body, _ := json.Marshal(map[string]any{"name": "proxy", "protocol": "http", "host": "localhost", "port": 8080}) + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodPost, "/api/v1/admin/proxies", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + body, _ = json.Marshal(map[string]any{"name": "proxy2"}) + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodPut, "/api/v1/admin/proxies/4", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodDelete, "/api/v1/admin/proxies/4", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodPost, "/api/v1/admin/proxies/batch-delete", bytes.NewBufferString(`{"ids":[1,2]}`)) + req.Header.Set("Content-Type", "application/json") + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodPost, "/api/v1/admin/proxies/4/test", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodGet, "/api/v1/admin/proxies/4/stats", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodGet, "/api/v1/admin/proxies/4/accounts", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) +} + +func TestRedeemHandlerEndpoints(t *testing.T) { + router, _ := setupAdminRouter() + + rec := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/api/v1/admin/redeem-codes", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodGet, "/api/v1/admin/redeem-codes/5", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + body, _ := json.Marshal(map[string]any{"count": 1, "type": "balance", "value": 10}) + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodPost, "/api/v1/admin/redeem-codes", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodDelete, "/api/v1/admin/redeem-codes/5", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodPost, "/api/v1/admin/redeem-codes/batch-delete", bytes.NewBufferString(`{"ids":[1,2]}`)) + req.Header.Set("Content-Type", "application/json") + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodPost, "/api/v1/admin/redeem-codes/5/expire", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodGet, "/api/v1/admin/redeem-codes/5/stats", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) +} diff --git a/backend/internal/handler/admin/admin_helpers_test.go b/backend/internal/handler/admin/admin_helpers_test.go new file mode 100644 index 00000000..863c755c --- /dev/null +++ b/backend/internal/handler/admin/admin_helpers_test.go @@ -0,0 +1,134 @@ +package admin + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "net/netip" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/require" +) + +func TestParseTimeRange(t *testing.T) { + gin.SetMode(gin.TestMode) + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + req := httptest.NewRequest(http.MethodGet, "/?start_date=2024-01-01&end_date=2024-01-02&timezone=UTC", nil) + c.Request = req + + start, end := parseTimeRange(c) + require.Equal(t, time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), start) + require.Equal(t, time.Date(2024, 1, 3, 0, 0, 0, 0, time.UTC), end) + + req = httptest.NewRequest(http.MethodGet, "/?start_date=bad&timezone=UTC", nil) + c.Request = req + start, end = parseTimeRange(c) + require.False(t, start.IsZero()) + require.False(t, end.IsZero()) +} + +func TestParseOpsViewParam(t *testing.T) { + gin.SetMode(gin.TestMode) + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Request = httptest.NewRequest(http.MethodGet, "/?view=excluded", nil) + require.Equal(t, opsListViewExcluded, parseOpsViewParam(c)) + + c2, _ := gin.CreateTestContext(w) + c2.Request = httptest.NewRequest(http.MethodGet, "/?view=all", nil) + require.Equal(t, opsListViewAll, parseOpsViewParam(c2)) + + c3, _ := gin.CreateTestContext(w) + c3.Request = httptest.NewRequest(http.MethodGet, "/?view=unknown", nil) + require.Equal(t, opsListViewErrors, parseOpsViewParam(c3)) + + require.Equal(t, "", parseOpsViewParam(nil)) +} + +func TestParseOpsDuration(t *testing.T) { + dur, ok := parseOpsDuration("1h") + require.True(t, ok) + require.Equal(t, time.Hour, dur) + + _, ok = parseOpsDuration("invalid") + require.False(t, ok) +} + +func TestParseOpsTimeRange(t *testing.T) { + gin.SetMode(gin.TestMode) + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + now := time.Now().UTC() + startStr := now.Add(-time.Hour).Format(time.RFC3339) + endStr := now.Format(time.RFC3339) + c.Request = httptest.NewRequest(http.MethodGet, "/?start_time="+startStr+"&end_time="+endStr, nil) + start, end, err := parseOpsTimeRange(c, "1h") + require.NoError(t, err) + require.True(t, start.Before(end)) + + c2, _ := gin.CreateTestContext(w) + c2.Request = httptest.NewRequest(http.MethodGet, "/?start_time=bad", nil) + _, _, err = parseOpsTimeRange(c2, "1h") + require.Error(t, err) +} + +func TestParseOpsRealtimeWindow(t *testing.T) { + dur, label, ok := parseOpsRealtimeWindow("5m") + require.True(t, ok) + require.Equal(t, 5*time.Minute, dur) + require.Equal(t, "5min", label) + + _, _, ok = parseOpsRealtimeWindow("invalid") + require.False(t, ok) +} + +func TestPickThroughputBucketSeconds(t *testing.T) { + require.Equal(t, 60, pickThroughputBucketSeconds(30*time.Minute)) + require.Equal(t, 300, pickThroughputBucketSeconds(6*time.Hour)) + require.Equal(t, 3600, pickThroughputBucketSeconds(48*time.Hour)) +} + +func TestParseOpsQueryMode(t *testing.T) { + gin.SetMode(gin.TestMode) + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Request = httptest.NewRequest(http.MethodGet, "/?mode=raw", nil) + require.Equal(t, service.ParseOpsQueryMode("raw"), parseOpsQueryMode(c)) + require.Equal(t, service.OpsQueryMode(""), parseOpsQueryMode(nil)) +} + +func TestOpsAlertRuleValidation(t *testing.T) { + raw := map[string]json.RawMessage{ + "name": json.RawMessage(`"High error rate"`), + "metric_type": json.RawMessage(`"error_rate"`), + "operator": json.RawMessage(`">"`), + "threshold": json.RawMessage(`90`), + } + + validated, err := validateOpsAlertRulePayload(raw) + require.NoError(t, err) + require.Equal(t, "High error rate", validated.Name) + + _, err = validateOpsAlertRulePayload(map[string]json.RawMessage{}) + require.Error(t, err) + + require.True(t, isPercentOrRateMetric("error_rate")) + require.False(t, isPercentOrRateMetric("concurrency_queue_depth")) +} + +func TestOpsWSHelpers(t *testing.T) { + prefixes, invalid := parseTrustedProxyList("10.0.0.0/8,invalid") + require.Len(t, prefixes, 1) + require.Len(t, invalid, 1) + + host := hostWithoutPort("example.com:443") + require.Equal(t, "example.com", host) + + addr := netip.MustParseAddr("10.0.0.1") + require.True(t, isAddrInTrustedProxies(addr, prefixes)) + require.False(t, isAddrInTrustedProxies(netip.MustParseAddr("192.168.0.1"), prefixes)) +} diff --git a/backend/internal/handler/admin/admin_service_stub_test.go b/backend/internal/handler/admin/admin_service_stub_test.go new file mode 100644 index 00000000..457d52fc --- /dev/null +++ b/backend/internal/handler/admin/admin_service_stub_test.go @@ -0,0 +1,290 @@ +package admin + +import ( + "context" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" +) + +type stubAdminService struct { + users []service.User + apiKeys []service.APIKey + groups []service.Group + accounts []service.Account + proxies []service.Proxy + proxyCounts []service.ProxyWithAccountCount + redeems []service.RedeemCode +} + +func newStubAdminService() *stubAdminService { + now := time.Now().UTC() + user := service.User{ + ID: 1, + Email: "user@example.com", + Role: service.RoleUser, + Status: service.StatusActive, + CreatedAt: now, + UpdatedAt: now, + } + apiKey := service.APIKey{ + ID: 10, + UserID: user.ID, + Key: "sk-test", + Name: "test", + Status: service.StatusActive, + CreatedAt: now, + UpdatedAt: now, + } + group := service.Group{ + ID: 2, + Name: "group", + Platform: service.PlatformAnthropic, + Status: service.StatusActive, + CreatedAt: now, + UpdatedAt: now, + } + account := service.Account{ + ID: 3, + Name: "account", + Platform: service.PlatformAnthropic, + Type: service.AccountTypeOAuth, + Status: service.StatusActive, + CreatedAt: now, + UpdatedAt: now, + } + proxy := service.Proxy{ + ID: 4, + Name: "proxy", + Protocol: "http", + Host: "127.0.0.1", + Port: 8080, + Status: service.StatusActive, + CreatedAt: now, + UpdatedAt: now, + } + redeem := service.RedeemCode{ + ID: 5, + Code: "R-TEST", + Type: service.RedeemTypeBalance, + Value: 10, + Status: service.StatusUnused, + CreatedAt: now, + } + return &stubAdminService{ + users: []service.User{user}, + apiKeys: []service.APIKey{apiKey}, + groups: []service.Group{group}, + accounts: []service.Account{account}, + proxies: []service.Proxy{proxy}, + proxyCounts: []service.ProxyWithAccountCount{{Proxy: proxy, AccountCount: 1}}, + redeems: []service.RedeemCode{redeem}, + } +} + +func (s *stubAdminService) ListUsers(ctx context.Context, page, pageSize int, filters service.UserListFilters) ([]service.User, int64, error) { + return s.users, int64(len(s.users)), nil +} + +func (s *stubAdminService) GetUser(ctx context.Context, id int64) (*service.User, error) { + for i := range s.users { + if s.users[i].ID == id { + return &s.users[i], nil + } + } + user := service.User{ID: id, Email: "user@example.com", Status: service.StatusActive} + return &user, nil +} + +func (s *stubAdminService) CreateUser(ctx context.Context, input *service.CreateUserInput) (*service.User, error) { + user := service.User{ID: 100, Email: input.Email, Status: service.StatusActive} + return &user, nil +} + +func (s *stubAdminService) UpdateUser(ctx context.Context, id int64, input *service.UpdateUserInput) (*service.User, error) { + user := service.User{ID: id, Email: "updated@example.com", Status: service.StatusActive} + return &user, nil +} + +func (s *stubAdminService) DeleteUser(ctx context.Context, id int64) error { + return nil +} + +func (s *stubAdminService) UpdateUserBalance(ctx context.Context, userID int64, balance float64, operation string, notes string) (*service.User, error) { + user := service.User{ID: userID, Balance: balance, Status: service.StatusActive} + return &user, nil +} + +func (s *stubAdminService) GetUserAPIKeys(ctx context.Context, userID int64, page, pageSize int) ([]service.APIKey, int64, error) { + return s.apiKeys, int64(len(s.apiKeys)), nil +} + +func (s *stubAdminService) GetUserUsageStats(ctx context.Context, userID int64, period string) (any, error) { + return map[string]any{"user_id": userID}, nil +} + +func (s *stubAdminService) ListGroups(ctx context.Context, page, pageSize int, platform, status, search string, isExclusive *bool) ([]service.Group, int64, error) { + return s.groups, int64(len(s.groups)), nil +} + +func (s *stubAdminService) GetAllGroups(ctx context.Context) ([]service.Group, error) { + return s.groups, nil +} + +func (s *stubAdminService) GetAllGroupsByPlatform(ctx context.Context, platform string) ([]service.Group, error) { + return s.groups, nil +} + +func (s *stubAdminService) GetGroup(ctx context.Context, id int64) (*service.Group, error) { + group := service.Group{ID: id, Name: "group", Status: service.StatusActive} + return &group, nil +} + +func (s *stubAdminService) CreateGroup(ctx context.Context, input *service.CreateGroupInput) (*service.Group, error) { + group := service.Group{ID: 200, Name: input.Name, Status: service.StatusActive} + return &group, nil +} + +func (s *stubAdminService) UpdateGroup(ctx context.Context, id int64, input *service.UpdateGroupInput) (*service.Group, error) { + group := service.Group{ID: id, Name: input.Name, Status: service.StatusActive} + return &group, nil +} + +func (s *stubAdminService) DeleteGroup(ctx context.Context, id int64) error { + return nil +} + +func (s *stubAdminService) GetGroupAPIKeys(ctx context.Context, groupID int64, page, pageSize int) ([]service.APIKey, int64, error) { + return s.apiKeys, int64(len(s.apiKeys)), nil +} + +func (s *stubAdminService) ListAccounts(ctx context.Context, page, pageSize int, platform, accountType, status, search string) ([]service.Account, int64, error) { + return s.accounts, int64(len(s.accounts)), nil +} + +func (s *stubAdminService) GetAccount(ctx context.Context, id int64) (*service.Account, error) { + account := service.Account{ID: id, Name: "account", Status: service.StatusActive} + return &account, nil +} + +func (s *stubAdminService) GetAccountsByIDs(ctx context.Context, ids []int64) ([]*service.Account, error) { + out := make([]*service.Account, 0, len(ids)) + for _, id := range ids { + account := service.Account{ID: id, Name: "account", Status: service.StatusActive} + out = append(out, &account) + } + return out, nil +} + +func (s *stubAdminService) CreateAccount(ctx context.Context, input *service.CreateAccountInput) (*service.Account, error) { + account := service.Account{ID: 300, Name: input.Name, Status: service.StatusActive} + return &account, nil +} + +func (s *stubAdminService) UpdateAccount(ctx context.Context, id int64, input *service.UpdateAccountInput) (*service.Account, error) { + account := service.Account{ID: id, Name: input.Name, Status: service.StatusActive} + return &account, nil +} + +func (s *stubAdminService) DeleteAccount(ctx context.Context, id int64) error { + return nil +} + +func (s *stubAdminService) RefreshAccountCredentials(ctx context.Context, id int64) (*service.Account, error) { + account := service.Account{ID: id, Name: "account", Status: service.StatusActive} + return &account, nil +} + +func (s *stubAdminService) ClearAccountError(ctx context.Context, id int64) (*service.Account, error) { + account := service.Account{ID: id, Name: "account", Status: service.StatusActive} + return &account, nil +} + +func (s *stubAdminService) SetAccountSchedulable(ctx context.Context, id int64, schedulable bool) (*service.Account, error) { + account := service.Account{ID: id, Name: "account", Status: service.StatusActive, Schedulable: schedulable} + return &account, nil +} + +func (s *stubAdminService) BulkUpdateAccounts(ctx context.Context, input *service.BulkUpdateAccountsInput) (*service.BulkUpdateAccountsResult, error) { + return &service.BulkUpdateAccountsResult{Success: 1, Failed: 0, SuccessIDs: []int64{1}}, nil +} + +func (s *stubAdminService) ListProxies(ctx context.Context, page, pageSize int, protocol, status, search string) ([]service.Proxy, int64, error) { + return s.proxies, int64(len(s.proxies)), nil +} + +func (s *stubAdminService) ListProxiesWithAccountCount(ctx context.Context, page, pageSize int, protocol, status, search string) ([]service.ProxyWithAccountCount, int64, error) { + return s.proxyCounts, int64(len(s.proxyCounts)), nil +} + +func (s *stubAdminService) GetAllProxies(ctx context.Context) ([]service.Proxy, error) { + return s.proxies, nil +} + +func (s *stubAdminService) GetAllProxiesWithAccountCount(ctx context.Context) ([]service.ProxyWithAccountCount, error) { + return s.proxyCounts, nil +} + +func (s *stubAdminService) GetProxy(ctx context.Context, id int64) (*service.Proxy, error) { + proxy := service.Proxy{ID: id, Name: "proxy", Status: service.StatusActive} + return &proxy, nil +} + +func (s *stubAdminService) CreateProxy(ctx context.Context, input *service.CreateProxyInput) (*service.Proxy, error) { + proxy := service.Proxy{ID: 400, Name: input.Name, Status: service.StatusActive} + return &proxy, nil +} + +func (s *stubAdminService) UpdateProxy(ctx context.Context, id int64, input *service.UpdateProxyInput) (*service.Proxy, error) { + proxy := service.Proxy{ID: id, Name: input.Name, Status: service.StatusActive} + return &proxy, nil +} + +func (s *stubAdminService) DeleteProxy(ctx context.Context, id int64) error { + return nil +} + +func (s *stubAdminService) BatchDeleteProxies(ctx context.Context, ids []int64) (*service.ProxyBatchDeleteResult, error) { + return &service.ProxyBatchDeleteResult{DeletedIDs: ids}, nil +} + +func (s *stubAdminService) GetProxyAccounts(ctx context.Context, proxyID int64) ([]service.ProxyAccountSummary, error) { + return []service.ProxyAccountSummary{{ID: 1, Name: "account"}}, nil +} + +func (s *stubAdminService) CheckProxyExists(ctx context.Context, host string, port int, username, password string) (bool, error) { + return false, nil +} + +func (s *stubAdminService) TestProxy(ctx context.Context, id int64) (*service.ProxyTestResult, error) { + return &service.ProxyTestResult{Success: true, Message: "ok"}, nil +} + +func (s *stubAdminService) ListRedeemCodes(ctx context.Context, page, pageSize int, codeType, status, search string) ([]service.RedeemCode, int64, error) { + return s.redeems, int64(len(s.redeems)), nil +} + +func (s *stubAdminService) GetRedeemCode(ctx context.Context, id int64) (*service.RedeemCode, error) { + code := service.RedeemCode{ID: id, Code: "R-TEST", Status: service.StatusUnused} + return &code, nil +} + +func (s *stubAdminService) GenerateRedeemCodes(ctx context.Context, input *service.GenerateRedeemCodesInput) ([]service.RedeemCode, error) { + return s.redeems, nil +} + +func (s *stubAdminService) DeleteRedeemCode(ctx context.Context, id int64) error { + return nil +} + +func (s *stubAdminService) BatchDeleteRedeemCodes(ctx context.Context, ids []int64) (int64, error) { + return int64(len(ids)), nil +} + +func (s *stubAdminService) ExpireRedeemCode(ctx context.Context, id int64) (*service.RedeemCode, error) { + code := service.RedeemCode{ID: id, Code: "R-TEST", Status: service.StatusUsed} + return &code, nil +} + +// Ensure stub implements interface. +var _ service.AdminService = (*stubAdminService)(nil) diff --git a/backend/internal/handler/admin/dashboard_handler.go b/backend/internal/handler/admin/dashboard_handler.go index 3f07403d..18365186 100644 --- a/backend/internal/handler/admin/dashboard_handler.go +++ b/backend/internal/handler/admin/dashboard_handler.go @@ -186,7 +186,7 @@ func (h *DashboardHandler) GetRealtimeMetrics(c *gin.Context) { // GetUsageTrend handles getting usage trend data // GET /api/v1/admin/dashboard/trend -// Query params: start_date, end_date (YYYY-MM-DD), granularity (day/hour), user_id, api_key_id, model, account_id, group_id, stream +// Query params: start_date, end_date (YYYY-MM-DD), granularity (day/hour), user_id, api_key_id, model, account_id, group_id, stream, billing_type func (h *DashboardHandler) GetUsageTrend(c *gin.Context) { startTime, endTime := parseTimeRange(c) granularity := c.DefaultQuery("granularity", "day") @@ -195,6 +195,7 @@ func (h *DashboardHandler) GetUsageTrend(c *gin.Context) { var userID, apiKeyID, accountID, groupID int64 var model string var stream *bool + var billingType *int8 if userIDStr := c.Query("user_id"); userIDStr != "" { if id, err := strconv.ParseInt(userIDStr, 10, 64); err == nil { @@ -224,8 +225,17 @@ func (h *DashboardHandler) GetUsageTrend(c *gin.Context) { stream = &streamVal } } + if billingTypeStr := c.Query("billing_type"); billingTypeStr != "" { + if v, err := strconv.ParseInt(billingTypeStr, 10, 8); err == nil { + bt := int8(v) + billingType = &bt + } else { + response.BadRequest(c, "Invalid billing_type") + return + } + } - trend, err := h.dashboardService.GetUsageTrendWithFilters(c.Request.Context(), startTime, endTime, granularity, userID, apiKeyID, accountID, groupID, model, stream) + trend, err := h.dashboardService.GetUsageTrendWithFilters(c.Request.Context(), startTime, endTime, granularity, userID, apiKeyID, accountID, groupID, model, stream, billingType) if err != nil { response.Error(c, 500, "Failed to get usage trend") return @@ -241,13 +251,14 @@ func (h *DashboardHandler) GetUsageTrend(c *gin.Context) { // GetModelStats handles getting model usage statistics // GET /api/v1/admin/dashboard/models -// Query params: start_date, end_date (YYYY-MM-DD), user_id, api_key_id, account_id, group_id, stream +// Query params: start_date, end_date (YYYY-MM-DD), user_id, api_key_id, account_id, group_id, stream, billing_type func (h *DashboardHandler) GetModelStats(c *gin.Context) { startTime, endTime := parseTimeRange(c) // Parse optional filter params var userID, apiKeyID, accountID, groupID int64 var stream *bool + var billingType *int8 if userIDStr := c.Query("user_id"); userIDStr != "" { if id, err := strconv.ParseInt(userIDStr, 10, 64); err == nil { @@ -274,8 +285,17 @@ func (h *DashboardHandler) GetModelStats(c *gin.Context) { stream = &streamVal } } + if billingTypeStr := c.Query("billing_type"); billingTypeStr != "" { + if v, err := strconv.ParseInt(billingTypeStr, 10, 8); err == nil { + bt := int8(v) + billingType = &bt + } else { + response.BadRequest(c, "Invalid billing_type") + return + } + } - stats, err := h.dashboardService.GetModelStatsWithFilters(c.Request.Context(), startTime, endTime, userID, apiKeyID, accountID, groupID, stream) + stats, err := h.dashboardService.GetModelStatsWithFilters(c.Request.Context(), startTime, endTime, userID, apiKeyID, accountID, groupID, stream, billingType) if err != nil { response.Error(c, 500, "Failed to get model statistics") return diff --git a/backend/internal/handler/admin/usage_cleanup_handler_test.go b/backend/internal/handler/admin/usage_cleanup_handler_test.go new file mode 100644 index 00000000..d8684c39 --- /dev/null +++ b/backend/internal/handler/admin/usage_cleanup_handler_test.go @@ -0,0 +1,377 @@ +package admin + +import ( + "bytes" + "context" + "encoding/json" + "database/sql" + "errors" + "net/http" + "net/http/httptest" + "sync" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/handler/dto" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + "github.com/Wei-Shaw/sub2api/internal/server/middleware" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/require" +) + +type cleanupRepoStub struct { + mu sync.Mutex + created []*service.UsageCleanupTask + listTasks []service.UsageCleanupTask + listResult *pagination.PaginationResult + listErr error + statusByID map[int64]string +} + +func (s *cleanupRepoStub) CreateTask(ctx context.Context, task *service.UsageCleanupTask) error { + if task == nil { + return nil + } + s.mu.Lock() + defer s.mu.Unlock() + if task.ID == 0 { + task.ID = int64(len(s.created) + 1) + } + if task.CreatedAt.IsZero() { + task.CreatedAt = time.Now().UTC() + } + task.UpdatedAt = task.CreatedAt + clone := *task + s.created = append(s.created, &clone) + return nil +} + +func (s *cleanupRepoStub) ListTasks(ctx context.Context, params pagination.PaginationParams) ([]service.UsageCleanupTask, *pagination.PaginationResult, error) { + s.mu.Lock() + defer s.mu.Unlock() + return s.listTasks, s.listResult, s.listErr +} + +func (s *cleanupRepoStub) ClaimNextPendingTask(ctx context.Context, staleRunningAfterSeconds int64) (*service.UsageCleanupTask, error) { + return nil, nil +} + +func (s *cleanupRepoStub) GetTaskStatus(ctx context.Context, taskID int64) (string, error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.statusByID == nil { + return "", sql.ErrNoRows + } + status, ok := s.statusByID[taskID] + if !ok { + return "", sql.ErrNoRows + } + return status, nil +} + +func (s *cleanupRepoStub) UpdateTaskProgress(ctx context.Context, taskID int64, deletedRows int64) error { + return nil +} + +func (s *cleanupRepoStub) CancelTask(ctx context.Context, taskID int64, canceledBy int64) (bool, error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.statusByID == nil { + s.statusByID = map[int64]string{} + } + status := s.statusByID[taskID] + if status != service.UsageCleanupStatusPending && status != service.UsageCleanupStatusRunning { + return false, nil + } + s.statusByID[taskID] = service.UsageCleanupStatusCanceled + return true, nil +} + +func (s *cleanupRepoStub) MarkTaskSucceeded(ctx context.Context, taskID int64, deletedRows int64) error { + return nil +} + +func (s *cleanupRepoStub) MarkTaskFailed(ctx context.Context, taskID int64, deletedRows int64, errorMsg string) error { + return nil +} + +func (s *cleanupRepoStub) DeleteUsageLogsBatch(ctx context.Context, filters service.UsageCleanupFilters, limit int) (int64, error) { + return 0, nil +} + +var _ service.UsageCleanupRepository = (*cleanupRepoStub)(nil) + +func setupCleanupRouter(cleanupService *service.UsageCleanupService, userID int64) *gin.Engine { + gin.SetMode(gin.TestMode) + router := gin.New() + if userID > 0 { + router.Use(func(c *gin.Context) { + c.Set(string(middleware.ContextKeyUser), middleware.AuthSubject{UserID: userID}) + c.Next() + }) + } + + handler := NewUsageHandler(nil, nil, nil, cleanupService) + router.POST("/api/v1/admin/usage/cleanup-tasks", handler.CreateCleanupTask) + router.GET("/api/v1/admin/usage/cleanup-tasks", handler.ListCleanupTasks) + router.POST("/api/v1/admin/usage/cleanup-tasks/:id/cancel", handler.CancelCleanupTask) + return router +} + +func TestUsageHandlerCreateCleanupTaskUnauthorized(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, MaxRangeDays: 31}} + cleanupService := service.NewUsageCleanupService(repo, nil, nil, cfg) + router := setupCleanupRouter(cleanupService, 0) + + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/usage/cleanup-tasks", bytes.NewBufferString(`{}`)) + req.Header.Set("Content-Type", "application/json") + recorder := httptest.NewRecorder() + router.ServeHTTP(recorder, req) + + require.Equal(t, http.StatusUnauthorized, recorder.Code) +} + +func TestUsageHandlerCreateCleanupTaskUnavailable(t *testing.T) { + router := setupCleanupRouter(nil, 1) + + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/usage/cleanup-tasks", bytes.NewBufferString(`{}`)) + req.Header.Set("Content-Type", "application/json") + recorder := httptest.NewRecorder() + router.ServeHTTP(recorder, req) + + require.Equal(t, http.StatusServiceUnavailable, recorder.Code) +} + +func TestUsageHandlerCreateCleanupTaskBindError(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, MaxRangeDays: 31}} + cleanupService := service.NewUsageCleanupService(repo, nil, nil, cfg) + router := setupCleanupRouter(cleanupService, 88) + + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/usage/cleanup-tasks", bytes.NewBufferString("{bad-json")) + req.Header.Set("Content-Type", "application/json") + recorder := httptest.NewRecorder() + router.ServeHTTP(recorder, req) + + require.Equal(t, http.StatusBadRequest, recorder.Code) +} + +func TestUsageHandlerCreateCleanupTaskMissingRange(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, MaxRangeDays: 31}} + cleanupService := service.NewUsageCleanupService(repo, nil, nil, cfg) + router := setupCleanupRouter(cleanupService, 88) + + payload := map[string]any{ + "start_date": "2024-01-01", + "timezone": "UTC", + } + body, err := json.Marshal(payload) + require.NoError(t, err) + + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/usage/cleanup-tasks", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + recorder := httptest.NewRecorder() + router.ServeHTTP(recorder, req) + + require.Equal(t, http.StatusBadRequest, recorder.Code) +} + +func TestUsageHandlerCreateCleanupTaskInvalidDate(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, MaxRangeDays: 31}} + cleanupService := service.NewUsageCleanupService(repo, nil, nil, cfg) + router := setupCleanupRouter(cleanupService, 88) + + payload := map[string]any{ + "start_date": "2024-13-01", + "end_date": "2024-01-02", + "timezone": "UTC", + } + body, err := json.Marshal(payload) + require.NoError(t, err) + + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/usage/cleanup-tasks", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + recorder := httptest.NewRecorder() + router.ServeHTTP(recorder, req) + + require.Equal(t, http.StatusBadRequest, recorder.Code) +} + +func TestUsageHandlerCreateCleanupTaskInvalidEndDate(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, MaxRangeDays: 31}} + cleanupService := service.NewUsageCleanupService(repo, nil, nil, cfg) + router := setupCleanupRouter(cleanupService, 88) + + payload := map[string]any{ + "start_date": "2024-01-01", + "end_date": "2024-02-40", + "timezone": "UTC", + } + body, err := json.Marshal(payload) + require.NoError(t, err) + + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/usage/cleanup-tasks", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + recorder := httptest.NewRecorder() + router.ServeHTTP(recorder, req) + + require.Equal(t, http.StatusBadRequest, recorder.Code) +} + +func TestUsageHandlerCreateCleanupTaskSuccess(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, MaxRangeDays: 31}} + cleanupService := service.NewUsageCleanupService(repo, nil, nil, cfg) + router := setupCleanupRouter(cleanupService, 99) + + payload := map[string]any{ + "start_date": " 2024-01-01 ", + "end_date": "2024-01-02", + "timezone": "UTC", + "model": "gpt-4", + } + body, err := json.Marshal(payload) + require.NoError(t, err) + + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/usage/cleanup-tasks", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + recorder := httptest.NewRecorder() + router.ServeHTTP(recorder, req) + + require.Equal(t, http.StatusOK, recorder.Code) + + var resp response.Response + require.NoError(t, json.Unmarshal(recorder.Body.Bytes(), &resp)) + require.Equal(t, 0, resp.Code) + + repo.mu.Lock() + defer repo.mu.Unlock() + require.Len(t, repo.created, 1) + created := repo.created[0] + require.Equal(t, int64(99), created.CreatedBy) + require.NotNil(t, created.Filters.Model) + require.Equal(t, "gpt-4", *created.Filters.Model) + + start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + end := time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC).Add(24*time.Hour - time.Nanosecond) + require.True(t, created.Filters.StartTime.Equal(start)) + require.True(t, created.Filters.EndTime.Equal(end)) +} + +func TestUsageHandlerListCleanupTasksUnavailable(t *testing.T) { + router := setupCleanupRouter(nil, 0) + + req := httptest.NewRequest(http.MethodGet, "/api/v1/admin/usage/cleanup-tasks", nil) + recorder := httptest.NewRecorder() + router.ServeHTTP(recorder, req) + + require.Equal(t, http.StatusServiceUnavailable, recorder.Code) +} + +func TestUsageHandlerListCleanupTasksSuccess(t *testing.T) { + repo := &cleanupRepoStub{} + repo.listTasks = []service.UsageCleanupTask{ + { + ID: 7, + Status: service.UsageCleanupStatusSucceeded, + CreatedBy: 4, + }, + } + repo.listResult = &pagination.PaginationResult{Total: 1, Page: 1, PageSize: 20, Pages: 1} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, MaxRangeDays: 31}} + cleanupService := service.NewUsageCleanupService(repo, nil, nil, cfg) + router := setupCleanupRouter(cleanupService, 1) + + req := httptest.NewRequest(http.MethodGet, "/api/v1/admin/usage/cleanup-tasks", nil) + recorder := httptest.NewRecorder() + router.ServeHTTP(recorder, req) + + require.Equal(t, http.StatusOK, recorder.Code) + + var resp struct { + Code int `json:"code"` + Data struct { + Items []dto.UsageCleanupTask `json:"items"` + Total int64 `json:"total"` + Page int `json:"page"` + } `json:"data"` + } + require.NoError(t, json.Unmarshal(recorder.Body.Bytes(), &resp)) + require.Equal(t, 0, resp.Code) + require.Len(t, resp.Data.Items, 1) + require.Equal(t, int64(7), resp.Data.Items[0].ID) + require.Equal(t, int64(1), resp.Data.Total) + require.Equal(t, 1, resp.Data.Page) +} + +func TestUsageHandlerListCleanupTasksError(t *testing.T) { + repo := &cleanupRepoStub{listErr: errors.New("boom")} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, MaxRangeDays: 31}} + cleanupService := service.NewUsageCleanupService(repo, nil, nil, cfg) + router := setupCleanupRouter(cleanupService, 1) + + req := httptest.NewRequest(http.MethodGet, "/api/v1/admin/usage/cleanup-tasks", nil) + recorder := httptest.NewRecorder() + router.ServeHTTP(recorder, req) + + require.Equal(t, http.StatusInternalServerError, recorder.Code) +} + +func TestUsageHandlerCancelCleanupTaskUnauthorized(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + cleanupService := service.NewUsageCleanupService(repo, nil, nil, cfg) + router := setupCleanupRouter(cleanupService, 0) + + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/usage/cleanup-tasks/1/cancel", nil) + rec := httptest.NewRecorder() + router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusUnauthorized, rec.Code) +} + +func TestUsageHandlerCancelCleanupTaskNotFound(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + cleanupService := service.NewUsageCleanupService(repo, nil, nil, cfg) + router := setupCleanupRouter(cleanupService, 1) + + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/usage/cleanup-tasks/999/cancel", nil) + rec := httptest.NewRecorder() + router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusNotFound, rec.Code) +} + +func TestUsageHandlerCancelCleanupTaskConflict(t *testing.T) { + repo := &cleanupRepoStub{statusByID: map[int64]string{2: service.UsageCleanupStatusSucceeded}} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + cleanupService := service.NewUsageCleanupService(repo, nil, nil, cfg) + router := setupCleanupRouter(cleanupService, 1) + + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/usage/cleanup-tasks/2/cancel", nil) + rec := httptest.NewRecorder() + router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusConflict, rec.Code) +} + +func TestUsageHandlerCancelCleanupTaskSuccess(t *testing.T) { + repo := &cleanupRepoStub{statusByID: map[int64]string{3: service.UsageCleanupStatusPending}} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + cleanupService := service.NewUsageCleanupService(repo, nil, nil, cfg) + router := setupCleanupRouter(cleanupService, 1) + + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/usage/cleanup-tasks/3/cancel", nil) + rec := httptest.NewRecorder() + router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusOK, rec.Code) +} diff --git a/backend/internal/handler/admin/usage_handler.go b/backend/internal/handler/admin/usage_handler.go index c7b983f1..81aa78e1 100644 --- a/backend/internal/handler/admin/usage_handler.go +++ b/backend/internal/handler/admin/usage_handler.go @@ -1,7 +1,10 @@ package admin import ( + "log" + "net/http" "strconv" + "strings" "time" "github.com/Wei-Shaw/sub2api/internal/handler/dto" @@ -9,6 +12,7 @@ import ( "github.com/Wei-Shaw/sub2api/internal/pkg/response" "github.com/Wei-Shaw/sub2api/internal/pkg/timezone" "github.com/Wei-Shaw/sub2api/internal/pkg/usagestats" + "github.com/Wei-Shaw/sub2api/internal/server/middleware" "github.com/Wei-Shaw/sub2api/internal/service" "github.com/gin-gonic/gin" @@ -16,9 +20,10 @@ import ( // UsageHandler handles admin usage-related requests type UsageHandler struct { - usageService *service.UsageService - apiKeyService *service.APIKeyService - adminService service.AdminService + usageService *service.UsageService + apiKeyService *service.APIKeyService + adminService service.AdminService + cleanupService *service.UsageCleanupService } // NewUsageHandler creates a new admin usage handler @@ -26,14 +31,30 @@ func NewUsageHandler( usageService *service.UsageService, apiKeyService *service.APIKeyService, adminService service.AdminService, + cleanupService *service.UsageCleanupService, ) *UsageHandler { return &UsageHandler{ - usageService: usageService, - apiKeyService: apiKeyService, - adminService: adminService, + usageService: usageService, + apiKeyService: apiKeyService, + adminService: adminService, + cleanupService: cleanupService, } } +// CreateUsageCleanupTaskRequest represents cleanup task creation request +type CreateUsageCleanupTaskRequest struct { + StartDate string `json:"start_date"` + EndDate string `json:"end_date"` + UserID *int64 `json:"user_id"` + APIKeyID *int64 `json:"api_key_id"` + AccountID *int64 `json:"account_id"` + GroupID *int64 `json:"group_id"` + Model *string `json:"model"` + Stream *bool `json:"stream"` + BillingType *int8 `json:"billing_type"` + Timezone string `json:"timezone"` +} + // List handles listing all usage records with filters // GET /api/v1/admin/usage func (h *UsageHandler) List(c *gin.Context) { @@ -344,3 +365,162 @@ func (h *UsageHandler) SearchAPIKeys(c *gin.Context) { response.Success(c, result) } + +// ListCleanupTasks handles listing usage cleanup tasks +// GET /api/v1/admin/usage/cleanup-tasks +func (h *UsageHandler) ListCleanupTasks(c *gin.Context) { + if h.cleanupService == nil { + response.Error(c, http.StatusServiceUnavailable, "Usage cleanup service unavailable") + return + } + operator := int64(0) + if subject, ok := middleware.GetAuthSubjectFromContext(c); ok { + operator = subject.UserID + } + page, pageSize := response.ParsePagination(c) + log.Printf("[UsageCleanup] 请求清理任务列表: operator=%d page=%d page_size=%d", operator, page, pageSize) + params := pagination.PaginationParams{Page: page, PageSize: pageSize} + tasks, result, err := h.cleanupService.ListTasks(c.Request.Context(), params) + if err != nil { + log.Printf("[UsageCleanup] 查询清理任务列表失败: operator=%d page=%d page_size=%d err=%v", operator, page, pageSize, err) + response.ErrorFrom(c, err) + return + } + out := make([]dto.UsageCleanupTask, 0, len(tasks)) + for i := range tasks { + out = append(out, *dto.UsageCleanupTaskFromService(&tasks[i])) + } + log.Printf("[UsageCleanup] 返回清理任务列表: operator=%d total=%d items=%d page=%d page_size=%d", operator, result.Total, len(out), page, pageSize) + response.Paginated(c, out, result.Total, page, pageSize) +} + +// CreateCleanupTask handles creating a usage cleanup task +// POST /api/v1/admin/usage/cleanup-tasks +func (h *UsageHandler) CreateCleanupTask(c *gin.Context) { + if h.cleanupService == nil { + response.Error(c, http.StatusServiceUnavailable, "Usage cleanup service unavailable") + return + } + subject, ok := middleware.GetAuthSubjectFromContext(c) + if !ok || subject.UserID <= 0 { + response.Unauthorized(c, "Unauthorized") + return + } + + var req CreateUsageCleanupTaskRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + req.StartDate = strings.TrimSpace(req.StartDate) + req.EndDate = strings.TrimSpace(req.EndDate) + if req.StartDate == "" || req.EndDate == "" { + response.BadRequest(c, "start_date and end_date are required") + return + } + + startTime, err := timezone.ParseInUserLocation("2006-01-02", req.StartDate, req.Timezone) + if err != nil { + response.BadRequest(c, "Invalid start_date format, use YYYY-MM-DD") + return + } + endTime, err := timezone.ParseInUserLocation("2006-01-02", req.EndDate, req.Timezone) + if err != nil { + response.BadRequest(c, "Invalid end_date format, use YYYY-MM-DD") + return + } + endTime = endTime.Add(24*time.Hour - time.Nanosecond) + + filters := service.UsageCleanupFilters{ + StartTime: startTime, + EndTime: endTime, + UserID: req.UserID, + APIKeyID: req.APIKeyID, + AccountID: req.AccountID, + GroupID: req.GroupID, + Model: req.Model, + Stream: req.Stream, + BillingType: req.BillingType, + } + + var userID any + if filters.UserID != nil { + userID = *filters.UserID + } + var apiKeyID any + if filters.APIKeyID != nil { + apiKeyID = *filters.APIKeyID + } + var accountID any + if filters.AccountID != nil { + accountID = *filters.AccountID + } + var groupID any + if filters.GroupID != nil { + groupID = *filters.GroupID + } + var model any + if filters.Model != nil { + model = *filters.Model + } + var stream any + if filters.Stream != nil { + stream = *filters.Stream + } + var billingType any + if filters.BillingType != nil { + billingType = *filters.BillingType + } + + log.Printf("[UsageCleanup] 请求创建清理任务: operator=%d start=%s end=%s user_id=%v api_key_id=%v account_id=%v group_id=%v model=%v stream=%v billing_type=%v tz=%q", + subject.UserID, + filters.StartTime.Format(time.RFC3339), + filters.EndTime.Format(time.RFC3339), + userID, + apiKeyID, + accountID, + groupID, + model, + stream, + billingType, + req.Timezone, + ) + + task, err := h.cleanupService.CreateTask(c.Request.Context(), filters, subject.UserID) + if err != nil { + log.Printf("[UsageCleanup] 创建清理任务失败: operator=%d err=%v", subject.UserID, err) + response.ErrorFrom(c, err) + return + } + + log.Printf("[UsageCleanup] 清理任务已创建: task=%d operator=%d status=%s", task.ID, subject.UserID, task.Status) + response.Success(c, dto.UsageCleanupTaskFromService(task)) +} + +// CancelCleanupTask handles canceling a usage cleanup task +// POST /api/v1/admin/usage/cleanup-tasks/:id/cancel +func (h *UsageHandler) CancelCleanupTask(c *gin.Context) { + if h.cleanupService == nil { + response.Error(c, http.StatusServiceUnavailable, "Usage cleanup service unavailable") + return + } + subject, ok := middleware.GetAuthSubjectFromContext(c) + if !ok || subject.UserID <= 0 { + response.Unauthorized(c, "Unauthorized") + return + } + idStr := strings.TrimSpace(c.Param("id")) + taskID, err := strconv.ParseInt(idStr, 10, 64) + if err != nil || taskID <= 0 { + response.BadRequest(c, "Invalid task id") + return + } + log.Printf("[UsageCleanup] 请求取消清理任务: task=%d operator=%d", taskID, subject.UserID) + if err := h.cleanupService.CancelTask(c.Request.Context(), taskID, subject.UserID); err != nil { + log.Printf("[UsageCleanup] 取消清理任务失败: task=%d operator=%d err=%v", taskID, subject.UserID, err) + response.ErrorFrom(c, err) + return + } + log.Printf("[UsageCleanup] 清理任务已取消: task=%d operator=%d", taskID, subject.UserID) + response.Success(c, gin.H{"id": taskID, "status": service.UsageCleanupStatusCanceled}) +} diff --git a/backend/internal/handler/dto/mappers.go b/backend/internal/handler/dto/mappers.go index 4d59ddff..f43fac27 100644 --- a/backend/internal/handler/dto/mappers.go +++ b/backend/internal/handler/dto/mappers.go @@ -340,6 +340,36 @@ func UsageLogFromServiceAdmin(l *service.UsageLog) *UsageLog { return usageLogFromServiceBase(l, AccountSummaryFromService(l.Account), true) } +func UsageCleanupTaskFromService(task *service.UsageCleanupTask) *UsageCleanupTask { + if task == nil { + return nil + } + return &UsageCleanupTask{ + ID: task.ID, + Status: task.Status, + Filters: UsageCleanupFilters{ + StartTime: task.Filters.StartTime, + EndTime: task.Filters.EndTime, + UserID: task.Filters.UserID, + APIKeyID: task.Filters.APIKeyID, + AccountID: task.Filters.AccountID, + GroupID: task.Filters.GroupID, + Model: task.Filters.Model, + Stream: task.Filters.Stream, + BillingType: task.Filters.BillingType, + }, + CreatedBy: task.CreatedBy, + DeletedRows: task.DeletedRows, + ErrorMessage: task.ErrorMsg, + CanceledBy: task.CanceledBy, + CanceledAt: task.CanceledAt, + StartedAt: task.StartedAt, + FinishedAt: task.FinishedAt, + CreatedAt: task.CreatedAt, + UpdatedAt: task.UpdatedAt, + } +} + func SettingFromService(s *service.Setting) *Setting { if s == nil { return nil diff --git a/backend/internal/handler/dto/types.go b/backend/internal/handler/dto/types.go index 914f2b23..5fa5a3fd 100644 --- a/backend/internal/handler/dto/types.go +++ b/backend/internal/handler/dto/types.go @@ -223,6 +223,33 @@ type UsageLog struct { Subscription *UserSubscription `json:"subscription,omitempty"` } +type UsageCleanupFilters struct { + StartTime time.Time `json:"start_time"` + EndTime time.Time `json:"end_time"` + UserID *int64 `json:"user_id,omitempty"` + APIKeyID *int64 `json:"api_key_id,omitempty"` + AccountID *int64 `json:"account_id,omitempty"` + GroupID *int64 `json:"group_id,omitempty"` + Model *string `json:"model,omitempty"` + Stream *bool `json:"stream,omitempty"` + BillingType *int8 `json:"billing_type,omitempty"` +} + +type UsageCleanupTask struct { + ID int64 `json:"id"` + Status string `json:"status"` + Filters UsageCleanupFilters `json:"filters"` + CreatedBy int64 `json:"created_by"` + DeletedRows int64 `json:"deleted_rows"` + ErrorMessage *string `json:"error_message,omitempty"` + CanceledBy *int64 `json:"canceled_by,omitempty"` + CanceledAt *time.Time `json:"canceled_at,omitempty"` + StartedAt *time.Time `json:"started_at,omitempty"` + FinishedAt *time.Time `json:"finished_at,omitempty"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + // AccountSummary is a minimal account info for usage log display. // It intentionally excludes sensitive fields like Credentials, Proxy, etc. type AccountSummary struct { diff --git a/backend/internal/repository/dashboard_aggregation_repo.go b/backend/internal/repository/dashboard_aggregation_repo.go index 3543e061..59bbd6a3 100644 --- a/backend/internal/repository/dashboard_aggregation_repo.go +++ b/backend/internal/repository/dashboard_aggregation_repo.go @@ -77,6 +77,75 @@ func (r *dashboardAggregationRepository) AggregateRange(ctx context.Context, sta return nil } +func (r *dashboardAggregationRepository) RecomputeRange(ctx context.Context, start, end time.Time) error { + if r == nil || r.sql == nil { + return nil + } + loc := timezone.Location() + startLocal := start.In(loc) + endLocal := end.In(loc) + if !endLocal.After(startLocal) { + return nil + } + + hourStart := startLocal.Truncate(time.Hour) + hourEnd := endLocal.Truncate(time.Hour) + if endLocal.After(hourEnd) { + hourEnd = hourEnd.Add(time.Hour) + } + + dayStart := truncateToDay(startLocal) + dayEnd := truncateToDay(endLocal) + if endLocal.After(dayEnd) { + dayEnd = dayEnd.Add(24 * time.Hour) + } + + // 尽量使用事务保证范围内的一致性(允许在非 *sql.DB 的情况下退化为非事务执行)。 + if db, ok := r.sql.(*sql.DB); ok { + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return err + } + txRepo := newDashboardAggregationRepositoryWithSQL(tx) + if err := txRepo.recomputeRangeInTx(ctx, hourStart, hourEnd, dayStart, dayEnd); err != nil { + _ = tx.Rollback() + return err + } + return tx.Commit() + } + return r.recomputeRangeInTx(ctx, hourStart, hourEnd, dayStart, dayEnd) +} + +func (r *dashboardAggregationRepository) recomputeRangeInTx(ctx context.Context, hourStart, hourEnd, dayStart, dayEnd time.Time) error { + // 先清空范围内桶,再重建(避免仅增量插入导致活跃用户等指标无法回退)。 + if _, err := r.sql.ExecContext(ctx, "DELETE FROM usage_dashboard_hourly WHERE bucket_start >= $1 AND bucket_start < $2", hourStart, hourEnd); err != nil { + return err + } + if _, err := r.sql.ExecContext(ctx, "DELETE FROM usage_dashboard_hourly_users WHERE bucket_start >= $1 AND bucket_start < $2", hourStart, hourEnd); err != nil { + return err + } + if _, err := r.sql.ExecContext(ctx, "DELETE FROM usage_dashboard_daily WHERE bucket_date >= $1::date AND bucket_date < $2::date", dayStart, dayEnd); err != nil { + return err + } + if _, err := r.sql.ExecContext(ctx, "DELETE FROM usage_dashboard_daily_users WHERE bucket_date >= $1::date AND bucket_date < $2::date", dayStart, dayEnd); err != nil { + return err + } + + if err := r.insertHourlyActiveUsers(ctx, hourStart, hourEnd); err != nil { + return err + } + if err := r.insertDailyActiveUsers(ctx, hourStart, hourEnd); err != nil { + return err + } + if err := r.upsertHourlyAggregates(ctx, hourStart, hourEnd); err != nil { + return err + } + if err := r.upsertDailyAggregates(ctx, dayStart, dayEnd); err != nil { + return err + } + return nil +} + func (r *dashboardAggregationRepository) GetAggregationWatermark(ctx context.Context) (time.Time, error) { var ts time.Time query := "SELECT last_aggregated_at FROM usage_dashboard_aggregation_watermark WHERE id = 1" diff --git a/backend/internal/repository/usage_cleanup_repo.go b/backend/internal/repository/usage_cleanup_repo.go new file mode 100644 index 00000000..b703cc9f --- /dev/null +++ b/backend/internal/repository/usage_cleanup_repo.go @@ -0,0 +1,363 @@ +package repository + +import ( + "context" + "database/sql" + "encoding/json" + "errors" + "fmt" + "strings" + + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/service" +) + +type usageCleanupRepository struct { + sql sqlExecutor +} + +func NewUsageCleanupRepository(sqlDB *sql.DB) service.UsageCleanupRepository { + return &usageCleanupRepository{sql: sqlDB} +} + +func (r *usageCleanupRepository) CreateTask(ctx context.Context, task *service.UsageCleanupTask) error { + if task == nil { + return nil + } + filtersJSON, err := json.Marshal(task.Filters) + if err != nil { + return fmt.Errorf("marshal cleanup filters: %w", err) + } + query := ` + INSERT INTO usage_cleanup_tasks ( + status, + filters, + created_by, + deleted_rows + ) VALUES ($1, $2, $3, $4) + RETURNING id, created_at, updated_at + ` + if err := scanSingleRow(ctx, r.sql, query, []any{task.Status, filtersJSON, task.CreatedBy, task.DeletedRows}, &task.ID, &task.CreatedAt, &task.UpdatedAt); err != nil { + return err + } + return nil +} + +func (r *usageCleanupRepository) ListTasks(ctx context.Context, params pagination.PaginationParams) ([]service.UsageCleanupTask, *pagination.PaginationResult, error) { + var total int64 + if err := scanSingleRow(ctx, r.sql, "SELECT COUNT(*) FROM usage_cleanup_tasks", nil, &total); err != nil { + return nil, nil, err + } + if total == 0 { + return []service.UsageCleanupTask{}, paginationResultFromTotal(0, params), nil + } + + query := ` + SELECT id, status, filters, created_by, deleted_rows, error_message, + canceled_by, canceled_at, + started_at, finished_at, created_at, updated_at + FROM usage_cleanup_tasks + ORDER BY created_at DESC + LIMIT $1 OFFSET $2 + ` + rows, err := r.sql.QueryContext(ctx, query, params.Limit(), params.Offset()) + if err != nil { + return nil, nil, err + } + defer rows.Close() + + tasks := make([]service.UsageCleanupTask, 0) + for rows.Next() { + var task service.UsageCleanupTask + var filtersJSON []byte + var errMsg sql.NullString + var canceledBy sql.NullInt64 + var canceledAt sql.NullTime + var startedAt sql.NullTime + var finishedAt sql.NullTime + if err := rows.Scan( + &task.ID, + &task.Status, + &filtersJSON, + &task.CreatedBy, + &task.DeletedRows, + &errMsg, + &canceledBy, + &canceledAt, + &startedAt, + &finishedAt, + &task.CreatedAt, + &task.UpdatedAt, + ); err != nil { + return nil, nil, err + } + if err := json.Unmarshal(filtersJSON, &task.Filters); err != nil { + return nil, nil, fmt.Errorf("parse cleanup filters: %w", err) + } + if errMsg.Valid { + task.ErrorMsg = &errMsg.String + } + if canceledBy.Valid { + v := canceledBy.Int64 + task.CanceledBy = &v + } + if canceledAt.Valid { + task.CanceledAt = &canceledAt.Time + } + if startedAt.Valid { + task.StartedAt = &startedAt.Time + } + if finishedAt.Valid { + task.FinishedAt = &finishedAt.Time + } + tasks = append(tasks, task) + } + if err := rows.Err(); err != nil { + return nil, nil, err + } + return tasks, paginationResultFromTotal(total, params), nil +} + +func (r *usageCleanupRepository) ClaimNextPendingTask(ctx context.Context, staleRunningAfterSeconds int64) (*service.UsageCleanupTask, error) { + if staleRunningAfterSeconds <= 0 { + staleRunningAfterSeconds = 1800 + } + query := ` + WITH next AS ( + SELECT id + FROM usage_cleanup_tasks + WHERE status = $1 + OR ( + status = $2 + AND started_at IS NOT NULL + AND started_at < NOW() - ($3 * interval '1 second') + ) + ORDER BY created_at ASC + LIMIT 1 + FOR UPDATE SKIP LOCKED + ) + UPDATE usage_cleanup_tasks + SET status = $4, + started_at = NOW(), + finished_at = NULL, + error_message = NULL, + updated_at = NOW() + FROM next + WHERE usage_cleanup_tasks.id = next.id + RETURNING id, status, filters, created_by, deleted_rows, error_message, + started_at, finished_at, created_at, updated_at + ` + var task service.UsageCleanupTask + var filtersJSON []byte + var errMsg sql.NullString + var startedAt sql.NullTime + var finishedAt sql.NullTime + if err := scanSingleRow( + ctx, + r.sql, + query, + []any{ + service.UsageCleanupStatusPending, + service.UsageCleanupStatusRunning, + staleRunningAfterSeconds, + service.UsageCleanupStatusRunning, + }, + &task.ID, + &task.Status, + &filtersJSON, + &task.CreatedBy, + &task.DeletedRows, + &errMsg, + &startedAt, + &finishedAt, + &task.CreatedAt, + &task.UpdatedAt, + ); err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + return nil, err + } + if err := json.Unmarshal(filtersJSON, &task.Filters); err != nil { + return nil, fmt.Errorf("parse cleanup filters: %w", err) + } + if errMsg.Valid { + task.ErrorMsg = &errMsg.String + } + if startedAt.Valid { + task.StartedAt = &startedAt.Time + } + if finishedAt.Valid { + task.FinishedAt = &finishedAt.Time + } + return &task, nil +} + +func (r *usageCleanupRepository) GetTaskStatus(ctx context.Context, taskID int64) (string, error) { + var status string + if err := scanSingleRow(ctx, r.sql, "SELECT status FROM usage_cleanup_tasks WHERE id = $1", []any{taskID}, &status); err != nil { + return "", err + } + return status, nil +} + +func (r *usageCleanupRepository) UpdateTaskProgress(ctx context.Context, taskID int64, deletedRows int64) error { + query := ` + UPDATE usage_cleanup_tasks + SET deleted_rows = $1, + updated_at = NOW() + WHERE id = $2 + ` + _, err := r.sql.ExecContext(ctx, query, deletedRows, taskID) + return err +} + +func (r *usageCleanupRepository) CancelTask(ctx context.Context, taskID int64, canceledBy int64) (bool, error) { + query := ` + UPDATE usage_cleanup_tasks + SET status = $1, + canceled_by = $3, + canceled_at = NOW(), + finished_at = NOW(), + error_message = NULL, + updated_at = NOW() + WHERE id = $2 + AND status IN ($4, $5) + RETURNING id + ` + var id int64 + err := scanSingleRow(ctx, r.sql, query, []any{ + service.UsageCleanupStatusCanceled, + taskID, + canceledBy, + service.UsageCleanupStatusPending, + service.UsageCleanupStatusRunning, + }, &id) + if errors.Is(err, sql.ErrNoRows) { + return false, nil + } + if err != nil { + return false, err + } + return true, nil +} + +func (r *usageCleanupRepository) MarkTaskSucceeded(ctx context.Context, taskID int64, deletedRows int64) error { + query := ` + UPDATE usage_cleanup_tasks + SET status = $1, + deleted_rows = $2, + finished_at = NOW(), + updated_at = NOW() + WHERE id = $3 + ` + _, err := r.sql.ExecContext(ctx, query, service.UsageCleanupStatusSucceeded, deletedRows, taskID) + return err +} + +func (r *usageCleanupRepository) MarkTaskFailed(ctx context.Context, taskID int64, deletedRows int64, errorMsg string) error { + query := ` + UPDATE usage_cleanup_tasks + SET status = $1, + deleted_rows = $2, + error_message = $3, + finished_at = NOW(), + updated_at = NOW() + WHERE id = $4 + ` + _, err := r.sql.ExecContext(ctx, query, service.UsageCleanupStatusFailed, deletedRows, errorMsg, taskID) + return err +} + +func (r *usageCleanupRepository) DeleteUsageLogsBatch(ctx context.Context, filters service.UsageCleanupFilters, limit int) (int64, error) { + if filters.StartTime.IsZero() || filters.EndTime.IsZero() { + return 0, fmt.Errorf("cleanup filters missing time range") + } + whereClause, args := buildUsageCleanupWhere(filters) + if whereClause == "" { + return 0, fmt.Errorf("cleanup filters missing time range") + } + args = append(args, limit) + query := fmt.Sprintf(` + WITH target AS ( + SELECT id + FROM usage_logs + WHERE %s + ORDER BY created_at ASC, id ASC + LIMIT $%d + ) + DELETE FROM usage_logs + WHERE id IN (SELECT id FROM target) + RETURNING id + `, whereClause, len(args)) + + rows, err := r.sql.QueryContext(ctx, query, args...) + if err != nil { + return 0, err + } + defer rows.Close() + + var deleted int64 + for rows.Next() { + deleted++ + } + if err := rows.Err(); err != nil { + return 0, err + } + return deleted, nil +} + +func buildUsageCleanupWhere(filters service.UsageCleanupFilters) (string, []any) { + conditions := make([]string, 0, 8) + args := make([]any, 0, 8) + idx := 1 + if !filters.StartTime.IsZero() { + conditions = append(conditions, fmt.Sprintf("created_at >= $%d", idx)) + args = append(args, filters.StartTime) + idx++ + } + if !filters.EndTime.IsZero() { + conditions = append(conditions, fmt.Sprintf("created_at <= $%d", idx)) + args = append(args, filters.EndTime) + idx++ + } + if filters.UserID != nil { + conditions = append(conditions, fmt.Sprintf("user_id = $%d", idx)) + args = append(args, *filters.UserID) + idx++ + } + if filters.APIKeyID != nil { + conditions = append(conditions, fmt.Sprintf("api_key_id = $%d", idx)) + args = append(args, *filters.APIKeyID) + idx++ + } + if filters.AccountID != nil { + conditions = append(conditions, fmt.Sprintf("account_id = $%d", idx)) + args = append(args, *filters.AccountID) + idx++ + } + if filters.GroupID != nil { + conditions = append(conditions, fmt.Sprintf("group_id = $%d", idx)) + args = append(args, *filters.GroupID) + idx++ + } + if filters.Model != nil { + model := strings.TrimSpace(*filters.Model) + if model != "" { + conditions = append(conditions, fmt.Sprintf("model = $%d", idx)) + args = append(args, model) + idx++ + } + } + if filters.Stream != nil { + conditions = append(conditions, fmt.Sprintf("stream = $%d", idx)) + args = append(args, *filters.Stream) + idx++ + } + if filters.BillingType != nil { + conditions = append(conditions, fmt.Sprintf("billing_type = $%d", idx)) + args = append(args, *filters.BillingType) + idx++ + } + return strings.Join(conditions, " AND "), args +} diff --git a/backend/internal/repository/usage_cleanup_repo_test.go b/backend/internal/repository/usage_cleanup_repo_test.go new file mode 100644 index 00000000..e5582709 --- /dev/null +++ b/backend/internal/repository/usage_cleanup_repo_test.go @@ -0,0 +1,440 @@ +package repository + +import ( + "context" + "database/sql" + "encoding/json" + "testing" + "time" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/stretchr/testify/require" +) + +func newSQLMock(t *testing.T) (*sql.DB, sqlmock.Sqlmock) { + t.Helper() + db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherRegexp)) + require.NoError(t, err) + t.Cleanup(func() { _ = db.Close() }) + return db, mock +} + +func TestNewUsageCleanupRepository(t *testing.T) { + db, _ := newSQLMock(t) + repo := NewUsageCleanupRepository(db) + require.NotNil(t, repo) +} + +func TestUsageCleanupRepositoryCreateTask(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + end := start.Add(24 * time.Hour) + task := &service.UsageCleanupTask{ + Status: service.UsageCleanupStatusPending, + Filters: service.UsageCleanupFilters{StartTime: start, EndTime: end}, + CreatedBy: 12, + } + now := time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC) + + mock.ExpectQuery("INSERT INTO usage_cleanup_tasks"). + WithArgs(task.Status, sqlmock.AnyArg(), task.CreatedBy, task.DeletedRows). + WillReturnRows(sqlmock.NewRows([]string{"id", "created_at", "updated_at"}).AddRow(int64(1), now, now)) + + err := repo.CreateTask(context.Background(), task) + require.NoError(t, err) + require.Equal(t, int64(1), task.ID) + require.Equal(t, now, task.CreatedAt) + require.Equal(t, now, task.UpdatedAt) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryCreateTaskNil(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + err := repo.CreateTask(context.Background(), nil) + require.NoError(t, err) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryCreateTaskQueryError(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + task := &service.UsageCleanupTask{ + Status: service.UsageCleanupStatusPending, + Filters: service.UsageCleanupFilters{StartTime: time.Now(), EndTime: time.Now().Add(time.Hour)}, + CreatedBy: 1, + } + + mock.ExpectQuery("INSERT INTO usage_cleanup_tasks"). + WithArgs(task.Status, sqlmock.AnyArg(), task.CreatedBy, task.DeletedRows). + WillReturnError(sql.ErrConnDone) + + err := repo.CreateTask(context.Background(), task) + require.Error(t, err) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryListTasksEmpty(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + mock.ExpectQuery("SELECT COUNT\\(\\*\\) FROM usage_cleanup_tasks"). + WillReturnRows(sqlmock.NewRows([]string{"count"}).AddRow(int64(0))) + + tasks, result, err := repo.ListTasks(context.Background(), pagination.PaginationParams{Page: 1, PageSize: 20}) + require.NoError(t, err) + require.Empty(t, tasks) + require.Equal(t, int64(0), result.Total) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryListTasks(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + end := start.Add(2 * time.Hour) + filters := service.UsageCleanupFilters{StartTime: start, EndTime: end} + filtersJSON, err := json.Marshal(filters) + require.NoError(t, err) + + createdAt := time.Date(2024, 1, 2, 12, 0, 0, 0, time.UTC) + updatedAt := createdAt.Add(time.Minute) + rows := sqlmock.NewRows([]string{ + "id", "status", "filters", "created_by", "deleted_rows", "error_message", + "canceled_by", "canceled_at", + "started_at", "finished_at", "created_at", "updated_at", + }).AddRow( + int64(1), + service.UsageCleanupStatusSucceeded, + filtersJSON, + int64(2), + int64(9), + "error", + nil, + nil, + start, + end, + createdAt, + updatedAt, + ) + + mock.ExpectQuery("SELECT COUNT\\(\\*\\) FROM usage_cleanup_tasks"). + WillReturnRows(sqlmock.NewRows([]string{"count"}).AddRow(int64(1))) + mock.ExpectQuery("SELECT id, status, filters, created_by, deleted_rows, error_message"). + WithArgs(20, 0). + WillReturnRows(rows) + + tasks, result, err := repo.ListTasks(context.Background(), pagination.PaginationParams{Page: 1, PageSize: 20}) + require.NoError(t, err) + require.Len(t, tasks, 1) + require.Equal(t, int64(1), tasks[0].ID) + require.Equal(t, service.UsageCleanupStatusSucceeded, tasks[0].Status) + require.Equal(t, int64(2), tasks[0].CreatedBy) + require.Equal(t, int64(9), tasks[0].DeletedRows) + require.NotNil(t, tasks[0].ErrorMsg) + require.Equal(t, "error", *tasks[0].ErrorMsg) + require.NotNil(t, tasks[0].StartedAt) + require.NotNil(t, tasks[0].FinishedAt) + require.Equal(t, int64(1), result.Total) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryListTasksInvalidFilters(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + rows := sqlmock.NewRows([]string{ + "id", "status", "filters", "created_by", "deleted_rows", "error_message", + "canceled_by", "canceled_at", + "started_at", "finished_at", "created_at", "updated_at", + }).AddRow( + int64(1), + service.UsageCleanupStatusSucceeded, + []byte("not-json"), + int64(2), + int64(9), + nil, + nil, + nil, + nil, + nil, + time.Now().UTC(), + time.Now().UTC(), + ) + + mock.ExpectQuery("SELECT COUNT\\(\\*\\) FROM usage_cleanup_tasks"). + WillReturnRows(sqlmock.NewRows([]string{"count"}).AddRow(int64(1))) + mock.ExpectQuery("SELECT id, status, filters, created_by, deleted_rows, error_message"). + WithArgs(20, 0). + WillReturnRows(rows) + + _, _, err := repo.ListTasks(context.Background(), pagination.PaginationParams{Page: 1, PageSize: 20}) + require.Error(t, err) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryClaimNextPendingTaskNone(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + mock.ExpectQuery("UPDATE usage_cleanup_tasks"). + WithArgs(service.UsageCleanupStatusPending, service.UsageCleanupStatusRunning, int64(1800), service.UsageCleanupStatusRunning). + WillReturnRows(sqlmock.NewRows([]string{ + "id", "status", "filters", "created_by", "deleted_rows", "error_message", + "started_at", "finished_at", "created_at", "updated_at", + })) + + task, err := repo.ClaimNextPendingTask(context.Background(), 1800) + require.NoError(t, err) + require.Nil(t, task) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryClaimNextPendingTask(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + end := start.Add(24 * time.Hour) + filters := service.UsageCleanupFilters{StartTime: start, EndTime: end} + filtersJSON, err := json.Marshal(filters) + require.NoError(t, err) + + rows := sqlmock.NewRows([]string{ + "id", "status", "filters", "created_by", "deleted_rows", "error_message", + "started_at", "finished_at", "created_at", "updated_at", + }).AddRow( + int64(4), + service.UsageCleanupStatusRunning, + filtersJSON, + int64(7), + int64(0), + nil, + start, + nil, + start, + start, + ) + + mock.ExpectQuery("UPDATE usage_cleanup_tasks"). + WithArgs(service.UsageCleanupStatusPending, service.UsageCleanupStatusRunning, int64(1800), service.UsageCleanupStatusRunning). + WillReturnRows(rows) + + task, err := repo.ClaimNextPendingTask(context.Background(), 1800) + require.NoError(t, err) + require.NotNil(t, task) + require.Equal(t, int64(4), task.ID) + require.Equal(t, service.UsageCleanupStatusRunning, task.Status) + require.Equal(t, int64(7), task.CreatedBy) + require.NotNil(t, task.StartedAt) + require.Nil(t, task.ErrorMsg) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryClaimNextPendingTaskError(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + mock.ExpectQuery("UPDATE usage_cleanup_tasks"). + WithArgs(service.UsageCleanupStatusPending, service.UsageCleanupStatusRunning, int64(1800), service.UsageCleanupStatusRunning). + WillReturnError(sql.ErrConnDone) + + _, err := repo.ClaimNextPendingTask(context.Background(), 1800) + require.Error(t, err) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryClaimNextPendingTaskInvalidFilters(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + rows := sqlmock.NewRows([]string{ + "id", "status", "filters", "created_by", "deleted_rows", "error_message", + "started_at", "finished_at", "created_at", "updated_at", + }).AddRow( + int64(4), + service.UsageCleanupStatusRunning, + []byte("invalid"), + int64(7), + int64(0), + nil, + nil, + nil, + time.Now().UTC(), + time.Now().UTC(), + ) + + mock.ExpectQuery("UPDATE usage_cleanup_tasks"). + WithArgs(service.UsageCleanupStatusPending, service.UsageCleanupStatusRunning, int64(1800), service.UsageCleanupStatusRunning). + WillReturnRows(rows) + + _, err := repo.ClaimNextPendingTask(context.Background(), 1800) + require.Error(t, err) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryMarkTaskSucceeded(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + mock.ExpectExec("UPDATE usage_cleanup_tasks"). + WithArgs(service.UsageCleanupStatusSucceeded, int64(12), int64(9)). + WillReturnResult(sqlmock.NewResult(0, 1)) + + err := repo.MarkTaskSucceeded(context.Background(), 9, 12) + require.NoError(t, err) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryMarkTaskFailed(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + mock.ExpectExec("UPDATE usage_cleanup_tasks"). + WithArgs(service.UsageCleanupStatusFailed, int64(4), "boom", int64(2)). + WillReturnResult(sqlmock.NewResult(0, 1)) + + err := repo.MarkTaskFailed(context.Background(), 2, 4, "boom") + require.NoError(t, err) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryGetTaskStatus(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + mock.ExpectQuery("SELECT status FROM usage_cleanup_tasks"). + WithArgs(int64(9)). + WillReturnRows(sqlmock.NewRows([]string{"status"}).AddRow(service.UsageCleanupStatusPending)) + + status, err := repo.GetTaskStatus(context.Background(), 9) + require.NoError(t, err) + require.Equal(t, service.UsageCleanupStatusPending, status) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryUpdateTaskProgress(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + mock.ExpectExec("UPDATE usage_cleanup_tasks"). + WithArgs(int64(123), int64(8)). + WillReturnResult(sqlmock.NewResult(0, 1)) + + err := repo.UpdateTaskProgress(context.Background(), 8, 123) + require.NoError(t, err) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryCancelTask(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + mock.ExpectQuery("UPDATE usage_cleanup_tasks"). + WithArgs(service.UsageCleanupStatusCanceled, int64(6), int64(9), service.UsageCleanupStatusPending, service.UsageCleanupStatusRunning). + WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(int64(6))) + + ok, err := repo.CancelTask(context.Background(), 6, 9) + require.NoError(t, err) + require.True(t, ok) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryDeleteUsageLogsBatchMissingRange(t *testing.T) { + db, _ := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + _, err := repo.DeleteUsageLogsBatch(context.Background(), service.UsageCleanupFilters{}, 10) + require.Error(t, err) +} + +func TestUsageCleanupRepositoryDeleteUsageLogsBatch(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + end := start.Add(24 * time.Hour) + userID := int64(3) + model := " gpt-4 " + filters := service.UsageCleanupFilters{ + StartTime: start, + EndTime: end, + UserID: &userID, + Model: &model, + } + + mock.ExpectQuery("DELETE FROM usage_logs"). + WithArgs(start, end, userID, "gpt-4", 2). + WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(int64(1)).AddRow(int64(2))) + + deleted, err := repo.DeleteUsageLogsBatch(context.Background(), filters, 2) + require.NoError(t, err) + require.Equal(t, int64(2), deleted) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryDeleteUsageLogsBatchQueryError(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + end := start.Add(24 * time.Hour) + filters := service.UsageCleanupFilters{StartTime: start, EndTime: end} + + mock.ExpectQuery("DELETE FROM usage_logs"). + WithArgs(start, end, 5). + WillReturnError(sql.ErrConnDone) + + _, err := repo.DeleteUsageLogsBatch(context.Background(), filters, 5) + require.Error(t, err) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestBuildUsageCleanupWhere(t *testing.T) { + start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + end := start.Add(24 * time.Hour) + userID := int64(1) + apiKeyID := int64(2) + accountID := int64(3) + groupID := int64(4) + model := " gpt-4 " + stream := true + billingType := int8(2) + + where, args := buildUsageCleanupWhere(service.UsageCleanupFilters{ + StartTime: start, + EndTime: end, + UserID: &userID, + APIKeyID: &apiKeyID, + AccountID: &accountID, + GroupID: &groupID, + Model: &model, + Stream: &stream, + BillingType: &billingType, + }) + + require.Equal(t, "created_at >= $1 AND created_at <= $2 AND user_id = $3 AND api_key_id = $4 AND account_id = $5 AND group_id = $6 AND model = $7 AND stream = $8 AND billing_type = $9", where) + require.Equal(t, []any{start, end, userID, apiKeyID, accountID, groupID, "gpt-4", stream, billingType}, args) +} + +func TestBuildUsageCleanupWhereModelEmpty(t *testing.T) { + start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + end := start.Add(24 * time.Hour) + model := " " + + where, args := buildUsageCleanupWhere(service.UsageCleanupFilters{ + StartTime: start, + EndTime: end, + Model: &model, + }) + + require.Equal(t, "created_at >= $1 AND created_at <= $2", where) + require.Equal(t, []any{start, end}, args) +} diff --git a/backend/internal/repository/usage_log_repo.go b/backend/internal/repository/usage_log_repo.go index 4a2aaade..963db7ba 100644 --- a/backend/internal/repository/usage_log_repo.go +++ b/backend/internal/repository/usage_log_repo.go @@ -1411,7 +1411,7 @@ func (r *usageLogRepository) GetBatchAPIKeyUsageStats(ctx context.Context, apiKe } // GetUsageTrendWithFilters returns usage trend data with optional filters -func (r *usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool) (results []TrendDataPoint, err error) { +func (r *usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool, billingType *int8) (results []TrendDataPoint, err error) { dateFormat := "YYYY-MM-DD" if granularity == "hour" { dateFormat = "YYYY-MM-DD HH24:00" @@ -1456,6 +1456,10 @@ func (r *usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, start query += fmt.Sprintf(" AND stream = $%d", len(args)+1) args = append(args, *stream) } + if billingType != nil { + query += fmt.Sprintf(" AND billing_type = $%d", len(args)+1) + args = append(args, int16(*billingType)) + } query += " GROUP BY date ORDER BY date ASC" rows, err := r.sql.QueryContext(ctx, query, args...) @@ -1479,7 +1483,7 @@ func (r *usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, start } // GetModelStatsWithFilters returns model statistics with optional filters -func (r *usageLogRepository) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool) (results []ModelStat, err error) { +func (r *usageLogRepository) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool, billingType *int8) (results []ModelStat, err error) { actualCostExpr := "COALESCE(SUM(actual_cost), 0) as actual_cost" // 当仅按 account_id 聚合时,实际费用使用账号倍率(total_cost * account_rate_multiplier)。 if accountID > 0 && userID == 0 && apiKeyID == 0 { @@ -1520,6 +1524,10 @@ func (r *usageLogRepository) GetModelStatsWithFilters(ctx context.Context, start query += fmt.Sprintf(" AND stream = $%d", len(args)+1) args = append(args, *stream) } + if billingType != nil { + query += fmt.Sprintf(" AND billing_type = $%d", len(args)+1) + args = append(args, int16(*billingType)) + } query += " GROUP BY model ORDER BY total_tokens DESC" rows, err := r.sql.QueryContext(ctx, query, args...) @@ -1825,7 +1833,7 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID } } - models, err := r.GetModelStatsWithFilters(ctx, startTime, endTime, 0, 0, accountID, 0, nil) + models, err := r.GetModelStatsWithFilters(ctx, startTime, endTime, 0, 0, accountID, 0, nil, nil) if err != nil { models = []ModelStat{} } diff --git a/backend/internal/repository/usage_log_repo_integration_test.go b/backend/internal/repository/usage_log_repo_integration_test.go index 7174be18..eb220f22 100644 --- a/backend/internal/repository/usage_log_repo_integration_test.go +++ b/backend/internal/repository/usage_log_repo_integration_test.go @@ -944,17 +944,17 @@ func (s *UsageLogRepoSuite) TestGetUsageTrendWithFilters() { endTime := base.Add(48 * time.Hour) // Test with user filter - trend, err := s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", user.ID, 0, 0, 0, "", nil) + trend, err := s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", user.ID, 0, 0, 0, "", nil, nil) s.Require().NoError(err, "GetUsageTrendWithFilters user filter") s.Require().Len(trend, 2) // Test with apiKey filter - trend, err = s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", 0, apiKey.ID, 0, 0, "", nil) + trend, err = s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", 0, apiKey.ID, 0, 0, "", nil, nil) s.Require().NoError(err, "GetUsageTrendWithFilters apiKey filter") s.Require().Len(trend, 2) // Test with both filters - trend, err = s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", user.ID, apiKey.ID, 0, 0, "", nil) + trend, err = s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", user.ID, apiKey.ID, 0, 0, "", nil, nil) s.Require().NoError(err, "GetUsageTrendWithFilters both filters") s.Require().Len(trend, 2) } @@ -971,7 +971,7 @@ func (s *UsageLogRepoSuite) TestGetUsageTrendWithFilters_HourlyGranularity() { startTime := base.Add(-1 * time.Hour) endTime := base.Add(3 * time.Hour) - trend, err := s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "hour", user.ID, 0, 0, 0, "", nil) + trend, err := s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "hour", user.ID, 0, 0, 0, "", nil, nil) s.Require().NoError(err, "GetUsageTrendWithFilters hourly") s.Require().Len(trend, 2) } @@ -1017,17 +1017,17 @@ func (s *UsageLogRepoSuite) TestGetModelStatsWithFilters() { endTime := base.Add(2 * time.Hour) // Test with user filter - stats, err := s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, user.ID, 0, 0, 0, nil) + stats, err := s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, user.ID, 0, 0, 0, nil, nil) s.Require().NoError(err, "GetModelStatsWithFilters user filter") s.Require().Len(stats, 2) // Test with apiKey filter - stats, err = s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, 0, apiKey.ID, 0, 0, nil) + stats, err = s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, 0, apiKey.ID, 0, 0, nil, nil) s.Require().NoError(err, "GetModelStatsWithFilters apiKey filter") s.Require().Len(stats, 2) // Test with account filter - stats, err = s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, 0, 0, account.ID, 0, nil) + stats, err = s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, 0, 0, account.ID, 0, nil, nil) s.Require().NoError(err, "GetModelStatsWithFilters account filter") s.Require().Len(stats, 2) } diff --git a/backend/internal/repository/wire.go b/backend/internal/repository/wire.go index 91ef9413..9dc91eca 100644 --- a/backend/internal/repository/wire.go +++ b/backend/internal/repository/wire.go @@ -47,6 +47,7 @@ var ProviderSet = wire.NewSet( NewRedeemCodeRepository, NewPromoCodeRepository, NewUsageLogRepository, + NewUsageCleanupRepository, NewDashboardAggregationRepository, NewSettingRepository, NewOpsRepository, diff --git a/backend/internal/server/api_contract_test.go b/backend/internal/server/api_contract_test.go index 7971c65f..7076f8c5 100644 --- a/backend/internal/server/api_contract_test.go +++ b/backend/internal/server/api_contract_test.go @@ -1242,11 +1242,11 @@ func (r *stubUsageLogRepo) GetDashboardStats(ctx context.Context) (*usagestats.D return nil, errors.New("not implemented") } -func (r *stubUsageLogRepo) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool) ([]usagestats.TrendDataPoint, error) { +func (r *stubUsageLogRepo) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool, billingType *int8) ([]usagestats.TrendDataPoint, error) { return nil, errors.New("not implemented") } -func (r *stubUsageLogRepo) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool) ([]usagestats.ModelStat, error) { +func (r *stubUsageLogRepo) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool, billingType *int8) ([]usagestats.ModelStat, error) { return nil, errors.New("not implemented") } diff --git a/backend/internal/server/routes/admin.go b/backend/internal/server/routes/admin.go index ff05b32a..050e724d 100644 --- a/backend/internal/server/routes/admin.go +++ b/backend/internal/server/routes/admin.go @@ -354,6 +354,9 @@ func registerUsageRoutes(admin *gin.RouterGroup, h *handler.Handlers) { usage.GET("/stats", h.Admin.Usage.Stats) usage.GET("/search-users", h.Admin.Usage.SearchUsers) usage.GET("/search-api-keys", h.Admin.Usage.SearchAPIKeys) + usage.GET("/cleanup-tasks", h.Admin.Usage.ListCleanupTasks) + usage.POST("/cleanup-tasks", h.Admin.Usage.CreateCleanupTask) + usage.POST("/cleanup-tasks/:id/cancel", h.Admin.Usage.CancelCleanupTask) } } diff --git a/backend/internal/service/account_usage_service.go b/backend/internal/service/account_usage_service.go index d9ed5609..f1c07d5e 100644 --- a/backend/internal/service/account_usage_service.go +++ b/backend/internal/service/account_usage_service.go @@ -32,8 +32,8 @@ type UsageLogRepository interface { // Admin dashboard stats GetDashboardStats(ctx context.Context) (*usagestats.DashboardStats, error) - GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool) ([]usagestats.TrendDataPoint, error) - GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool) ([]usagestats.ModelStat, error) + GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool, billingType *int8) ([]usagestats.TrendDataPoint, error) + GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool, billingType *int8) ([]usagestats.ModelStat, error) GetAPIKeyUsageTrend(ctx context.Context, startTime, endTime time.Time, granularity string, limit int) ([]usagestats.APIKeyUsageTrendPoint, error) GetUserUsageTrend(ctx context.Context, startTime, endTime time.Time, granularity string, limit int) ([]usagestats.UserUsageTrendPoint, error) GetBatchUserUsageStats(ctx context.Context, userIDs []int64) (map[int64]*usagestats.BatchUserUsageStats, error) @@ -272,7 +272,7 @@ func (s *AccountUsageService) getGeminiUsage(ctx context.Context, account *Accou } dayStart := geminiDailyWindowStart(now) - stats, err := s.usageLogRepo.GetModelStatsWithFilters(ctx, dayStart, now, 0, 0, account.ID, 0, nil) + stats, err := s.usageLogRepo.GetModelStatsWithFilters(ctx, dayStart, now, 0, 0, account.ID, 0, nil, nil) if err != nil { return nil, fmt.Errorf("get gemini usage stats failed: %w", err) } @@ -294,7 +294,7 @@ func (s *AccountUsageService) getGeminiUsage(ctx context.Context, account *Accou // Minute window (RPM) - fixed-window approximation: current minute [truncate(now), truncate(now)+1m) minuteStart := now.Truncate(time.Minute) minuteResetAt := minuteStart.Add(time.Minute) - minuteStats, err := s.usageLogRepo.GetModelStatsWithFilters(ctx, minuteStart, now, 0, 0, account.ID, 0, nil) + minuteStats, err := s.usageLogRepo.GetModelStatsWithFilters(ctx, minuteStart, now, 0, 0, account.ID, 0, nil, nil) if err != nil { return nil, fmt.Errorf("get gemini minute usage stats failed: %w", err) } diff --git a/backend/internal/service/dashboard_aggregation_service.go b/backend/internal/service/dashboard_aggregation_service.go index da5c0e7d..8f7e8144 100644 --- a/backend/internal/service/dashboard_aggregation_service.go +++ b/backend/internal/service/dashboard_aggregation_service.go @@ -21,11 +21,15 @@ var ( ErrDashboardBackfillDisabled = errors.New("仪表盘聚合回填已禁用") // ErrDashboardBackfillTooLarge 当回填跨度超过限制时返回。 ErrDashboardBackfillTooLarge = errors.New("回填时间跨度过大") + errDashboardAggregationRunning = errors.New("聚合作业正在运行") ) // DashboardAggregationRepository 定义仪表盘预聚合仓储接口。 type DashboardAggregationRepository interface { AggregateRange(ctx context.Context, start, end time.Time) error + // RecomputeRange 重新计算指定时间范围内的聚合数据(包含活跃用户等派生表)。 + // 设计目的:当 usage_logs 被批量删除/回滚后,确保聚合表可恢复一致性。 + RecomputeRange(ctx context.Context, start, end time.Time) error GetAggregationWatermark(ctx context.Context) (time.Time, error) UpdateAggregationWatermark(ctx context.Context, aggregatedAt time.Time) error CleanupAggregates(ctx context.Context, hourlyCutoff, dailyCutoff time.Time) error @@ -112,6 +116,41 @@ func (s *DashboardAggregationService) TriggerBackfill(start, end time.Time) erro return nil } +// TriggerRecomputeRange 触发指定范围的重新计算(异步)。 +// 与 TriggerBackfill 不同: +// - 不依赖 backfill_enabled(这是内部一致性修复) +// - 不更新 watermark(避免影响正常增量聚合游标) +func (s *DashboardAggregationService) TriggerRecomputeRange(start, end time.Time) error { + if s == nil || s.repo == nil { + return errors.New("聚合服务未初始化") + } + if !s.cfg.Enabled { + return errors.New("聚合服务已禁用") + } + if !end.After(start) { + return errors.New("重新计算时间范围无效") + } + + go func() { + const maxRetries = 3 + for i := 0; i < maxRetries; i++ { + ctx, cancel := context.WithTimeout(context.Background(), defaultDashboardAggregationBackfillTimeout) + err := s.recomputeRange(ctx, start, end) + cancel() + if err == nil { + return + } + if !errors.Is(err, errDashboardAggregationRunning) { + log.Printf("[DashboardAggregation] 重新计算失败: %v", err) + return + } + time.Sleep(5 * time.Second) + } + log.Printf("[DashboardAggregation] 重新计算放弃: 聚合作业持续占用") + }() + return nil +} + func (s *DashboardAggregationService) recomputeRecentDays() { days := s.cfg.RecomputeDays if days <= 0 { @@ -128,6 +167,24 @@ func (s *DashboardAggregationService) recomputeRecentDays() { } } +func (s *DashboardAggregationService) recomputeRange(ctx context.Context, start, end time.Time) error { + if !atomic.CompareAndSwapInt32(&s.running, 0, 1) { + return errDashboardAggregationRunning + } + defer atomic.StoreInt32(&s.running, 0) + + jobStart := time.Now().UTC() + if err := s.repo.RecomputeRange(ctx, start, end); err != nil { + return err + } + log.Printf("[DashboardAggregation] 重新计算完成 (start=%s end=%s duration=%s)", + start.UTC().Format(time.RFC3339), + end.UTC().Format(time.RFC3339), + time.Since(jobStart).String(), + ) + return nil +} + func (s *DashboardAggregationService) runScheduledAggregation() { if !atomic.CompareAndSwapInt32(&s.running, 0, 1) { return @@ -179,7 +236,7 @@ func (s *DashboardAggregationService) runScheduledAggregation() { func (s *DashboardAggregationService) backfillRange(ctx context.Context, start, end time.Time) error { if !atomic.CompareAndSwapInt32(&s.running, 0, 1) { - return errors.New("聚合作业正在运行") + return errDashboardAggregationRunning } defer atomic.StoreInt32(&s.running, 0) diff --git a/backend/internal/service/dashboard_aggregation_service_test.go b/backend/internal/service/dashboard_aggregation_service_test.go index 2fc22105..a7058985 100644 --- a/backend/internal/service/dashboard_aggregation_service_test.go +++ b/backend/internal/service/dashboard_aggregation_service_test.go @@ -27,6 +27,10 @@ func (s *dashboardAggregationRepoTestStub) AggregateRange(ctx context.Context, s return s.aggregateErr } +func (s *dashboardAggregationRepoTestStub) RecomputeRange(ctx context.Context, start, end time.Time) error { + return s.AggregateRange(ctx, start, end) +} + func (s *dashboardAggregationRepoTestStub) GetAggregationWatermark(ctx context.Context) (time.Time, error) { return s.watermark, nil } diff --git a/backend/internal/service/dashboard_service.go b/backend/internal/service/dashboard_service.go index a9811919..cd11923e 100644 --- a/backend/internal/service/dashboard_service.go +++ b/backend/internal/service/dashboard_service.go @@ -124,16 +124,16 @@ func (s *DashboardService) GetDashboardStats(ctx context.Context) (*usagestats.D return stats, nil } -func (s *DashboardService) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool) ([]usagestats.TrendDataPoint, error) { - trend, err := s.usageRepo.GetUsageTrendWithFilters(ctx, startTime, endTime, granularity, userID, apiKeyID, accountID, groupID, model, stream) +func (s *DashboardService) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool, billingType *int8) ([]usagestats.TrendDataPoint, error) { + trend, err := s.usageRepo.GetUsageTrendWithFilters(ctx, startTime, endTime, granularity, userID, apiKeyID, accountID, groupID, model, stream, billingType) if err != nil { return nil, fmt.Errorf("get usage trend with filters: %w", err) } return trend, nil } -func (s *DashboardService) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool) ([]usagestats.ModelStat, error) { - stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, startTime, endTime, userID, apiKeyID, accountID, groupID, stream) +func (s *DashboardService) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool, billingType *int8) ([]usagestats.ModelStat, error) { + stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, startTime, endTime, userID, apiKeyID, accountID, groupID, stream, billingType) if err != nil { return nil, fmt.Errorf("get model stats with filters: %w", err) } diff --git a/backend/internal/service/dashboard_service_test.go b/backend/internal/service/dashboard_service_test.go index db3c78c3..59b83e66 100644 --- a/backend/internal/service/dashboard_service_test.go +++ b/backend/internal/service/dashboard_service_test.go @@ -101,6 +101,10 @@ func (s *dashboardAggregationRepoStub) AggregateRange(ctx context.Context, start return nil } +func (s *dashboardAggregationRepoStub) RecomputeRange(ctx context.Context, start, end time.Time) error { + return nil +} + func (s *dashboardAggregationRepoStub) GetAggregationWatermark(ctx context.Context) (time.Time, error) { if s.err != nil { return time.Time{}, s.err diff --git a/backend/internal/service/ratelimit_service.go b/backend/internal/service/ratelimit_service.go index 47a04cf5..2d75dd5a 100644 --- a/backend/internal/service/ratelimit_service.go +++ b/backend/internal/service/ratelimit_service.go @@ -190,7 +190,7 @@ func (s *RateLimitService) PreCheckUsage(ctx context.Context, account *Account, start := geminiDailyWindowStart(now) totals, ok := s.getGeminiUsageTotals(account.ID, start, now) if !ok { - stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, start, now, 0, 0, account.ID, 0, nil) + stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, start, now, 0, 0, account.ID, 0, nil, nil) if err != nil { return true, err } @@ -237,7 +237,7 @@ func (s *RateLimitService) PreCheckUsage(ctx context.Context, account *Account, if limit > 0 { start := now.Truncate(time.Minute) - stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, start, now, 0, 0, account.ID, 0, nil) + stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, start, now, 0, 0, account.ID, 0, nil, nil) if err != nil { return true, err } diff --git a/backend/internal/service/usage_cleanup.go b/backend/internal/service/usage_cleanup.go new file mode 100644 index 00000000..7e3ffbb9 --- /dev/null +++ b/backend/internal/service/usage_cleanup.go @@ -0,0 +1,74 @@ +package service + +import ( + "context" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" +) + +const ( + UsageCleanupStatusPending = "pending" + UsageCleanupStatusRunning = "running" + UsageCleanupStatusSucceeded = "succeeded" + UsageCleanupStatusFailed = "failed" + UsageCleanupStatusCanceled = "canceled" +) + +// UsageCleanupFilters 定义清理任务过滤条件 +// 时间范围为必填,其他字段可选 +// JSON 序列化用于存储任务参数 +// +// start_time/end_time 使用 RFC3339 时间格式 +// 以 UTC 或用户时区解析后的时间为准 +// +// 说明: +// - nil 表示未设置该过滤条件 +// - 过滤条件均为精确匹配 +type UsageCleanupFilters struct { + StartTime time.Time `json:"start_time"` + EndTime time.Time `json:"end_time"` + UserID *int64 `json:"user_id,omitempty"` + APIKeyID *int64 `json:"api_key_id,omitempty"` + AccountID *int64 `json:"account_id,omitempty"` + GroupID *int64 `json:"group_id,omitempty"` + Model *string `json:"model,omitempty"` + Stream *bool `json:"stream,omitempty"` + BillingType *int8 `json:"billing_type,omitempty"` +} + +// UsageCleanupTask 表示使用记录清理任务 +// 状态包含 pending/running/succeeded/failed/canceled +type UsageCleanupTask struct { + ID int64 + Status string + Filters UsageCleanupFilters + CreatedBy int64 + DeletedRows int64 + ErrorMsg *string + CanceledBy *int64 + CanceledAt *time.Time + StartedAt *time.Time + FinishedAt *time.Time + CreatedAt time.Time + UpdatedAt time.Time +} + +// UsageCleanupRepository 定义清理任务持久层接口 +type UsageCleanupRepository interface { + CreateTask(ctx context.Context, task *UsageCleanupTask) error + ListTasks(ctx context.Context, params pagination.PaginationParams) ([]UsageCleanupTask, *pagination.PaginationResult, error) + // ClaimNextPendingTask 抢占下一条可执行任务: + // - 优先 pending + // - 若 running 超过 staleRunningAfterSeconds(可能由于进程退出/崩溃/超时),允许重新抢占继续执行 + ClaimNextPendingTask(ctx context.Context, staleRunningAfterSeconds int64) (*UsageCleanupTask, error) + // GetTaskStatus 查询任务状态;若不存在返回 sql.ErrNoRows + GetTaskStatus(ctx context.Context, taskID int64) (string, error) + // UpdateTaskProgress 更新任务进度(deleted_rows)用于断点续跑/展示 + UpdateTaskProgress(ctx context.Context, taskID int64, deletedRows int64) error + // CancelTask 将任务标记为 canceled(仅允许 pending/running) + CancelTask(ctx context.Context, taskID int64, canceledBy int64) (bool, error) + MarkTaskSucceeded(ctx context.Context, taskID int64, deletedRows int64) error + MarkTaskFailed(ctx context.Context, taskID int64, deletedRows int64, errorMsg string) error + DeleteUsageLogsBatch(ctx context.Context, filters UsageCleanupFilters, limit int) (int64, error) +} diff --git a/backend/internal/service/usage_cleanup_service.go b/backend/internal/service/usage_cleanup_service.go new file mode 100644 index 00000000..8ca02cfc --- /dev/null +++ b/backend/internal/service/usage_cleanup_service.go @@ -0,0 +1,400 @@ +package service + +import ( + "context" + "database/sql" + "errors" + "fmt" + "log" + "net/http" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" +) + +const ( + usageCleanupWorkerName = "usage_cleanup_worker" +) + +// UsageCleanupService 负责创建与执行使用记录清理任务 +type UsageCleanupService struct { + repo UsageCleanupRepository + timingWheel *TimingWheelService + dashboard *DashboardAggregationService + cfg *config.Config + + running int32 + startOnce sync.Once + stopOnce sync.Once + + workerCtx context.Context + workerCancel context.CancelFunc +} + +func NewUsageCleanupService(repo UsageCleanupRepository, timingWheel *TimingWheelService, dashboard *DashboardAggregationService, cfg *config.Config) *UsageCleanupService { + workerCtx, workerCancel := context.WithCancel(context.Background()) + return &UsageCleanupService{ + repo: repo, + timingWheel: timingWheel, + dashboard: dashboard, + cfg: cfg, + workerCtx: workerCtx, + workerCancel: workerCancel, + } +} + +func describeUsageCleanupFilters(filters UsageCleanupFilters) string { + var parts []string + parts = append(parts, "start="+filters.StartTime.UTC().Format(time.RFC3339)) + parts = append(parts, "end="+filters.EndTime.UTC().Format(time.RFC3339)) + if filters.UserID != nil { + parts = append(parts, fmt.Sprintf("user_id=%d", *filters.UserID)) + } + if filters.APIKeyID != nil { + parts = append(parts, fmt.Sprintf("api_key_id=%d", *filters.APIKeyID)) + } + if filters.AccountID != nil { + parts = append(parts, fmt.Sprintf("account_id=%d", *filters.AccountID)) + } + if filters.GroupID != nil { + parts = append(parts, fmt.Sprintf("group_id=%d", *filters.GroupID)) + } + if filters.Model != nil { + parts = append(parts, "model="+strings.TrimSpace(*filters.Model)) + } + if filters.Stream != nil { + parts = append(parts, fmt.Sprintf("stream=%t", *filters.Stream)) + } + if filters.BillingType != nil { + parts = append(parts, fmt.Sprintf("billing_type=%d", *filters.BillingType)) + } + return strings.Join(parts, " ") +} + +func (s *UsageCleanupService) Start() { + if s == nil { + return + } + if s.cfg != nil && !s.cfg.UsageCleanup.Enabled { + log.Printf("[UsageCleanup] not started (disabled)") + return + } + if s.repo == nil || s.timingWheel == nil { + log.Printf("[UsageCleanup] not started (missing deps)") + return + } + + interval := s.workerInterval() + s.startOnce.Do(func() { + s.timingWheel.ScheduleRecurring(usageCleanupWorkerName, interval, s.runOnce) + log.Printf("[UsageCleanup] started (interval=%s max_range_days=%d batch_size=%d task_timeout=%s)", interval, s.maxRangeDays(), s.batchSize(), s.taskTimeout()) + }) +} + +func (s *UsageCleanupService) Stop() { + if s == nil { + return + } + s.stopOnce.Do(func() { + if s.workerCancel != nil { + s.workerCancel() + } + if s.timingWheel != nil { + s.timingWheel.Cancel(usageCleanupWorkerName) + } + log.Printf("[UsageCleanup] stopped") + }) +} + +func (s *UsageCleanupService) ListTasks(ctx context.Context, params pagination.PaginationParams) ([]UsageCleanupTask, *pagination.PaginationResult, error) { + if s == nil || s.repo == nil { + return nil, nil, fmt.Errorf("cleanup service not ready") + } + return s.repo.ListTasks(ctx, params) +} + +func (s *UsageCleanupService) CreateTask(ctx context.Context, filters UsageCleanupFilters, createdBy int64) (*UsageCleanupTask, error) { + if s == nil || s.repo == nil { + return nil, fmt.Errorf("cleanup service not ready") + } + if s.cfg != nil && !s.cfg.UsageCleanup.Enabled { + return nil, infraerrors.New(http.StatusServiceUnavailable, "USAGE_CLEANUP_DISABLED", "usage cleanup is disabled") + } + if createdBy <= 0 { + return nil, infraerrors.BadRequest("USAGE_CLEANUP_INVALID_CREATOR", "invalid creator") + } + + log.Printf("[UsageCleanup] create_task requested: operator=%d %s", createdBy, describeUsageCleanupFilters(filters)) + sanitizeUsageCleanupFilters(&filters) + if err := s.validateFilters(filters); err != nil { + log.Printf("[UsageCleanup] create_task rejected: operator=%d err=%v %s", createdBy, err, describeUsageCleanupFilters(filters)) + return nil, err + } + + task := &UsageCleanupTask{ + Status: UsageCleanupStatusPending, + Filters: filters, + CreatedBy: createdBy, + } + if err := s.repo.CreateTask(ctx, task); err != nil { + log.Printf("[UsageCleanup] create_task persist failed: operator=%d err=%v %s", createdBy, err, describeUsageCleanupFilters(filters)) + return nil, fmt.Errorf("create cleanup task: %w", err) + } + log.Printf("[UsageCleanup] create_task persisted: task=%d operator=%d status=%s deleted_rows=%d %s", task.ID, createdBy, task.Status, task.DeletedRows, describeUsageCleanupFilters(filters)) + go s.runOnce() + return task, nil +} + +func (s *UsageCleanupService) runOnce() { + if !atomic.CompareAndSwapInt32(&s.running, 0, 1) { + log.Printf("[UsageCleanup] run_once skipped: already_running=true") + return + } + defer atomic.StoreInt32(&s.running, 0) + + parent := context.Background() + if s != nil && s.workerCtx != nil { + parent = s.workerCtx + } + ctx, cancel := context.WithTimeout(parent, s.taskTimeout()) + defer cancel() + + task, err := s.repo.ClaimNextPendingTask(ctx, int64(s.taskTimeout().Seconds())) + if err != nil { + log.Printf("[UsageCleanup] claim pending task failed: %v", err) + return + } + if task == nil { + log.Printf("[UsageCleanup] run_once done: no_task=true") + return + } + + log.Printf("[UsageCleanup] task claimed: task=%d status=%s created_by=%d deleted_rows=%d %s", task.ID, task.Status, task.CreatedBy, task.DeletedRows, describeUsageCleanupFilters(task.Filters)) + s.executeTask(ctx, task) +} + +func (s *UsageCleanupService) executeTask(ctx context.Context, task *UsageCleanupTask) { + if task == nil { + return + } + + batchSize := s.batchSize() + deletedTotal := task.DeletedRows + start := time.Now() + log.Printf("[UsageCleanup] task started: task=%d batch_size=%d deleted_rows=%d %s", task.ID, batchSize, deletedTotal, describeUsageCleanupFilters(task.Filters)) + var batchNum int + + for { + if ctx != nil && ctx.Err() != nil { + log.Printf("[UsageCleanup] task interrupted: task=%d err=%v", task.ID, ctx.Err()) + return + } + canceled, err := s.isTaskCanceled(ctx, task.ID) + if err != nil { + s.markTaskFailed(task.ID, deletedTotal, err) + return + } + if canceled { + log.Printf("[UsageCleanup] task canceled: task=%d deleted_rows=%d duration=%s", task.ID, deletedTotal, time.Since(start)) + return + } + + batchNum++ + deleted, err := s.repo.DeleteUsageLogsBatch(ctx, task.Filters, batchSize) + if err != nil { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + // 任务被中断(例如服务停止/超时),保持 running 状态,后续通过 stale reclaim 续跑。 + log.Printf("[UsageCleanup] task interrupted: task=%d err=%v", task.ID, err) + return + } + s.markTaskFailed(task.ID, deletedTotal, err) + return + } + deletedTotal += deleted + if deleted > 0 { + updateCtx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + if err := s.repo.UpdateTaskProgress(updateCtx, task.ID, deletedTotal); err != nil { + log.Printf("[UsageCleanup] task progress update failed: task=%d deleted_rows=%d err=%v", task.ID, deletedTotal, err) + } + cancel() + } + if batchNum <= 3 || batchNum%20 == 0 || deleted < int64(batchSize) { + log.Printf("[UsageCleanup] task batch done: task=%d batch=%d deleted=%d deleted_total=%d", task.ID, batchNum, deleted, deletedTotal) + } + if deleted == 0 || deleted < int64(batchSize) { + break + } + } + + updateCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + if err := s.repo.MarkTaskSucceeded(updateCtx, task.ID, deletedTotal); err != nil { + log.Printf("[UsageCleanup] update task succeeded failed: task=%d err=%v", task.ID, err) + } else { + log.Printf("[UsageCleanup] task succeeded: task=%d deleted_rows=%d duration=%s", task.ID, deletedTotal, time.Since(start)) + } + + if s.dashboard != nil { + if err := s.dashboard.TriggerRecomputeRange(task.Filters.StartTime, task.Filters.EndTime); err != nil { + log.Printf("[UsageCleanup] trigger dashboard recompute failed: task=%d err=%v", task.ID, err) + } else { + log.Printf("[UsageCleanup] trigger dashboard recompute: task=%d start=%s end=%s", task.ID, task.Filters.StartTime.UTC().Format(time.RFC3339), task.Filters.EndTime.UTC().Format(time.RFC3339)) + } + } +} + +func (s *UsageCleanupService) markTaskFailed(taskID int64, deletedRows int64, err error) { + msg := strings.TrimSpace(err.Error()) + if len(msg) > 500 { + msg = msg[:500] + } + log.Printf("[UsageCleanup] task failed: task=%d deleted_rows=%d err=%s", taskID, deletedRows, msg) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + if updateErr := s.repo.MarkTaskFailed(ctx, taskID, deletedRows, msg); updateErr != nil { + log.Printf("[UsageCleanup] update task failed failed: task=%d err=%v", taskID, updateErr) + } +} + +func (s *UsageCleanupService) isTaskCanceled(ctx context.Context, taskID int64) (bool, error) { + if s == nil || s.repo == nil { + return false, fmt.Errorf("cleanup service not ready") + } + checkCtx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + status, err := s.repo.GetTaskStatus(checkCtx, taskID) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return false, nil + } + return false, err + } + if status == UsageCleanupStatusCanceled { + log.Printf("[UsageCleanup] task cancel detected: task=%d", taskID) + } + return status == UsageCleanupStatusCanceled, nil +} + +func (s *UsageCleanupService) validateFilters(filters UsageCleanupFilters) error { + if filters.StartTime.IsZero() || filters.EndTime.IsZero() { + return infraerrors.BadRequest("USAGE_CLEANUP_MISSING_RANGE", "start_date and end_date are required") + } + if filters.EndTime.Before(filters.StartTime) { + return infraerrors.BadRequest("USAGE_CLEANUP_INVALID_RANGE", "end_date must be after start_date") + } + maxDays := s.maxRangeDays() + if maxDays > 0 { + delta := filters.EndTime.Sub(filters.StartTime) + if delta > time.Duration(maxDays)*24*time.Hour { + return infraerrors.BadRequest("USAGE_CLEANUP_RANGE_TOO_LARGE", fmt.Sprintf("date range exceeds %d days", maxDays)) + } + } + return nil +} + +func (s *UsageCleanupService) CancelTask(ctx context.Context, taskID int64, canceledBy int64) error { + if s == nil || s.repo == nil { + return fmt.Errorf("cleanup service not ready") + } + if s.cfg != nil && !s.cfg.UsageCleanup.Enabled { + return infraerrors.New(http.StatusServiceUnavailable, "USAGE_CLEANUP_DISABLED", "usage cleanup is disabled") + } + if canceledBy <= 0 { + return infraerrors.BadRequest("USAGE_CLEANUP_INVALID_CANCELLER", "invalid canceller") + } + status, err := s.repo.GetTaskStatus(ctx, taskID) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return infraerrors.New(http.StatusNotFound, "USAGE_CLEANUP_TASK_NOT_FOUND", "cleanup task not found") + } + return err + } + log.Printf("[UsageCleanup] cancel_task requested: task=%d operator=%d status=%s", taskID, canceledBy, status) + if status != UsageCleanupStatusPending && status != UsageCleanupStatusRunning { + return infraerrors.New(http.StatusConflict, "USAGE_CLEANUP_CANCEL_CONFLICT", "cleanup task cannot be canceled in current status") + } + ok, err := s.repo.CancelTask(ctx, taskID, canceledBy) + if err != nil { + return err + } + if !ok { + // 状态可能并发改变 + return infraerrors.New(http.StatusConflict, "USAGE_CLEANUP_CANCEL_CONFLICT", "cleanup task cannot be canceled in current status") + } + log.Printf("[UsageCleanup] cancel_task done: task=%d operator=%d", taskID, canceledBy) + return nil +} + +func sanitizeUsageCleanupFilters(filters *UsageCleanupFilters) { + if filters == nil { + return + } + if filters.UserID != nil && *filters.UserID <= 0 { + filters.UserID = nil + } + if filters.APIKeyID != nil && *filters.APIKeyID <= 0 { + filters.APIKeyID = nil + } + if filters.AccountID != nil && *filters.AccountID <= 0 { + filters.AccountID = nil + } + if filters.GroupID != nil && *filters.GroupID <= 0 { + filters.GroupID = nil + } + if filters.Model != nil { + model := strings.TrimSpace(*filters.Model) + if model == "" { + filters.Model = nil + } else { + filters.Model = &model + } + } + if filters.BillingType != nil && *filters.BillingType < 0 { + filters.BillingType = nil + } +} + +func (s *UsageCleanupService) maxRangeDays() int { + if s == nil || s.cfg == nil { + return 31 + } + if s.cfg.UsageCleanup.MaxRangeDays > 0 { + return s.cfg.UsageCleanup.MaxRangeDays + } + return 31 +} + +func (s *UsageCleanupService) batchSize() int { + if s == nil || s.cfg == nil { + return 5000 + } + if s.cfg.UsageCleanup.BatchSize > 0 { + return s.cfg.UsageCleanup.BatchSize + } + return 5000 +} + +func (s *UsageCleanupService) workerInterval() time.Duration { + if s == nil || s.cfg == nil { + return 10 * time.Second + } + if s.cfg.UsageCleanup.WorkerIntervalSeconds > 0 { + return time.Duration(s.cfg.UsageCleanup.WorkerIntervalSeconds) * time.Second + } + return 10 * time.Second +} + +func (s *UsageCleanupService) taskTimeout() time.Duration { + if s == nil || s.cfg == nil { + return 30 * time.Minute + } + if s.cfg.UsageCleanup.TaskTimeoutSeconds > 0 { + return time.Duration(s.cfg.UsageCleanup.TaskTimeoutSeconds) * time.Second + } + return 30 * time.Minute +} diff --git a/backend/internal/service/usage_cleanup_service_test.go b/backend/internal/service/usage_cleanup_service_test.go new file mode 100644 index 00000000..37d3eb19 --- /dev/null +++ b/backend/internal/service/usage_cleanup_service_test.go @@ -0,0 +1,420 @@ +package service + +import ( + "context" + "database/sql" + "errors" + "net/http" + "strings" + "sync" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/stretchr/testify/require" +) + +type cleanupDeleteResponse struct { + deleted int64 + err error +} + +type cleanupDeleteCall struct { + filters UsageCleanupFilters + limit int +} + +type cleanupMarkCall struct { + taskID int64 + deletedRows int64 + errMsg string +} + +type cleanupRepoStub struct { + mu sync.Mutex + created []*UsageCleanupTask + createErr error + listTasks []UsageCleanupTask + listResult *pagination.PaginationResult + listErr error + claimQueue []*UsageCleanupTask + claimErr error + deleteQueue []cleanupDeleteResponse + deleteCalls []cleanupDeleteCall + markSucceeded []cleanupMarkCall + markFailed []cleanupMarkCall + statusByID map[int64]string + progressCalls []cleanupMarkCall + cancelCalls []int64 +} + +func (s *cleanupRepoStub) CreateTask(ctx context.Context, task *UsageCleanupTask) error { + if task == nil { + return nil + } + s.mu.Lock() + defer s.mu.Unlock() + if s.createErr != nil { + return s.createErr + } + if task.ID == 0 { + task.ID = int64(len(s.created) + 1) + } + if task.CreatedAt.IsZero() { + task.CreatedAt = time.Now().UTC() + } + if task.UpdatedAt.IsZero() { + task.UpdatedAt = task.CreatedAt + } + clone := *task + s.created = append(s.created, &clone) + return nil +} + +func (s *cleanupRepoStub) ListTasks(ctx context.Context, params pagination.PaginationParams) ([]UsageCleanupTask, *pagination.PaginationResult, error) { + s.mu.Lock() + defer s.mu.Unlock() + return s.listTasks, s.listResult, s.listErr +} + +func (s *cleanupRepoStub) ClaimNextPendingTask(ctx context.Context, staleRunningAfterSeconds int64) (*UsageCleanupTask, error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.claimErr != nil { + return nil, s.claimErr + } + if len(s.claimQueue) == 0 { + return nil, nil + } + task := s.claimQueue[0] + s.claimQueue = s.claimQueue[1:] + if s.statusByID == nil { + s.statusByID = map[int64]string{} + } + s.statusByID[task.ID] = UsageCleanupStatusRunning + return task, nil +} + +func (s *cleanupRepoStub) GetTaskStatus(ctx context.Context, taskID int64) (string, error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.statusByID == nil { + return "", sql.ErrNoRows + } + status, ok := s.statusByID[taskID] + if !ok { + return "", sql.ErrNoRows + } + return status, nil +} + +func (s *cleanupRepoStub) UpdateTaskProgress(ctx context.Context, taskID int64, deletedRows int64) error { + s.mu.Lock() + defer s.mu.Unlock() + s.progressCalls = append(s.progressCalls, cleanupMarkCall{taskID: taskID, deletedRows: deletedRows}) + return nil +} + +func (s *cleanupRepoStub) CancelTask(ctx context.Context, taskID int64, canceledBy int64) (bool, error) { + s.mu.Lock() + defer s.mu.Unlock() + s.cancelCalls = append(s.cancelCalls, taskID) + if s.statusByID == nil { + s.statusByID = map[int64]string{} + } + status := s.statusByID[taskID] + if status != UsageCleanupStatusPending && status != UsageCleanupStatusRunning { + return false, nil + } + s.statusByID[taskID] = UsageCleanupStatusCanceled + return true, nil +} + +func (s *cleanupRepoStub) MarkTaskSucceeded(ctx context.Context, taskID int64, deletedRows int64) error { + s.mu.Lock() + defer s.mu.Unlock() + s.markSucceeded = append(s.markSucceeded, cleanupMarkCall{taskID: taskID, deletedRows: deletedRows}) + if s.statusByID == nil { + s.statusByID = map[int64]string{} + } + s.statusByID[taskID] = UsageCleanupStatusSucceeded + return nil +} + +func (s *cleanupRepoStub) MarkTaskFailed(ctx context.Context, taskID int64, deletedRows int64, errorMsg string) error { + s.mu.Lock() + defer s.mu.Unlock() + s.markFailed = append(s.markFailed, cleanupMarkCall{taskID: taskID, deletedRows: deletedRows, errMsg: errorMsg}) + if s.statusByID == nil { + s.statusByID = map[int64]string{} + } + s.statusByID[taskID] = UsageCleanupStatusFailed + return nil +} + +func (s *cleanupRepoStub) DeleteUsageLogsBatch(ctx context.Context, filters UsageCleanupFilters, limit int) (int64, error) { + s.mu.Lock() + defer s.mu.Unlock() + s.deleteCalls = append(s.deleteCalls, cleanupDeleteCall{filters: filters, limit: limit}) + if len(s.deleteQueue) == 0 { + return 0, nil + } + resp := s.deleteQueue[0] + s.deleteQueue = s.deleteQueue[1:] + return resp.deleted, resp.err +} + +func TestUsageCleanupServiceCreateTaskSanitizeFilters(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, MaxRangeDays: 31}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + + start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + end := start.Add(24 * time.Hour) + userID := int64(-1) + apiKeyID := int64(10) + model := " gpt-4 " + billingType := int8(-2) + filters := UsageCleanupFilters{ + StartTime: start, + EndTime: end, + UserID: &userID, + APIKeyID: &apiKeyID, + Model: &model, + BillingType: &billingType, + } + + task, err := svc.CreateTask(context.Background(), filters, 9) + require.NoError(t, err) + require.Equal(t, UsageCleanupStatusPending, task.Status) + require.Nil(t, task.Filters.UserID) + require.NotNil(t, task.Filters.APIKeyID) + require.Equal(t, apiKeyID, *task.Filters.APIKeyID) + require.NotNil(t, task.Filters.Model) + require.Equal(t, "gpt-4", *task.Filters.Model) + require.Nil(t, task.Filters.BillingType) + require.Equal(t, int64(9), task.CreatedBy) +} + +func TestUsageCleanupServiceCreateTaskInvalidCreator(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + + filters := UsageCleanupFilters{ + StartTime: time.Now(), + EndTime: time.Now().Add(24 * time.Hour), + } + _, err := svc.CreateTask(context.Background(), filters, 0) + require.Error(t, err) + require.Equal(t, "USAGE_CLEANUP_INVALID_CREATOR", infraerrors.Reason(err)) +} + +func TestUsageCleanupServiceCreateTaskDisabled(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: false}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + + filters := UsageCleanupFilters{ + StartTime: time.Now(), + EndTime: time.Now().Add(24 * time.Hour), + } + _, err := svc.CreateTask(context.Background(), filters, 1) + require.Error(t, err) + require.Equal(t, http.StatusServiceUnavailable, infraerrors.Code(err)) + require.Equal(t, "USAGE_CLEANUP_DISABLED", infraerrors.Reason(err)) +} + +func TestUsageCleanupServiceCreateTaskRangeTooLarge(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, MaxRangeDays: 1}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + + start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + end := start.Add(48 * time.Hour) + filters := UsageCleanupFilters{StartTime: start, EndTime: end} + + _, err := svc.CreateTask(context.Background(), filters, 1) + require.Error(t, err) + require.Equal(t, "USAGE_CLEANUP_RANGE_TOO_LARGE", infraerrors.Reason(err)) +} + +func TestUsageCleanupServiceCreateTaskMissingRange(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + + _, err := svc.CreateTask(context.Background(), UsageCleanupFilters{}, 1) + require.Error(t, err) + require.Equal(t, "USAGE_CLEANUP_MISSING_RANGE", infraerrors.Reason(err)) +} + +func TestUsageCleanupServiceCreateTaskRepoError(t *testing.T) { + repo := &cleanupRepoStub{createErr: errors.New("db down")} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + + filters := UsageCleanupFilters{ + StartTime: time.Now(), + EndTime: time.Now().Add(24 * time.Hour), + } + _, err := svc.CreateTask(context.Background(), filters, 1) + require.Error(t, err) + require.Contains(t, err.Error(), "create cleanup task") +} + +func TestUsageCleanupServiceRunOnceSuccess(t *testing.T) { + repo := &cleanupRepoStub{ + claimQueue: []*UsageCleanupTask{ + {ID: 5, Filters: UsageCleanupFilters{StartTime: time.Now(), EndTime: time.Now().Add(2 * time.Hour)}}, + }, + deleteQueue: []cleanupDeleteResponse{ + {deleted: 2}, + {deleted: 2}, + {deleted: 1}, + }, + } + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, BatchSize: 2, TaskTimeoutSeconds: 30}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + + svc.runOnce() + + repo.mu.Lock() + defer repo.mu.Unlock() + require.Len(t, repo.deleteCalls, 3) + require.Len(t, repo.markSucceeded, 1) + require.Empty(t, repo.markFailed) + require.Equal(t, int64(5), repo.markSucceeded[0].taskID) + require.Equal(t, int64(5), repo.markSucceeded[0].deletedRows) +} + +func TestUsageCleanupServiceRunOnceClaimError(t *testing.T) { + repo := &cleanupRepoStub{claimErr: errors.New("claim failed")} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + svc.runOnce() + + repo.mu.Lock() + defer repo.mu.Unlock() + require.Empty(t, repo.markSucceeded) + require.Empty(t, repo.markFailed) +} + +func TestUsageCleanupServiceRunOnceAlreadyRunning(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + svc.running = 1 + svc.runOnce() +} + +func TestUsageCleanupServiceExecuteTaskFailed(t *testing.T) { + longMsg := strings.Repeat("x", 600) + repo := &cleanupRepoStub{ + deleteQueue: []cleanupDeleteResponse{ + {err: errors.New(longMsg)}, + }, + } + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, BatchSize: 3}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + task := &UsageCleanupTask{ + ID: 11, + Filters: UsageCleanupFilters{ + StartTime: time.Now(), + EndTime: time.Now().Add(24 * time.Hour), + }, + } + + svc.executeTask(context.Background(), task) + + repo.mu.Lock() + defer repo.mu.Unlock() + require.Len(t, repo.markFailed, 1) + require.Equal(t, int64(11), repo.markFailed[0].taskID) + require.Equal(t, 500, len(repo.markFailed[0].errMsg)) +} + +func TestUsageCleanupServiceListTasks(t *testing.T) { + repo := &cleanupRepoStub{ + listTasks: []UsageCleanupTask{{ID: 1}, {ID: 2}}, + listResult: &pagination.PaginationResult{ + Total: 2, + Page: 1, + PageSize: 20, + Pages: 1, + }, + } + svc := NewUsageCleanupService(repo, nil, nil, &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}}) + + tasks, result, err := svc.ListTasks(context.Background(), pagination.PaginationParams{Page: 1, PageSize: 20}) + require.NoError(t, err) + require.Len(t, tasks, 2) + require.Equal(t, int64(2), result.Total) +} + +func TestUsageCleanupServiceListTasksNotReady(t *testing.T) { + var nilSvc *UsageCleanupService + _, _, err := nilSvc.ListTasks(context.Background(), pagination.PaginationParams{Page: 1, PageSize: 20}) + require.Error(t, err) + + svc := NewUsageCleanupService(nil, nil, nil, &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}}) + _, _, err = svc.ListTasks(context.Background(), pagination.PaginationParams{Page: 1, PageSize: 20}) + require.Error(t, err) +} + +func TestUsageCleanupServiceDefaultsAndLifecycle(t *testing.T) { + var nilSvc *UsageCleanupService + require.Equal(t, 31, nilSvc.maxRangeDays()) + require.Equal(t, 5000, nilSvc.batchSize()) + require.Equal(t, 10*time.Second, nilSvc.workerInterval()) + require.Equal(t, 30*time.Minute, nilSvc.taskTimeout()) + nilSvc.Start() + nilSvc.Stop() + + repo := &cleanupRepoStub{} + cfgDisabled := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: false}} + svcDisabled := NewUsageCleanupService(repo, nil, nil, cfgDisabled) + svcDisabled.Start() + svcDisabled.Stop() + + timingWheel, err := NewTimingWheelService() + require.NoError(t, err) + + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, WorkerIntervalSeconds: 5}} + svc := NewUsageCleanupService(repo, timingWheel, nil, cfg) + require.Equal(t, 5*time.Second, svc.workerInterval()) + svc.Start() + svc.Stop() + + cfgFallback := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + svcFallback := NewUsageCleanupService(repo, timingWheel, nil, cfgFallback) + require.Equal(t, 31, svcFallback.maxRangeDays()) + require.Equal(t, 5000, svcFallback.batchSize()) + require.Equal(t, 10*time.Second, svcFallback.workerInterval()) + + svcMissingDeps := NewUsageCleanupService(nil, nil, nil, cfgFallback) + svcMissingDeps.Start() +} + +func TestSanitizeUsageCleanupFiltersModelEmpty(t *testing.T) { + model := " " + apiKeyID := int64(-5) + accountID := int64(-1) + groupID := int64(-2) + filters := UsageCleanupFilters{ + UserID: &apiKeyID, + APIKeyID: &apiKeyID, + AccountID: &accountID, + GroupID: &groupID, + Model: &model, + } + + sanitizeUsageCleanupFilters(&filters) + require.Nil(t, filters.UserID) + require.Nil(t, filters.APIKeyID) + require.Nil(t, filters.AccountID) + require.Nil(t, filters.GroupID) + require.Nil(t, filters.Model) +} diff --git a/backend/internal/service/wire.go b/backend/internal/service/wire.go index acc0a5fb..0b9bc20c 100644 --- a/backend/internal/service/wire.go +++ b/backend/internal/service/wire.go @@ -57,6 +57,13 @@ func ProvideDashboardAggregationService(repo DashboardAggregationRepository, tim return svc } +// ProvideUsageCleanupService 创建并启动使用记录清理任务服务 +func ProvideUsageCleanupService(repo UsageCleanupRepository, timingWheel *TimingWheelService, dashboardAgg *DashboardAggregationService, cfg *config.Config) *UsageCleanupService { + svc := NewUsageCleanupService(repo, timingWheel, dashboardAgg, cfg) + svc.Start() + return svc +} + // ProvideAccountExpiryService creates and starts AccountExpiryService. func ProvideAccountExpiryService(accountRepo AccountRepository) *AccountExpiryService { svc := NewAccountExpiryService(accountRepo, time.Minute) @@ -248,6 +255,7 @@ var ProviderSet = wire.NewSet( ProvideAccountExpiryService, ProvideTimingWheelService, ProvideDashboardAggregationService, + ProvideUsageCleanupService, ProvideDeferredService, NewAntigravityQuotaFetcher, NewUserAttributeService, diff --git a/backend/migrations/042_add_usage_cleanup_tasks.sql b/backend/migrations/042_add_usage_cleanup_tasks.sql new file mode 100644 index 00000000..ce4be91f --- /dev/null +++ b/backend/migrations/042_add_usage_cleanup_tasks.sql @@ -0,0 +1,21 @@ +-- 042_add_usage_cleanup_tasks.sql +-- 使用记录清理任务表 + +CREATE TABLE IF NOT EXISTS usage_cleanup_tasks ( + id BIGSERIAL PRIMARY KEY, + status VARCHAR(20) NOT NULL, + filters JSONB NOT NULL, + created_by BIGINT NOT NULL REFERENCES users(id) ON DELETE RESTRICT, + deleted_rows BIGINT NOT NULL DEFAULT 0, + error_message TEXT, + started_at TIMESTAMPTZ, + finished_at TIMESTAMPTZ, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX IF NOT EXISTS idx_usage_cleanup_tasks_status_created_at + ON usage_cleanup_tasks(status, created_at DESC); + +CREATE INDEX IF NOT EXISTS idx_usage_cleanup_tasks_created_at + ON usage_cleanup_tasks(created_at DESC); diff --git a/backend/migrations/043_add_usage_cleanup_cancel_audit.sql b/backend/migrations/043_add_usage_cleanup_cancel_audit.sql new file mode 100644 index 00000000..42ca6696 --- /dev/null +++ b/backend/migrations/043_add_usage_cleanup_cancel_audit.sql @@ -0,0 +1,10 @@ +-- 043_add_usage_cleanup_cancel_audit.sql +-- usage_cleanup_tasks 取消任务审计字段 + +ALTER TABLE usage_cleanup_tasks + ADD COLUMN IF NOT EXISTS canceled_by BIGINT REFERENCES users(id) ON DELETE SET NULL, + ADD COLUMN IF NOT EXISTS canceled_at TIMESTAMPTZ; + +CREATE INDEX IF NOT EXISTS idx_usage_cleanup_tasks_canceled_at + ON usage_cleanup_tasks(canceled_at DESC); + diff --git a/config.yaml b/config.yaml index 424ce9eb..5e7513fb 100644 --- a/config.yaml +++ b/config.yaml @@ -251,6 +251,27 @@ dashboard_aggregation: # 日聚合保留天数 daily_days: 730 +# ============================================================================= +# Usage Cleanup Task Configuration +# 使用记录清理任务配置(重启生效) +# ============================================================================= +usage_cleanup: + # Enable cleanup task worker + # 启用清理任务执行器 + enabled: true + # Max date range (days) per task + # 单次任务最大时间跨度(天) + max_range_days: 31 + # Batch delete size + # 单批删除数量 + batch_size: 5000 + # Worker interval (seconds) + # 执行器轮询间隔(秒) + worker_interval_seconds: 10 + # Task execution timeout (seconds) + # 单次任务最大执行时长(秒) + task_timeout_seconds: 1800 + # ============================================================================= # Concurrency Wait Configuration # 并发等待配置 diff --git a/deploy/config.example.yaml b/deploy/config.example.yaml index 9e85d1ff..1f4aa266 100644 --- a/deploy/config.example.yaml +++ b/deploy/config.example.yaml @@ -292,6 +292,27 @@ dashboard_aggregation: # 日聚合保留天数 daily_days: 730 +# ============================================================================= +# Usage Cleanup Task Configuration +# 使用记录清理任务配置(重启生效) +# ============================================================================= +usage_cleanup: + # Enable cleanup task worker + # 启用清理任务执行器 + enabled: true + # Max date range (days) per task + # 单次任务最大时间跨度(天) + max_range_days: 31 + # Batch delete size + # 单批删除数量 + batch_size: 5000 + # Worker interval (seconds) + # 执行器轮询间隔(秒) + worker_interval_seconds: 10 + # Task execution timeout (seconds) + # 单次任务最大执行时长(秒) + task_timeout_seconds: 1800 + # ============================================================================= # Concurrency Wait Configuration # 并发等待配置 diff --git a/frontend/src/api/admin/dashboard.ts b/frontend/src/api/admin/dashboard.ts index 9b338788..ae48bec2 100644 --- a/frontend/src/api/admin/dashboard.ts +++ b/frontend/src/api/admin/dashboard.ts @@ -50,6 +50,7 @@ export interface TrendParams { account_id?: number group_id?: number stream?: boolean + billing_type?: number | null } export interface TrendResponse { @@ -78,6 +79,7 @@ export interface ModelStatsParams { account_id?: number group_id?: number stream?: boolean + billing_type?: number | null } export interface ModelStatsResponse { diff --git a/frontend/src/api/admin/usage.ts b/frontend/src/api/admin/usage.ts index dd85fc24..c271a2d0 100644 --- a/frontend/src/api/admin/usage.ts +++ b/frontend/src/api/admin/usage.ts @@ -31,6 +31,46 @@ export interface SimpleApiKey { user_id: number } +export interface UsageCleanupFilters { + start_time: string + end_time: string + user_id?: number + api_key_id?: number + account_id?: number + group_id?: number + model?: string | null + stream?: boolean | null + billing_type?: number | null +} + +export interface UsageCleanupTask { + id: number + status: string + filters: UsageCleanupFilters + created_by: number + deleted_rows: number + error_message?: string | null + canceled_by?: number | null + canceled_at?: string | null + started_at?: string | null + finished_at?: string | null + created_at: string + updated_at: string +} + +export interface CreateUsageCleanupTaskRequest { + start_date: string + end_date: string + user_id?: number + api_key_id?: number + account_id?: number + group_id?: number + model?: string | null + stream?: boolean | null + billing_type?: number | null + timezone?: string +} + export interface AdminUsageQueryParams extends UsageQueryParams { user_id?: number } @@ -108,11 +148,51 @@ export async function searchApiKeys(userId?: number, keyword?: string): Promise< return data } +/** + * List usage cleanup tasks (admin only) + * @param params - Query parameters for pagination + * @returns Paginated list of cleanup tasks + */ +export async function listCleanupTasks( + params: { page?: number; page_size?: number }, + options?: { signal?: AbortSignal } +): Promise> { + const { data } = await apiClient.get>('/admin/usage/cleanup-tasks', { + params, + signal: options?.signal + }) + return data +} + +/** + * Create a usage cleanup task (admin only) + * @param payload - Cleanup task parameters + * @returns Created cleanup task + */ +export async function createCleanupTask(payload: CreateUsageCleanupTaskRequest): Promise { + const { data } = await apiClient.post('/admin/usage/cleanup-tasks', payload) + return data +} + +/** + * Cancel a usage cleanup task (admin only) + * @param taskId - Task ID to cancel + */ +export async function cancelCleanupTask(taskId: number): Promise<{ id: number; status: string }> { + const { data } = await apiClient.post<{ id: number; status: string }>( + `/admin/usage/cleanup-tasks/${taskId}/cancel` + ) + return data +} + export const adminUsageAPI = { list, getStats, searchUsers, - searchApiKeys + searchApiKeys, + listCleanupTasks, + createCleanupTask, + cancelCleanupTask } export default adminUsageAPI diff --git a/frontend/src/components/admin/usage/UsageCleanupDialog.vue b/frontend/src/components/admin/usage/UsageCleanupDialog.vue new file mode 100644 index 00000000..4cd562e8 --- /dev/null +++ b/frontend/src/components/admin/usage/UsageCleanupDialog.vue @@ -0,0 +1,339 @@ + + + diff --git a/frontend/src/components/admin/usage/UsageFilters.vue b/frontend/src/components/admin/usage/UsageFilters.vue index 0926d83c..b17e0fdc 100644 --- a/frontend/src/components/admin/usage/UsageFilters.vue +++ b/frontend/src/components/admin/usage/UsageFilters.vue @@ -127,6 +127,12 @@ +
+
@@ -147,10 +153,13 @@
-
+
+ @@ -174,16 +183,20 @@ interface Props { exporting: boolean startDate: string endDate: string + showActions?: boolean } -const props = defineProps() +const props = withDefaults(defineProps(), { + showActions: true +}) const emit = defineEmits([ 'update:modelValue', 'update:startDate', 'update:endDate', 'change', 'reset', - 'export' + 'export', + 'cleanup' ]) const { t } = useI18n() @@ -221,6 +234,12 @@ const streamTypeOptions = ref([ { value: false, label: t('usage.sync') } ]) +const billingTypeOptions = ref([ + { value: null, label: t('admin.usage.allBillingTypes') }, + { value: 0, label: t('admin.usage.billingTypeBalance') }, + { value: 1, label: t('admin.usage.billingTypeSubscription') } +]) + const emitChange = () => emit('change') const updateStartDate = (value: string) => { diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts index e4fe1bd1..2a000d0b 100644 --- a/frontend/src/i18n/locales/en.ts +++ b/frontend/src/i18n/locales/en.ts @@ -1893,7 +1893,43 @@ export default { cacheCreationTokens: 'Cache Creation Tokens', cacheReadTokens: 'Cache Read Tokens', failedToLoad: 'Failed to load usage records', - ipAddress: 'IP' + billingType: 'Billing Type', + allBillingTypes: 'All Billing Types', + billingTypeBalance: 'Balance', + billingTypeSubscription: 'Subscription', + ipAddress: 'IP', + cleanup: { + button: 'Cleanup', + title: 'Cleanup Usage Records', + warning: 'Cleanup is irreversible and will affect historical stats.', + submit: 'Submit Cleanup', + submitting: 'Submitting...', + confirmTitle: 'Confirm Cleanup', + confirmMessage: 'Are you sure you want to submit this cleanup task? This action cannot be undone.', + confirmSubmit: 'Confirm Cleanup', + cancel: 'Cancel', + cancelConfirmTitle: 'Confirm Cancel', + cancelConfirmMessage: 'Are you sure you want to cancel this cleanup task?', + cancelConfirm: 'Confirm Cancel', + cancelSuccess: 'Cleanup task canceled', + cancelFailed: 'Failed to cancel cleanup task', + recentTasks: 'Recent Cleanup Tasks', + loadingTasks: 'Loading tasks...', + noTasks: 'No cleanup tasks yet', + range: 'Range', + deletedRows: 'Deleted', + missingRange: 'Please select a date range', + submitSuccess: 'Cleanup task created', + submitFailed: 'Failed to create cleanup task', + loadFailed: 'Failed to load cleanup tasks', + status: { + pending: 'Pending', + running: 'Running', + succeeded: 'Succeeded', + failed: 'Failed', + canceled: 'Canceled' + } + } }, // Ops Monitoring diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index 35242c69..0c27f7a3 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -2041,7 +2041,43 @@ export default { cacheCreationTokens: '缓存创建 Token', cacheReadTokens: '缓存读取 Token', failedToLoad: '加载使用记录失败', - ipAddress: 'IP' + billingType: '计费类型', + allBillingTypes: '全部计费类型', + billingTypeBalance: '钱包余额', + billingTypeSubscription: '订阅套餐', + ipAddress: 'IP', + cleanup: { + button: '清理', + title: '清理使用记录', + warning: '清理不可恢复,且会影响历史统计回看。', + submit: '提交清理', + submitting: '提交中...', + confirmTitle: '确认清理', + confirmMessage: '确定要提交清理任务吗?清理不可恢复。', + confirmSubmit: '确认清理', + cancel: '取消任务', + cancelConfirmTitle: '确认取消', + cancelConfirmMessage: '确定要取消该清理任务吗?', + cancelConfirm: '确认取消', + cancelSuccess: '清理任务已取消', + cancelFailed: '取消清理任务失败', + recentTasks: '最近清理任务', + loadingTasks: '正在加载任务...', + noTasks: '暂无清理任务', + range: '时间范围', + deletedRows: '删除数量', + missingRange: '请选择时间范围', + submitSuccess: '清理任务已创建', + submitFailed: '创建清理任务失败', + loadFailed: '加载清理任务失败', + status: { + pending: '待执行', + running: '执行中', + succeeded: '已完成', + failed: '失败', + canceled: '已取消' + } + } }, // Ops Monitoring diff --git a/frontend/src/types/index.ts b/frontend/src/types/index.ts index 523033c2..1bb6e5d6 100644 --- a/frontend/src/types/index.ts +++ b/frontend/src/types/index.ts @@ -618,6 +618,7 @@ export interface UsageLog { actual_cost: number rate_multiplier: number account_rate_multiplier?: number | null + billing_type: number stream: boolean duration_ms: number @@ -642,6 +643,33 @@ export interface UsageLog { subscription?: UserSubscription } +export interface UsageCleanupFilters { + start_time: string + end_time: string + user_id?: number + api_key_id?: number + account_id?: number + group_id?: number + model?: string | null + stream?: boolean | null + billing_type?: number | null +} + +export interface UsageCleanupTask { + id: number + status: string + filters: UsageCleanupFilters + created_by: number + deleted_rows: number + error_message?: string | null + canceled_by?: number | null + canceled_at?: string | null + started_at?: string | null + finished_at?: string | null + created_at: string + updated_at: string +} + export interface RedeemCode { id: number code: string @@ -865,6 +893,7 @@ export interface UsageQueryParams { group_id?: number model?: string stream?: boolean + billing_type?: number | null start_date?: string end_date?: string } diff --git a/frontend/src/views/admin/UsageView.vue b/frontend/src/views/admin/UsageView.vue index 6f62f59e..40b63ec3 100644 --- a/frontend/src/views/admin/UsageView.vue +++ b/frontend/src/views/admin/UsageView.vue @@ -17,12 +17,19 @@
- + + diff --git a/frontend/src/views/admin/SubscriptionsView.vue b/frontend/src/views/admin/SubscriptionsView.vue index 7b38b455..d5a47788 100644 --- a/frontend/src/views/admin/SubscriptionsView.vue +++ b/frontend/src/views/admin/SubscriptionsView.vue @@ -85,6 +85,57 @@
+ +
+ + +
+
+ +
+
+ {{ t('admin.subscriptions.columns.user') }} +
+ + +
+ + +
+
+
- {{ - row.user?.email || t('admin.redeem.userPrefix', { id: row.user_id }) - }} + + {{ userColumnMode === 'email' + ? (row.user?.email || t('admin.redeem.userPrefix', { id: row.user_id })) + : (row.user?.username || '-') + }} + @@ -545,8 +602,43 @@ import Icon from '@/components/icons/Icon.vue' const { t } = useI18n() const appStore = useAppStore() -const columns = computed(() => [ - { key: 'user', label: t('admin.subscriptions.columns.user'), sortable: true }, +// User column display mode: 'email' or 'username' +const userColumnMode = ref<'email' | 'username'>('email') +const USER_COLUMN_MODE_KEY = 'subscription-user-column-mode' + +const loadUserColumnMode = () => { + try { + const saved = localStorage.getItem(USER_COLUMN_MODE_KEY) + if (saved === 'email' || saved === 'username') { + userColumnMode.value = saved + } + } catch (e) { + console.error('Failed to load user column mode:', e) + } +} + +const saveUserColumnMode = () => { + try { + localStorage.setItem(USER_COLUMN_MODE_KEY, userColumnMode.value) + } catch (e) { + console.error('Failed to save user column mode:', e) + } +} + +const setUserColumnMode = (mode: 'email' | 'username') => { + userColumnMode.value = mode + saveUserColumnMode() +} + +// All available columns +const allColumns = computed(() => [ + { + key: 'user', + label: userColumnMode.value === 'email' + ? t('admin.subscriptions.columns.user') + : t('admin.users.columns.username'), + sortable: true + }, { key: 'group', label: t('admin.subscriptions.columns.group'), sortable: true }, { key: 'usage', label: t('admin.subscriptions.columns.usage'), sortable: false }, { key: 'expires_at', label: t('admin.subscriptions.columns.expires'), sortable: true }, @@ -554,6 +646,69 @@ const columns = computed(() => [ { key: 'actions', label: t('admin.subscriptions.columns.actions'), sortable: false } ]) +// Columns that can be toggled (exclude user and actions which are always visible) +const toggleableColumns = computed(() => + allColumns.value.filter(col => col.key !== 'user' && col.key !== 'actions') +) + +// Hidden columns set +const hiddenColumns = reactive>(new Set()) + +// Default hidden columns +const DEFAULT_HIDDEN_COLUMNS: string[] = [] + +// localStorage key +const HIDDEN_COLUMNS_KEY = 'subscription-hidden-columns' + +// Load saved column settings +const loadSavedColumns = () => { + try { + const saved = localStorage.getItem(HIDDEN_COLUMNS_KEY) + if (saved) { + const parsed = JSON.parse(saved) as string[] + parsed.forEach(key => hiddenColumns.add(key)) + } else { + DEFAULT_HIDDEN_COLUMNS.forEach(key => hiddenColumns.add(key)) + } + } catch (e) { + console.error('Failed to load saved columns:', e) + DEFAULT_HIDDEN_COLUMNS.forEach(key => hiddenColumns.add(key)) + } +} + +// Save column settings to localStorage +const saveColumnsToStorage = () => { + try { + localStorage.setItem(HIDDEN_COLUMNS_KEY, JSON.stringify([...hiddenColumns])) + } catch (e) { + console.error('Failed to save columns:', e) + } +} + +// Toggle column visibility +const toggleColumn = (key: string) => { + if (hiddenColumns.has(key)) { + hiddenColumns.delete(key) + } else { + hiddenColumns.add(key) + } + saveColumnsToStorage() +} + +// Check if column is visible +const isColumnVisible = (key: string) => !hiddenColumns.has(key) + +// Filtered columns for display +const columns = computed(() => + allColumns.value.filter(col => + col.key === 'user' || col.key === 'actions' || !hiddenColumns.has(col.key) + ) +) + +// Column dropdown state +const showColumnDropdown = ref(false) +const columnDropdownRef = ref(null) + // Filter options const statusOptions = computed(() => [ { value: '', label: t('admin.subscriptions.allStatus') }, @@ -949,14 +1104,19 @@ const formatResetTime = (windowStart: string, period: 'daily' | 'weekly' | 'mont } } -// Handle click outside to close user dropdown +// Handle click outside to close dropdowns const handleClickOutside = (event: MouseEvent) => { const target = event.target as HTMLElement if (!target.closest('[data-assign-user-search]')) showUserDropdown.value = false if (!target.closest('[data-filter-user-search]')) showFilterUserDropdown.value = false + if (columnDropdownRef.value && !columnDropdownRef.value.contains(target)) { + showColumnDropdown.value = false + } } onMounted(() => { + loadUserColumnMode() + loadSavedColumns() loadSubscriptions() loadGroups() document.addEventListener('click', handleClickOutside) From bf7b79f2f037a20b930fe5d5e9760f190ef0ce6b Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Sun, 18 Jan 2026 11:58:53 +0800 Subject: [PATCH 050/147] =?UTF-8?q?fix(=E6=95=B0=E6=8D=AE=E5=BA=93):=20?= =?UTF-8?q?=E4=BC=98=E5=8C=96=E4=BB=BB=E5=8A=A1=E7=8A=B6=E6=80=81=E6=9B=B4?= =?UTF-8?q?=E6=96=B0=E6=9F=A5=E8=AF=A2=EF=BC=8C=E4=BD=BF=E7=94=A8=E5=88=AB?= =?UTF-8?q?=E5=90=8D=E6=8F=90=E9=AB=98=E5=8F=AF=E8=AF=BB=E6=80=A7?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/repository/usage_cleanup_repo.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/backend/internal/repository/usage_cleanup_repo.go b/backend/internal/repository/usage_cleanup_repo.go index b703cc9f..b6dfa42a 100644 --- a/backend/internal/repository/usage_cleanup_repo.go +++ b/backend/internal/repository/usage_cleanup_repo.go @@ -136,16 +136,16 @@ func (r *usageCleanupRepository) ClaimNextPendingTask(ctx context.Context, stale LIMIT 1 FOR UPDATE SKIP LOCKED ) - UPDATE usage_cleanup_tasks + UPDATE usage_cleanup_tasks AS tasks SET status = $4, started_at = NOW(), finished_at = NULL, error_message = NULL, updated_at = NOW() FROM next - WHERE usage_cleanup_tasks.id = next.id - RETURNING id, status, filters, created_by, deleted_rows, error_message, - started_at, finished_at, created_at, updated_at + WHERE tasks.id = next.id + RETURNING tasks.id, tasks.status, tasks.filters, tasks.created_by, tasks.deleted_rows, tasks.error_message, + tasks.started_at, tasks.finished_at, tasks.created_at, tasks.updated_at ` var task service.UsageCleanupTask var filtersJSON []byte From bd18f4b8ef2d1bbb713c362a5efbe20d4bc4fbc8 Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Sun, 18 Jan 2026 14:18:28 +0800 Subject: [PATCH 051/147] =?UTF-8?q?feat(=E6=B8=85=E7=90=86=E4=BB=BB?= =?UTF-8?q?=E5=8A=A1):=20=E5=BC=95=E5=85=A5Ent=E5=AD=98=E5=82=A8=E5=B9=B6?= =?UTF-8?q?=E8=A1=A5=E5=85=85=E6=97=A5=E5=BF=97=E4=B8=8E=E6=B5=8B=E8=AF=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 新增 usage_cleanup_task Ent schema 与仓储实现,支持清理任务排序分页 补充清理任务全链路日志、仪表盘重算触发及 UI 过滤调整 完善 repository/service 单测并引入 sqlite 测试依赖 --- backend/cmd/server/wire_gen.go | 2 +- backend/ent/client.go | 159 ++- backend/ent/ent.go | 2 + backend/ent/hook/hook.go | 12 + backend/ent/intercept/intercept.go | 30 + backend/ent/migrate/schema.go | 42 + backend/ent/mutation.go | 1086 +++++++++++++++ backend/ent/predicate/predicate.go | 3 + backend/ent/runtime/runtime.go | 38 + backend/ent/schema/mixins/soft_delete.go | 5 +- backend/ent/schema/usage_cleanup_task.go | 75 ++ backend/ent/tx.go | 3 + backend/ent/usagecleanuptask.go | 236 ++++ .../ent/usagecleanuptask/usagecleanuptask.go | 137 ++ backend/ent/usagecleanuptask/where.go | 620 +++++++++ backend/ent/usagecleanuptask_create.go | 1190 +++++++++++++++++ backend/ent/usagecleanuptask_delete.go | 88 ++ backend/ent/usagecleanuptask_query.go | 564 ++++++++ backend/ent/usagecleanuptask_update.go | 702 ++++++++++ backend/go.mod | 8 +- backend/go.sum | 15 + .../admin/usage_cleanup_handler_test.go | 2 +- .../internal/repository/usage_cleanup_repo.go | 234 +++- .../repository/usage_cleanup_repo_ent_test.go | 251 ++++ .../repository/usage_cleanup_repo_test.go | 44 +- .../service/dashboard_aggregation_service.go | 2 +- .../internal/service/usage_cleanup_service.go | 18 +- .../service/usage_cleanup_service_test.go | 397 +++++- .../admin/usage/UsageCleanupDialog.vue | 2 +- 29 files changed, 5920 insertions(+), 47 deletions(-) create mode 100644 backend/ent/schema/usage_cleanup_task.go create mode 100644 backend/ent/usagecleanuptask.go create mode 100644 backend/ent/usagecleanuptask/usagecleanuptask.go create mode 100644 backend/ent/usagecleanuptask/where.go create mode 100644 backend/ent/usagecleanuptask_create.go create mode 100644 backend/ent/usagecleanuptask_delete.go create mode 100644 backend/ent/usagecleanuptask_query.go create mode 100644 backend/ent/usagecleanuptask_update.go create mode 100644 backend/internal/repository/usage_cleanup_repo_ent_test.go diff --git a/backend/cmd/server/wire_gen.go b/backend/cmd/server/wire_gen.go index 509cf13a..e5bfa515 100644 --- a/backend/cmd/server/wire_gen.go +++ b/backend/cmd/server/wire_gen.go @@ -153,7 +153,7 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { updateService := service.ProvideUpdateService(updateCache, gitHubReleaseClient, serviceBuildInfo) systemHandler := handler.ProvideSystemHandler(updateService) adminSubscriptionHandler := admin.NewSubscriptionHandler(subscriptionService) - usageCleanupRepository := repository.NewUsageCleanupRepository(db) + usageCleanupRepository := repository.NewUsageCleanupRepository(client, db) usageCleanupService := service.ProvideUsageCleanupService(usageCleanupRepository, timingWheelService, dashboardAggregationService, configConfig) adminUsageHandler := admin.NewUsageHandler(usageService, apiKeyService, adminService, usageCleanupService) userAttributeDefinitionRepository := repository.NewUserAttributeDefinitionRepository(client) diff --git a/backend/ent/client.go b/backend/ent/client.go index 35cf644f..f6c13e84 100644 --- a/backend/ent/client.go +++ b/backend/ent/client.go @@ -24,6 +24,7 @@ import ( "github.com/Wei-Shaw/sub2api/ent/proxy" "github.com/Wei-Shaw/sub2api/ent/redeemcode" "github.com/Wei-Shaw/sub2api/ent/setting" + "github.com/Wei-Shaw/sub2api/ent/usagecleanuptask" "github.com/Wei-Shaw/sub2api/ent/usagelog" "github.com/Wei-Shaw/sub2api/ent/user" "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" @@ -57,6 +58,8 @@ type Client struct { RedeemCode *RedeemCodeClient // Setting is the client for interacting with the Setting builders. Setting *SettingClient + // UsageCleanupTask is the client for interacting with the UsageCleanupTask builders. + UsageCleanupTask *UsageCleanupTaskClient // UsageLog is the client for interacting with the UsageLog builders. UsageLog *UsageLogClient // User is the client for interacting with the User builders. @@ -89,6 +92,7 @@ func (c *Client) init() { c.Proxy = NewProxyClient(c.config) c.RedeemCode = NewRedeemCodeClient(c.config) c.Setting = NewSettingClient(c.config) + c.UsageCleanupTask = NewUsageCleanupTaskClient(c.config) c.UsageLog = NewUsageLogClient(c.config) c.User = NewUserClient(c.config) c.UserAllowedGroup = NewUserAllowedGroupClient(c.config) @@ -196,6 +200,7 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) { Proxy: NewProxyClient(cfg), RedeemCode: NewRedeemCodeClient(cfg), Setting: NewSettingClient(cfg), + UsageCleanupTask: NewUsageCleanupTaskClient(cfg), UsageLog: NewUsageLogClient(cfg), User: NewUserClient(cfg), UserAllowedGroup: NewUserAllowedGroupClient(cfg), @@ -230,6 +235,7 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) Proxy: NewProxyClient(cfg), RedeemCode: NewRedeemCodeClient(cfg), Setting: NewSettingClient(cfg), + UsageCleanupTask: NewUsageCleanupTaskClient(cfg), UsageLog: NewUsageLogClient(cfg), User: NewUserClient(cfg), UserAllowedGroup: NewUserAllowedGroupClient(cfg), @@ -266,8 +272,9 @@ func (c *Client) Close() error { func (c *Client) Use(hooks ...Hook) { for _, n := range []interface{ Use(...Hook) }{ c.APIKey, c.Account, c.AccountGroup, c.Group, c.PromoCode, c.PromoCodeUsage, - c.Proxy, c.RedeemCode, c.Setting, c.UsageLog, c.User, c.UserAllowedGroup, - c.UserAttributeDefinition, c.UserAttributeValue, c.UserSubscription, + c.Proxy, c.RedeemCode, c.Setting, c.UsageCleanupTask, c.UsageLog, c.User, + c.UserAllowedGroup, c.UserAttributeDefinition, c.UserAttributeValue, + c.UserSubscription, } { n.Use(hooks...) } @@ -278,8 +285,9 @@ func (c *Client) Use(hooks ...Hook) { func (c *Client) Intercept(interceptors ...Interceptor) { for _, n := range []interface{ Intercept(...Interceptor) }{ c.APIKey, c.Account, c.AccountGroup, c.Group, c.PromoCode, c.PromoCodeUsage, - c.Proxy, c.RedeemCode, c.Setting, c.UsageLog, c.User, c.UserAllowedGroup, - c.UserAttributeDefinition, c.UserAttributeValue, c.UserSubscription, + c.Proxy, c.RedeemCode, c.Setting, c.UsageCleanupTask, c.UsageLog, c.User, + c.UserAllowedGroup, c.UserAttributeDefinition, c.UserAttributeValue, + c.UserSubscription, } { n.Intercept(interceptors...) } @@ -306,6 +314,8 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { return c.RedeemCode.mutate(ctx, m) case *SettingMutation: return c.Setting.mutate(ctx, m) + case *UsageCleanupTaskMutation: + return c.UsageCleanupTask.mutate(ctx, m) case *UsageLogMutation: return c.UsageLog.mutate(ctx, m) case *UserMutation: @@ -1847,6 +1857,139 @@ func (c *SettingClient) mutate(ctx context.Context, m *SettingMutation) (Value, } } +// UsageCleanupTaskClient is a client for the UsageCleanupTask schema. +type UsageCleanupTaskClient struct { + config +} + +// NewUsageCleanupTaskClient returns a client for the UsageCleanupTask from the given config. +func NewUsageCleanupTaskClient(c config) *UsageCleanupTaskClient { + return &UsageCleanupTaskClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `usagecleanuptask.Hooks(f(g(h())))`. +func (c *UsageCleanupTaskClient) Use(hooks ...Hook) { + c.hooks.UsageCleanupTask = append(c.hooks.UsageCleanupTask, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `usagecleanuptask.Intercept(f(g(h())))`. +func (c *UsageCleanupTaskClient) Intercept(interceptors ...Interceptor) { + c.inters.UsageCleanupTask = append(c.inters.UsageCleanupTask, interceptors...) +} + +// Create returns a builder for creating a UsageCleanupTask entity. +func (c *UsageCleanupTaskClient) Create() *UsageCleanupTaskCreate { + mutation := newUsageCleanupTaskMutation(c.config, OpCreate) + return &UsageCleanupTaskCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of UsageCleanupTask entities. +func (c *UsageCleanupTaskClient) CreateBulk(builders ...*UsageCleanupTaskCreate) *UsageCleanupTaskCreateBulk { + return &UsageCleanupTaskCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *UsageCleanupTaskClient) MapCreateBulk(slice any, setFunc func(*UsageCleanupTaskCreate, int)) *UsageCleanupTaskCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &UsageCleanupTaskCreateBulk{err: fmt.Errorf("calling to UsageCleanupTaskClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*UsageCleanupTaskCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &UsageCleanupTaskCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for UsageCleanupTask. +func (c *UsageCleanupTaskClient) Update() *UsageCleanupTaskUpdate { + mutation := newUsageCleanupTaskMutation(c.config, OpUpdate) + return &UsageCleanupTaskUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *UsageCleanupTaskClient) UpdateOne(_m *UsageCleanupTask) *UsageCleanupTaskUpdateOne { + mutation := newUsageCleanupTaskMutation(c.config, OpUpdateOne, withUsageCleanupTask(_m)) + return &UsageCleanupTaskUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *UsageCleanupTaskClient) UpdateOneID(id int64) *UsageCleanupTaskUpdateOne { + mutation := newUsageCleanupTaskMutation(c.config, OpUpdateOne, withUsageCleanupTaskID(id)) + return &UsageCleanupTaskUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for UsageCleanupTask. +func (c *UsageCleanupTaskClient) Delete() *UsageCleanupTaskDelete { + mutation := newUsageCleanupTaskMutation(c.config, OpDelete) + return &UsageCleanupTaskDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *UsageCleanupTaskClient) DeleteOne(_m *UsageCleanupTask) *UsageCleanupTaskDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *UsageCleanupTaskClient) DeleteOneID(id int64) *UsageCleanupTaskDeleteOne { + builder := c.Delete().Where(usagecleanuptask.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &UsageCleanupTaskDeleteOne{builder} +} + +// Query returns a query builder for UsageCleanupTask. +func (c *UsageCleanupTaskClient) Query() *UsageCleanupTaskQuery { + return &UsageCleanupTaskQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeUsageCleanupTask}, + inters: c.Interceptors(), + } +} + +// Get returns a UsageCleanupTask entity by its id. +func (c *UsageCleanupTaskClient) Get(ctx context.Context, id int64) (*UsageCleanupTask, error) { + return c.Query().Where(usagecleanuptask.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *UsageCleanupTaskClient) GetX(ctx context.Context, id int64) *UsageCleanupTask { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *UsageCleanupTaskClient) Hooks() []Hook { + return c.hooks.UsageCleanupTask +} + +// Interceptors returns the client interceptors. +func (c *UsageCleanupTaskClient) Interceptors() []Interceptor { + return c.inters.UsageCleanupTask +} + +func (c *UsageCleanupTaskClient) mutate(ctx context.Context, m *UsageCleanupTaskMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&UsageCleanupTaskCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&UsageCleanupTaskUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&UsageCleanupTaskUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&UsageCleanupTaskDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown UsageCleanupTask mutation op: %q", m.Op()) + } +} + // UsageLogClient is a client for the UsageLog schema. type UsageLogClient struct { config @@ -2974,13 +3117,13 @@ func (c *UserSubscriptionClient) mutate(ctx context.Context, m *UserSubscription type ( hooks struct { APIKey, Account, AccountGroup, Group, PromoCode, PromoCodeUsage, Proxy, - RedeemCode, Setting, UsageLog, User, UserAllowedGroup, UserAttributeDefinition, - UserAttributeValue, UserSubscription []ent.Hook + RedeemCode, Setting, UsageCleanupTask, UsageLog, User, UserAllowedGroup, + UserAttributeDefinition, UserAttributeValue, UserSubscription []ent.Hook } inters struct { APIKey, Account, AccountGroup, Group, PromoCode, PromoCodeUsage, Proxy, - RedeemCode, Setting, UsageLog, User, UserAllowedGroup, UserAttributeDefinition, - UserAttributeValue, UserSubscription []ent.Interceptor + RedeemCode, Setting, UsageCleanupTask, UsageLog, User, UserAllowedGroup, + UserAttributeDefinition, UserAttributeValue, UserSubscription []ent.Interceptor } ) diff --git a/backend/ent/ent.go b/backend/ent/ent.go index 410375a7..4bcc2642 100644 --- a/backend/ent/ent.go +++ b/backend/ent/ent.go @@ -21,6 +21,7 @@ import ( "github.com/Wei-Shaw/sub2api/ent/proxy" "github.com/Wei-Shaw/sub2api/ent/redeemcode" "github.com/Wei-Shaw/sub2api/ent/setting" + "github.com/Wei-Shaw/sub2api/ent/usagecleanuptask" "github.com/Wei-Shaw/sub2api/ent/usagelog" "github.com/Wei-Shaw/sub2api/ent/user" "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" @@ -96,6 +97,7 @@ func checkColumn(t, c string) error { proxy.Table: proxy.ValidColumn, redeemcode.Table: redeemcode.ValidColumn, setting.Table: setting.ValidColumn, + usagecleanuptask.Table: usagecleanuptask.ValidColumn, usagelog.Table: usagelog.ValidColumn, user.Table: user.ValidColumn, userallowedgroup.Table: userallowedgroup.ValidColumn, diff --git a/backend/ent/hook/hook.go b/backend/ent/hook/hook.go index 532b0d2c..edd84f5e 100644 --- a/backend/ent/hook/hook.go +++ b/backend/ent/hook/hook.go @@ -117,6 +117,18 @@ func (f SettingFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, err return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.SettingMutation", m) } +// The UsageCleanupTaskFunc type is an adapter to allow the use of ordinary +// function as UsageCleanupTask mutator. +type UsageCleanupTaskFunc func(context.Context, *ent.UsageCleanupTaskMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f UsageCleanupTaskFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.UsageCleanupTaskMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.UsageCleanupTaskMutation", m) +} + // The UsageLogFunc type is an adapter to allow the use of ordinary // function as UsageLog mutator. type UsageLogFunc func(context.Context, *ent.UsageLogMutation) (ent.Value, error) diff --git a/backend/ent/intercept/intercept.go b/backend/ent/intercept/intercept.go index 765d39b4..f18c0624 100644 --- a/backend/ent/intercept/intercept.go +++ b/backend/ent/intercept/intercept.go @@ -18,6 +18,7 @@ import ( "github.com/Wei-Shaw/sub2api/ent/proxy" "github.com/Wei-Shaw/sub2api/ent/redeemcode" "github.com/Wei-Shaw/sub2api/ent/setting" + "github.com/Wei-Shaw/sub2api/ent/usagecleanuptask" "github.com/Wei-Shaw/sub2api/ent/usagelog" "github.com/Wei-Shaw/sub2api/ent/user" "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" @@ -325,6 +326,33 @@ func (f TraverseSetting) Traverse(ctx context.Context, q ent.Query) error { return fmt.Errorf("unexpected query type %T. expect *ent.SettingQuery", q) } +// The UsageCleanupTaskFunc type is an adapter to allow the use of ordinary function as a Querier. +type UsageCleanupTaskFunc func(context.Context, *ent.UsageCleanupTaskQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f UsageCleanupTaskFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.UsageCleanupTaskQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.UsageCleanupTaskQuery", q) +} + +// The TraverseUsageCleanupTask type is an adapter to allow the use of ordinary function as Traverser. +type TraverseUsageCleanupTask func(context.Context, *ent.UsageCleanupTaskQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseUsageCleanupTask) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseUsageCleanupTask) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.UsageCleanupTaskQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.UsageCleanupTaskQuery", q) +} + // The UsageLogFunc type is an adapter to allow the use of ordinary function as a Querier. type UsageLogFunc func(context.Context, *ent.UsageLogQuery) (ent.Value, error) @@ -508,6 +536,8 @@ func NewQuery(q ent.Query) (Query, error) { return &query[*ent.RedeemCodeQuery, predicate.RedeemCode, redeemcode.OrderOption]{typ: ent.TypeRedeemCode, tq: q}, nil case *ent.SettingQuery: return &query[*ent.SettingQuery, predicate.Setting, setting.OrderOption]{typ: ent.TypeSetting, tq: q}, nil + case *ent.UsageCleanupTaskQuery: + return &query[*ent.UsageCleanupTaskQuery, predicate.UsageCleanupTask, usagecleanuptask.OrderOption]{typ: ent.TypeUsageCleanupTask, tq: q}, nil case *ent.UsageLogQuery: return &query[*ent.UsageLogQuery, predicate.UsageLog, usagelog.OrderOption]{typ: ent.TypeUsageLog, tq: q}, nil case *ent.UserQuery: diff --git a/backend/ent/migrate/schema.go b/backend/ent/migrate/schema.go index b377804f..d1f05186 100644 --- a/backend/ent/migrate/schema.go +++ b/backend/ent/migrate/schema.go @@ -434,6 +434,44 @@ var ( Columns: SettingsColumns, PrimaryKey: []*schema.Column{SettingsColumns[0]}, } + // UsageCleanupTasksColumns holds the columns for the "usage_cleanup_tasks" table. + UsageCleanupTasksColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "status", Type: field.TypeString, Size: 20}, + {Name: "filters", Type: field.TypeJSON}, + {Name: "created_by", Type: field.TypeInt64}, + {Name: "deleted_rows", Type: field.TypeInt64, Default: 0}, + {Name: "error_message", Type: field.TypeString, Nullable: true}, + {Name: "canceled_by", Type: field.TypeInt64, Nullable: true}, + {Name: "canceled_at", Type: field.TypeTime, Nullable: true}, + {Name: "started_at", Type: field.TypeTime, Nullable: true}, + {Name: "finished_at", Type: field.TypeTime, Nullable: true}, + } + // UsageCleanupTasksTable holds the schema information for the "usage_cleanup_tasks" table. + UsageCleanupTasksTable = &schema.Table{ + Name: "usage_cleanup_tasks", + Columns: UsageCleanupTasksColumns, + PrimaryKey: []*schema.Column{UsageCleanupTasksColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "usagecleanuptask_status_created_at", + Unique: false, + Columns: []*schema.Column{UsageCleanupTasksColumns[3], UsageCleanupTasksColumns[1]}, + }, + { + Name: "usagecleanuptask_created_at", + Unique: false, + Columns: []*schema.Column{UsageCleanupTasksColumns[1]}, + }, + { + Name: "usagecleanuptask_canceled_at", + Unique: false, + Columns: []*schema.Column{UsageCleanupTasksColumns[9]}, + }, + }, + } // UsageLogsColumns holds the columns for the "usage_logs" table. UsageLogsColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt64, Increment: true}, @@ -805,6 +843,7 @@ var ( ProxiesTable, RedeemCodesTable, SettingsTable, + UsageCleanupTasksTable, UsageLogsTable, UsersTable, UserAllowedGroupsTable, @@ -851,6 +890,9 @@ func init() { SettingsTable.Annotation = &entsql.Annotation{ Table: "settings", } + UsageCleanupTasksTable.Annotation = &entsql.Annotation{ + Table: "usage_cleanup_tasks", + } UsageLogsTable.ForeignKeys[0].RefTable = APIKeysTable UsageLogsTable.ForeignKeys[1].RefTable = AccountsTable UsageLogsTable.ForeignKeys[2].RefTable = GroupsTable diff --git a/backend/ent/mutation.go b/backend/ent/mutation.go index cd2fe8e0..9b330616 100644 --- a/backend/ent/mutation.go +++ b/backend/ent/mutation.go @@ -4,6 +4,7 @@ package ent import ( "context" + "encoding/json" "errors" "fmt" "sync" @@ -21,6 +22,7 @@ import ( "github.com/Wei-Shaw/sub2api/ent/proxy" "github.com/Wei-Shaw/sub2api/ent/redeemcode" "github.com/Wei-Shaw/sub2api/ent/setting" + "github.com/Wei-Shaw/sub2api/ent/usagecleanuptask" "github.com/Wei-Shaw/sub2api/ent/usagelog" "github.com/Wei-Shaw/sub2api/ent/user" "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" @@ -47,6 +49,7 @@ const ( TypeProxy = "Proxy" TypeRedeemCode = "RedeemCode" TypeSetting = "Setting" + TypeUsageCleanupTask = "UsageCleanupTask" TypeUsageLog = "UsageLog" TypeUser = "User" TypeUserAllowedGroup = "UserAllowedGroup" @@ -10370,6 +10373,1089 @@ func (m *SettingMutation) ResetEdge(name string) error { return fmt.Errorf("unknown Setting edge %s", name) } +// UsageCleanupTaskMutation represents an operation that mutates the UsageCleanupTask nodes in the graph. +type UsageCleanupTaskMutation struct { + config + op Op + typ string + id *int64 + created_at *time.Time + updated_at *time.Time + status *string + filters *json.RawMessage + appendfilters json.RawMessage + created_by *int64 + addcreated_by *int64 + deleted_rows *int64 + adddeleted_rows *int64 + error_message *string + canceled_by *int64 + addcanceled_by *int64 + canceled_at *time.Time + started_at *time.Time + finished_at *time.Time + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*UsageCleanupTask, error) + predicates []predicate.UsageCleanupTask +} + +var _ ent.Mutation = (*UsageCleanupTaskMutation)(nil) + +// usagecleanuptaskOption allows management of the mutation configuration using functional options. +type usagecleanuptaskOption func(*UsageCleanupTaskMutation) + +// newUsageCleanupTaskMutation creates new mutation for the UsageCleanupTask entity. +func newUsageCleanupTaskMutation(c config, op Op, opts ...usagecleanuptaskOption) *UsageCleanupTaskMutation { + m := &UsageCleanupTaskMutation{ + config: c, + op: op, + typ: TypeUsageCleanupTask, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withUsageCleanupTaskID sets the ID field of the mutation. +func withUsageCleanupTaskID(id int64) usagecleanuptaskOption { + return func(m *UsageCleanupTaskMutation) { + var ( + err error + once sync.Once + value *UsageCleanupTask + ) + m.oldValue = func(ctx context.Context) (*UsageCleanupTask, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().UsageCleanupTask.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withUsageCleanupTask sets the old UsageCleanupTask of the mutation. +func withUsageCleanupTask(node *UsageCleanupTask) usagecleanuptaskOption { + return func(m *UsageCleanupTaskMutation) { + m.oldValue = func(context.Context) (*UsageCleanupTask, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m UsageCleanupTaskMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m UsageCleanupTaskMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *UsageCleanupTaskMutation) ID() (id int64, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *UsageCleanupTaskMutation) IDs(ctx context.Context) ([]int64, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int64{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().UsageCleanupTask.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreatedAt sets the "created_at" field. +func (m *UsageCleanupTaskMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *UsageCleanupTaskMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the UsageCleanupTask entity. +// If the UsageCleanupTask object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageCleanupTaskMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *UsageCleanupTaskMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *UsageCleanupTaskMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *UsageCleanupTaskMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the UsageCleanupTask entity. +// If the UsageCleanupTask object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageCleanupTaskMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *UsageCleanupTaskMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetStatus sets the "status" field. +func (m *UsageCleanupTaskMutation) SetStatus(s string) { + m.status = &s +} + +// Status returns the value of the "status" field in the mutation. +func (m *UsageCleanupTaskMutation) Status() (r string, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the UsageCleanupTask entity. +// If the UsageCleanupTask object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageCleanupTaskMutation) OldStatus(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStatus: %w", err) + } + return oldValue.Status, nil +} + +// ResetStatus resets all changes to the "status" field. +func (m *UsageCleanupTaskMutation) ResetStatus() { + m.status = nil +} + +// SetFilters sets the "filters" field. +func (m *UsageCleanupTaskMutation) SetFilters(jm json.RawMessage) { + m.filters = &jm + m.appendfilters = nil +} + +// Filters returns the value of the "filters" field in the mutation. +func (m *UsageCleanupTaskMutation) Filters() (r json.RawMessage, exists bool) { + v := m.filters + if v == nil { + return + } + return *v, true +} + +// OldFilters returns the old "filters" field's value of the UsageCleanupTask entity. +// If the UsageCleanupTask object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageCleanupTaskMutation) OldFilters(ctx context.Context) (v json.RawMessage, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldFilters is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldFilters requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldFilters: %w", err) + } + return oldValue.Filters, nil +} + +// AppendFilters adds jm to the "filters" field. +func (m *UsageCleanupTaskMutation) AppendFilters(jm json.RawMessage) { + m.appendfilters = append(m.appendfilters, jm...) +} + +// AppendedFilters returns the list of values that were appended to the "filters" field in this mutation. +func (m *UsageCleanupTaskMutation) AppendedFilters() (json.RawMessage, bool) { + if len(m.appendfilters) == 0 { + return nil, false + } + return m.appendfilters, true +} + +// ResetFilters resets all changes to the "filters" field. +func (m *UsageCleanupTaskMutation) ResetFilters() { + m.filters = nil + m.appendfilters = nil +} + +// SetCreatedBy sets the "created_by" field. +func (m *UsageCleanupTaskMutation) SetCreatedBy(i int64) { + m.created_by = &i + m.addcreated_by = nil +} + +// CreatedBy returns the value of the "created_by" field in the mutation. +func (m *UsageCleanupTaskMutation) CreatedBy() (r int64, exists bool) { + v := m.created_by + if v == nil { + return + } + return *v, true +} + +// OldCreatedBy returns the old "created_by" field's value of the UsageCleanupTask entity. +// If the UsageCleanupTask object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageCleanupTaskMutation) OldCreatedBy(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedBy is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedBy requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedBy: %w", err) + } + return oldValue.CreatedBy, nil +} + +// AddCreatedBy adds i to the "created_by" field. +func (m *UsageCleanupTaskMutation) AddCreatedBy(i int64) { + if m.addcreated_by != nil { + *m.addcreated_by += i + } else { + m.addcreated_by = &i + } +} + +// AddedCreatedBy returns the value that was added to the "created_by" field in this mutation. +func (m *UsageCleanupTaskMutation) AddedCreatedBy() (r int64, exists bool) { + v := m.addcreated_by + if v == nil { + return + } + return *v, true +} + +// ResetCreatedBy resets all changes to the "created_by" field. +func (m *UsageCleanupTaskMutation) ResetCreatedBy() { + m.created_by = nil + m.addcreated_by = nil +} + +// SetDeletedRows sets the "deleted_rows" field. +func (m *UsageCleanupTaskMutation) SetDeletedRows(i int64) { + m.deleted_rows = &i + m.adddeleted_rows = nil +} + +// DeletedRows returns the value of the "deleted_rows" field in the mutation. +func (m *UsageCleanupTaskMutation) DeletedRows() (r int64, exists bool) { + v := m.deleted_rows + if v == nil { + return + } + return *v, true +} + +// OldDeletedRows returns the old "deleted_rows" field's value of the UsageCleanupTask entity. +// If the UsageCleanupTask object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageCleanupTaskMutation) OldDeletedRows(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDeletedRows is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDeletedRows requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDeletedRows: %w", err) + } + return oldValue.DeletedRows, nil +} + +// AddDeletedRows adds i to the "deleted_rows" field. +func (m *UsageCleanupTaskMutation) AddDeletedRows(i int64) { + if m.adddeleted_rows != nil { + *m.adddeleted_rows += i + } else { + m.adddeleted_rows = &i + } +} + +// AddedDeletedRows returns the value that was added to the "deleted_rows" field in this mutation. +func (m *UsageCleanupTaskMutation) AddedDeletedRows() (r int64, exists bool) { + v := m.adddeleted_rows + if v == nil { + return + } + return *v, true +} + +// ResetDeletedRows resets all changes to the "deleted_rows" field. +func (m *UsageCleanupTaskMutation) ResetDeletedRows() { + m.deleted_rows = nil + m.adddeleted_rows = nil +} + +// SetErrorMessage sets the "error_message" field. +func (m *UsageCleanupTaskMutation) SetErrorMessage(s string) { + m.error_message = &s +} + +// ErrorMessage returns the value of the "error_message" field in the mutation. +func (m *UsageCleanupTaskMutation) ErrorMessage() (r string, exists bool) { + v := m.error_message + if v == nil { + return + } + return *v, true +} + +// OldErrorMessage returns the old "error_message" field's value of the UsageCleanupTask entity. +// If the UsageCleanupTask object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageCleanupTaskMutation) OldErrorMessage(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldErrorMessage is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldErrorMessage requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldErrorMessage: %w", err) + } + return oldValue.ErrorMessage, nil +} + +// ClearErrorMessage clears the value of the "error_message" field. +func (m *UsageCleanupTaskMutation) ClearErrorMessage() { + m.error_message = nil + m.clearedFields[usagecleanuptask.FieldErrorMessage] = struct{}{} +} + +// ErrorMessageCleared returns if the "error_message" field was cleared in this mutation. +func (m *UsageCleanupTaskMutation) ErrorMessageCleared() bool { + _, ok := m.clearedFields[usagecleanuptask.FieldErrorMessage] + return ok +} + +// ResetErrorMessage resets all changes to the "error_message" field. +func (m *UsageCleanupTaskMutation) ResetErrorMessage() { + m.error_message = nil + delete(m.clearedFields, usagecleanuptask.FieldErrorMessage) +} + +// SetCanceledBy sets the "canceled_by" field. +func (m *UsageCleanupTaskMutation) SetCanceledBy(i int64) { + m.canceled_by = &i + m.addcanceled_by = nil +} + +// CanceledBy returns the value of the "canceled_by" field in the mutation. +func (m *UsageCleanupTaskMutation) CanceledBy() (r int64, exists bool) { + v := m.canceled_by + if v == nil { + return + } + return *v, true +} + +// OldCanceledBy returns the old "canceled_by" field's value of the UsageCleanupTask entity. +// If the UsageCleanupTask object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageCleanupTaskMutation) OldCanceledBy(ctx context.Context) (v *int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCanceledBy is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCanceledBy requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCanceledBy: %w", err) + } + return oldValue.CanceledBy, nil +} + +// AddCanceledBy adds i to the "canceled_by" field. +func (m *UsageCleanupTaskMutation) AddCanceledBy(i int64) { + if m.addcanceled_by != nil { + *m.addcanceled_by += i + } else { + m.addcanceled_by = &i + } +} + +// AddedCanceledBy returns the value that was added to the "canceled_by" field in this mutation. +func (m *UsageCleanupTaskMutation) AddedCanceledBy() (r int64, exists bool) { + v := m.addcanceled_by + if v == nil { + return + } + return *v, true +} + +// ClearCanceledBy clears the value of the "canceled_by" field. +func (m *UsageCleanupTaskMutation) ClearCanceledBy() { + m.canceled_by = nil + m.addcanceled_by = nil + m.clearedFields[usagecleanuptask.FieldCanceledBy] = struct{}{} +} + +// CanceledByCleared returns if the "canceled_by" field was cleared in this mutation. +func (m *UsageCleanupTaskMutation) CanceledByCleared() bool { + _, ok := m.clearedFields[usagecleanuptask.FieldCanceledBy] + return ok +} + +// ResetCanceledBy resets all changes to the "canceled_by" field. +func (m *UsageCleanupTaskMutation) ResetCanceledBy() { + m.canceled_by = nil + m.addcanceled_by = nil + delete(m.clearedFields, usagecleanuptask.FieldCanceledBy) +} + +// SetCanceledAt sets the "canceled_at" field. +func (m *UsageCleanupTaskMutation) SetCanceledAt(t time.Time) { + m.canceled_at = &t +} + +// CanceledAt returns the value of the "canceled_at" field in the mutation. +func (m *UsageCleanupTaskMutation) CanceledAt() (r time.Time, exists bool) { + v := m.canceled_at + if v == nil { + return + } + return *v, true +} + +// OldCanceledAt returns the old "canceled_at" field's value of the UsageCleanupTask entity. +// If the UsageCleanupTask object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageCleanupTaskMutation) OldCanceledAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCanceledAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCanceledAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCanceledAt: %w", err) + } + return oldValue.CanceledAt, nil +} + +// ClearCanceledAt clears the value of the "canceled_at" field. +func (m *UsageCleanupTaskMutation) ClearCanceledAt() { + m.canceled_at = nil + m.clearedFields[usagecleanuptask.FieldCanceledAt] = struct{}{} +} + +// CanceledAtCleared returns if the "canceled_at" field was cleared in this mutation. +func (m *UsageCleanupTaskMutation) CanceledAtCleared() bool { + _, ok := m.clearedFields[usagecleanuptask.FieldCanceledAt] + return ok +} + +// ResetCanceledAt resets all changes to the "canceled_at" field. +func (m *UsageCleanupTaskMutation) ResetCanceledAt() { + m.canceled_at = nil + delete(m.clearedFields, usagecleanuptask.FieldCanceledAt) +} + +// SetStartedAt sets the "started_at" field. +func (m *UsageCleanupTaskMutation) SetStartedAt(t time.Time) { + m.started_at = &t +} + +// StartedAt returns the value of the "started_at" field in the mutation. +func (m *UsageCleanupTaskMutation) StartedAt() (r time.Time, exists bool) { + v := m.started_at + if v == nil { + return + } + return *v, true +} + +// OldStartedAt returns the old "started_at" field's value of the UsageCleanupTask entity. +// If the UsageCleanupTask object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageCleanupTaskMutation) OldStartedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStartedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStartedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStartedAt: %w", err) + } + return oldValue.StartedAt, nil +} + +// ClearStartedAt clears the value of the "started_at" field. +func (m *UsageCleanupTaskMutation) ClearStartedAt() { + m.started_at = nil + m.clearedFields[usagecleanuptask.FieldStartedAt] = struct{}{} +} + +// StartedAtCleared returns if the "started_at" field was cleared in this mutation. +func (m *UsageCleanupTaskMutation) StartedAtCleared() bool { + _, ok := m.clearedFields[usagecleanuptask.FieldStartedAt] + return ok +} + +// ResetStartedAt resets all changes to the "started_at" field. +func (m *UsageCleanupTaskMutation) ResetStartedAt() { + m.started_at = nil + delete(m.clearedFields, usagecleanuptask.FieldStartedAt) +} + +// SetFinishedAt sets the "finished_at" field. +func (m *UsageCleanupTaskMutation) SetFinishedAt(t time.Time) { + m.finished_at = &t +} + +// FinishedAt returns the value of the "finished_at" field in the mutation. +func (m *UsageCleanupTaskMutation) FinishedAt() (r time.Time, exists bool) { + v := m.finished_at + if v == nil { + return + } + return *v, true +} + +// OldFinishedAt returns the old "finished_at" field's value of the UsageCleanupTask entity. +// If the UsageCleanupTask object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageCleanupTaskMutation) OldFinishedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldFinishedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldFinishedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldFinishedAt: %w", err) + } + return oldValue.FinishedAt, nil +} + +// ClearFinishedAt clears the value of the "finished_at" field. +func (m *UsageCleanupTaskMutation) ClearFinishedAt() { + m.finished_at = nil + m.clearedFields[usagecleanuptask.FieldFinishedAt] = struct{}{} +} + +// FinishedAtCleared returns if the "finished_at" field was cleared in this mutation. +func (m *UsageCleanupTaskMutation) FinishedAtCleared() bool { + _, ok := m.clearedFields[usagecleanuptask.FieldFinishedAt] + return ok +} + +// ResetFinishedAt resets all changes to the "finished_at" field. +func (m *UsageCleanupTaskMutation) ResetFinishedAt() { + m.finished_at = nil + delete(m.clearedFields, usagecleanuptask.FieldFinishedAt) +} + +// Where appends a list predicates to the UsageCleanupTaskMutation builder. +func (m *UsageCleanupTaskMutation) Where(ps ...predicate.UsageCleanupTask) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the UsageCleanupTaskMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *UsageCleanupTaskMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.UsageCleanupTask, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *UsageCleanupTaskMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *UsageCleanupTaskMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (UsageCleanupTask). +func (m *UsageCleanupTaskMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *UsageCleanupTaskMutation) Fields() []string { + fields := make([]string, 0, 11) + if m.created_at != nil { + fields = append(fields, usagecleanuptask.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, usagecleanuptask.FieldUpdatedAt) + } + if m.status != nil { + fields = append(fields, usagecleanuptask.FieldStatus) + } + if m.filters != nil { + fields = append(fields, usagecleanuptask.FieldFilters) + } + if m.created_by != nil { + fields = append(fields, usagecleanuptask.FieldCreatedBy) + } + if m.deleted_rows != nil { + fields = append(fields, usagecleanuptask.FieldDeletedRows) + } + if m.error_message != nil { + fields = append(fields, usagecleanuptask.FieldErrorMessage) + } + if m.canceled_by != nil { + fields = append(fields, usagecleanuptask.FieldCanceledBy) + } + if m.canceled_at != nil { + fields = append(fields, usagecleanuptask.FieldCanceledAt) + } + if m.started_at != nil { + fields = append(fields, usagecleanuptask.FieldStartedAt) + } + if m.finished_at != nil { + fields = append(fields, usagecleanuptask.FieldFinishedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *UsageCleanupTaskMutation) Field(name string) (ent.Value, bool) { + switch name { + case usagecleanuptask.FieldCreatedAt: + return m.CreatedAt() + case usagecleanuptask.FieldUpdatedAt: + return m.UpdatedAt() + case usagecleanuptask.FieldStatus: + return m.Status() + case usagecleanuptask.FieldFilters: + return m.Filters() + case usagecleanuptask.FieldCreatedBy: + return m.CreatedBy() + case usagecleanuptask.FieldDeletedRows: + return m.DeletedRows() + case usagecleanuptask.FieldErrorMessage: + return m.ErrorMessage() + case usagecleanuptask.FieldCanceledBy: + return m.CanceledBy() + case usagecleanuptask.FieldCanceledAt: + return m.CanceledAt() + case usagecleanuptask.FieldStartedAt: + return m.StartedAt() + case usagecleanuptask.FieldFinishedAt: + return m.FinishedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *UsageCleanupTaskMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case usagecleanuptask.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case usagecleanuptask.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case usagecleanuptask.FieldStatus: + return m.OldStatus(ctx) + case usagecleanuptask.FieldFilters: + return m.OldFilters(ctx) + case usagecleanuptask.FieldCreatedBy: + return m.OldCreatedBy(ctx) + case usagecleanuptask.FieldDeletedRows: + return m.OldDeletedRows(ctx) + case usagecleanuptask.FieldErrorMessage: + return m.OldErrorMessage(ctx) + case usagecleanuptask.FieldCanceledBy: + return m.OldCanceledBy(ctx) + case usagecleanuptask.FieldCanceledAt: + return m.OldCanceledAt(ctx) + case usagecleanuptask.FieldStartedAt: + return m.OldStartedAt(ctx) + case usagecleanuptask.FieldFinishedAt: + return m.OldFinishedAt(ctx) + } + return nil, fmt.Errorf("unknown UsageCleanupTask field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *UsageCleanupTaskMutation) SetField(name string, value ent.Value) error { + switch name { + case usagecleanuptask.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case usagecleanuptask.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case usagecleanuptask.FieldStatus: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + case usagecleanuptask.FieldFilters: + v, ok := value.(json.RawMessage) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetFilters(v) + return nil + case usagecleanuptask.FieldCreatedBy: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedBy(v) + return nil + case usagecleanuptask.FieldDeletedRows: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDeletedRows(v) + return nil + case usagecleanuptask.FieldErrorMessage: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetErrorMessage(v) + return nil + case usagecleanuptask.FieldCanceledBy: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCanceledBy(v) + return nil + case usagecleanuptask.FieldCanceledAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCanceledAt(v) + return nil + case usagecleanuptask.FieldStartedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStartedAt(v) + return nil + case usagecleanuptask.FieldFinishedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetFinishedAt(v) + return nil + } + return fmt.Errorf("unknown UsageCleanupTask field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *UsageCleanupTaskMutation) AddedFields() []string { + var fields []string + if m.addcreated_by != nil { + fields = append(fields, usagecleanuptask.FieldCreatedBy) + } + if m.adddeleted_rows != nil { + fields = append(fields, usagecleanuptask.FieldDeletedRows) + } + if m.addcanceled_by != nil { + fields = append(fields, usagecleanuptask.FieldCanceledBy) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *UsageCleanupTaskMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case usagecleanuptask.FieldCreatedBy: + return m.AddedCreatedBy() + case usagecleanuptask.FieldDeletedRows: + return m.AddedDeletedRows() + case usagecleanuptask.FieldCanceledBy: + return m.AddedCanceledBy() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *UsageCleanupTaskMutation) AddField(name string, value ent.Value) error { + switch name { + case usagecleanuptask.FieldCreatedBy: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddCreatedBy(v) + return nil + case usagecleanuptask.FieldDeletedRows: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddDeletedRows(v) + return nil + case usagecleanuptask.FieldCanceledBy: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddCanceledBy(v) + return nil + } + return fmt.Errorf("unknown UsageCleanupTask numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *UsageCleanupTaskMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(usagecleanuptask.FieldErrorMessage) { + fields = append(fields, usagecleanuptask.FieldErrorMessage) + } + if m.FieldCleared(usagecleanuptask.FieldCanceledBy) { + fields = append(fields, usagecleanuptask.FieldCanceledBy) + } + if m.FieldCleared(usagecleanuptask.FieldCanceledAt) { + fields = append(fields, usagecleanuptask.FieldCanceledAt) + } + if m.FieldCleared(usagecleanuptask.FieldStartedAt) { + fields = append(fields, usagecleanuptask.FieldStartedAt) + } + if m.FieldCleared(usagecleanuptask.FieldFinishedAt) { + fields = append(fields, usagecleanuptask.FieldFinishedAt) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *UsageCleanupTaskMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *UsageCleanupTaskMutation) ClearField(name string) error { + switch name { + case usagecleanuptask.FieldErrorMessage: + m.ClearErrorMessage() + return nil + case usagecleanuptask.FieldCanceledBy: + m.ClearCanceledBy() + return nil + case usagecleanuptask.FieldCanceledAt: + m.ClearCanceledAt() + return nil + case usagecleanuptask.FieldStartedAt: + m.ClearStartedAt() + return nil + case usagecleanuptask.FieldFinishedAt: + m.ClearFinishedAt() + return nil + } + return fmt.Errorf("unknown UsageCleanupTask nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *UsageCleanupTaskMutation) ResetField(name string) error { + switch name { + case usagecleanuptask.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case usagecleanuptask.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case usagecleanuptask.FieldStatus: + m.ResetStatus() + return nil + case usagecleanuptask.FieldFilters: + m.ResetFilters() + return nil + case usagecleanuptask.FieldCreatedBy: + m.ResetCreatedBy() + return nil + case usagecleanuptask.FieldDeletedRows: + m.ResetDeletedRows() + return nil + case usagecleanuptask.FieldErrorMessage: + m.ResetErrorMessage() + return nil + case usagecleanuptask.FieldCanceledBy: + m.ResetCanceledBy() + return nil + case usagecleanuptask.FieldCanceledAt: + m.ResetCanceledAt() + return nil + case usagecleanuptask.FieldStartedAt: + m.ResetStartedAt() + return nil + case usagecleanuptask.FieldFinishedAt: + m.ResetFinishedAt() + return nil + } + return fmt.Errorf("unknown UsageCleanupTask field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *UsageCleanupTaskMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *UsageCleanupTaskMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *UsageCleanupTaskMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *UsageCleanupTaskMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *UsageCleanupTaskMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *UsageCleanupTaskMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *UsageCleanupTaskMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown UsageCleanupTask unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *UsageCleanupTaskMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown UsageCleanupTask edge %s", name) +} + // UsageLogMutation represents an operation that mutates the UsageLog nodes in the graph. type UsageLogMutation struct { config diff --git a/backend/ent/predicate/predicate.go b/backend/ent/predicate/predicate.go index 7a443c5d..785cb4e6 100644 --- a/backend/ent/predicate/predicate.go +++ b/backend/ent/predicate/predicate.go @@ -33,6 +33,9 @@ type RedeemCode func(*sql.Selector) // Setting is the predicate function for setting builders. type Setting func(*sql.Selector) +// UsageCleanupTask is the predicate function for usagecleanuptask builders. +type UsageCleanupTask func(*sql.Selector) + // UsageLog is the predicate function for usagelog builders. type UsageLog func(*sql.Selector) diff --git a/backend/ent/runtime/runtime.go b/backend/ent/runtime/runtime.go index 0cb10775..1e3f4cbe 100644 --- a/backend/ent/runtime/runtime.go +++ b/backend/ent/runtime/runtime.go @@ -15,6 +15,7 @@ import ( "github.com/Wei-Shaw/sub2api/ent/redeemcode" "github.com/Wei-Shaw/sub2api/ent/schema" "github.com/Wei-Shaw/sub2api/ent/setting" + "github.com/Wei-Shaw/sub2api/ent/usagecleanuptask" "github.com/Wei-Shaw/sub2api/ent/usagelog" "github.com/Wei-Shaw/sub2api/ent/user" "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" @@ -495,6 +496,43 @@ func init() { setting.DefaultUpdatedAt = settingDescUpdatedAt.Default.(func() time.Time) // setting.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. setting.UpdateDefaultUpdatedAt = settingDescUpdatedAt.UpdateDefault.(func() time.Time) + usagecleanuptaskMixin := schema.UsageCleanupTask{}.Mixin() + usagecleanuptaskMixinFields0 := usagecleanuptaskMixin[0].Fields() + _ = usagecleanuptaskMixinFields0 + usagecleanuptaskFields := schema.UsageCleanupTask{}.Fields() + _ = usagecleanuptaskFields + // usagecleanuptaskDescCreatedAt is the schema descriptor for created_at field. + usagecleanuptaskDescCreatedAt := usagecleanuptaskMixinFields0[0].Descriptor() + // usagecleanuptask.DefaultCreatedAt holds the default value on creation for the created_at field. + usagecleanuptask.DefaultCreatedAt = usagecleanuptaskDescCreatedAt.Default.(func() time.Time) + // usagecleanuptaskDescUpdatedAt is the schema descriptor for updated_at field. + usagecleanuptaskDescUpdatedAt := usagecleanuptaskMixinFields0[1].Descriptor() + // usagecleanuptask.DefaultUpdatedAt holds the default value on creation for the updated_at field. + usagecleanuptask.DefaultUpdatedAt = usagecleanuptaskDescUpdatedAt.Default.(func() time.Time) + // usagecleanuptask.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + usagecleanuptask.UpdateDefaultUpdatedAt = usagecleanuptaskDescUpdatedAt.UpdateDefault.(func() time.Time) + // usagecleanuptaskDescStatus is the schema descriptor for status field. + usagecleanuptaskDescStatus := usagecleanuptaskFields[0].Descriptor() + // usagecleanuptask.StatusValidator is a validator for the "status" field. It is called by the builders before save. + usagecleanuptask.StatusValidator = func() func(string) error { + validators := usagecleanuptaskDescStatus.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(status string) error { + for _, fn := range fns { + if err := fn(status); err != nil { + return err + } + } + return nil + } + }() + // usagecleanuptaskDescDeletedRows is the schema descriptor for deleted_rows field. + usagecleanuptaskDescDeletedRows := usagecleanuptaskFields[3].Descriptor() + // usagecleanuptask.DefaultDeletedRows holds the default value on creation for the deleted_rows field. + usagecleanuptask.DefaultDeletedRows = usagecleanuptaskDescDeletedRows.Default.(int64) usagelogFields := schema.UsageLog{}.Fields() _ = usagelogFields // usagelogDescRequestID is the schema descriptor for request_id field. diff --git a/backend/ent/schema/mixins/soft_delete.go b/backend/ent/schema/mixins/soft_delete.go index 9571bc9c..461c7348 100644 --- a/backend/ent/schema/mixins/soft_delete.go +++ b/backend/ent/schema/mixins/soft_delete.go @@ -12,7 +12,6 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/schema/field" "entgo.io/ent/schema/mixin" - dbent "github.com/Wei-Shaw/sub2api/ent" "github.com/Wei-Shaw/sub2api/ent/intercept" ) @@ -113,7 +112,9 @@ func (d SoftDeleteMixin) Hooks() []ent.Hook { SetOp(ent.Op) SetDeletedAt(time.Time) WhereP(...func(*sql.Selector)) - Client() *dbent.Client + Client() interface { + Mutate(context.Context, ent.Mutation) (ent.Value, error) + } }) if !ok { return nil, fmt.Errorf("unexpected mutation type %T", m) diff --git a/backend/ent/schema/usage_cleanup_task.go b/backend/ent/schema/usage_cleanup_task.go new file mode 100644 index 00000000..753e6410 --- /dev/null +++ b/backend/ent/schema/usage_cleanup_task.go @@ -0,0 +1,75 @@ +package schema + +import ( + "encoding/json" + "fmt" + + "github.com/Wei-Shaw/sub2api/ent/schema/mixins" + + "entgo.io/ent" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// UsageCleanupTask 定义使用记录清理任务的 schema。 +type UsageCleanupTask struct { + ent.Schema +} + +func (UsageCleanupTask) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "usage_cleanup_tasks"}, + } +} + +func (UsageCleanupTask) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixins.TimeMixin{}, + } +} + +func (UsageCleanupTask) Fields() []ent.Field { + return []ent.Field{ + field.String("status"). + MaxLen(20). + Validate(validateUsageCleanupStatus), + field.JSON("filters", json.RawMessage{}), + field.Int64("created_by"), + field.Int64("deleted_rows"). + Default(0), + field.String("error_message"). + Optional(). + Nillable(), + field.Int64("canceled_by"). + Optional(). + Nillable(), + field.Time("canceled_at"). + Optional(). + Nillable(), + field.Time("started_at"). + Optional(). + Nillable(), + field.Time("finished_at"). + Optional(). + Nillable(), + } +} + +func (UsageCleanupTask) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("status", "created_at"), + index.Fields("created_at"), + index.Fields("canceled_at"), + } +} + +func validateUsageCleanupStatus(status string) error { + switch status { + case "pending", "running", "succeeded", "failed", "canceled": + return nil + default: + return fmt.Errorf("invalid usage cleanup status: %s", status) + } +} diff --git a/backend/ent/tx.go b/backend/ent/tx.go index 56df121a..7ff16ec8 100644 --- a/backend/ent/tx.go +++ b/backend/ent/tx.go @@ -32,6 +32,8 @@ type Tx struct { RedeemCode *RedeemCodeClient // Setting is the client for interacting with the Setting builders. Setting *SettingClient + // UsageCleanupTask is the client for interacting with the UsageCleanupTask builders. + UsageCleanupTask *UsageCleanupTaskClient // UsageLog is the client for interacting with the UsageLog builders. UsageLog *UsageLogClient // User is the client for interacting with the User builders. @@ -184,6 +186,7 @@ func (tx *Tx) init() { tx.Proxy = NewProxyClient(tx.config) tx.RedeemCode = NewRedeemCodeClient(tx.config) tx.Setting = NewSettingClient(tx.config) + tx.UsageCleanupTask = NewUsageCleanupTaskClient(tx.config) tx.UsageLog = NewUsageLogClient(tx.config) tx.User = NewUserClient(tx.config) tx.UserAllowedGroup = NewUserAllowedGroupClient(tx.config) diff --git a/backend/ent/usagecleanuptask.go b/backend/ent/usagecleanuptask.go new file mode 100644 index 00000000..e3a17b5a --- /dev/null +++ b/backend/ent/usagecleanuptask.go @@ -0,0 +1,236 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/usagecleanuptask" +) + +// UsageCleanupTask is the model entity for the UsageCleanupTask schema. +type UsageCleanupTask struct { + config `json:"-"` + // ID of the ent. + ID int64 `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // Status holds the value of the "status" field. + Status string `json:"status,omitempty"` + // Filters holds the value of the "filters" field. + Filters json.RawMessage `json:"filters,omitempty"` + // CreatedBy holds the value of the "created_by" field. + CreatedBy int64 `json:"created_by,omitempty"` + // DeletedRows holds the value of the "deleted_rows" field. + DeletedRows int64 `json:"deleted_rows,omitempty"` + // ErrorMessage holds the value of the "error_message" field. + ErrorMessage *string `json:"error_message,omitempty"` + // CanceledBy holds the value of the "canceled_by" field. + CanceledBy *int64 `json:"canceled_by,omitempty"` + // CanceledAt holds the value of the "canceled_at" field. + CanceledAt *time.Time `json:"canceled_at,omitempty"` + // StartedAt holds the value of the "started_at" field. + StartedAt *time.Time `json:"started_at,omitempty"` + // FinishedAt holds the value of the "finished_at" field. + FinishedAt *time.Time `json:"finished_at,omitempty"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*UsageCleanupTask) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case usagecleanuptask.FieldFilters: + values[i] = new([]byte) + case usagecleanuptask.FieldID, usagecleanuptask.FieldCreatedBy, usagecleanuptask.FieldDeletedRows, usagecleanuptask.FieldCanceledBy: + values[i] = new(sql.NullInt64) + case usagecleanuptask.FieldStatus, usagecleanuptask.FieldErrorMessage: + values[i] = new(sql.NullString) + case usagecleanuptask.FieldCreatedAt, usagecleanuptask.FieldUpdatedAt, usagecleanuptask.FieldCanceledAt, usagecleanuptask.FieldStartedAt, usagecleanuptask.FieldFinishedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the UsageCleanupTask fields. +func (_m *UsageCleanupTask) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case usagecleanuptask.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case usagecleanuptask.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + case usagecleanuptask.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + _m.UpdatedAt = value.Time + } + case usagecleanuptask.FieldStatus: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field status", values[i]) + } else if value.Valid { + _m.Status = value.String + } + case usagecleanuptask.FieldFilters: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field filters", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &_m.Filters); err != nil { + return fmt.Errorf("unmarshal field filters: %w", err) + } + } + case usagecleanuptask.FieldCreatedBy: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field created_by", values[i]) + } else if value.Valid { + _m.CreatedBy = value.Int64 + } + case usagecleanuptask.FieldDeletedRows: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field deleted_rows", values[i]) + } else if value.Valid { + _m.DeletedRows = value.Int64 + } + case usagecleanuptask.FieldErrorMessage: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field error_message", values[i]) + } else if value.Valid { + _m.ErrorMessage = new(string) + *_m.ErrorMessage = value.String + } + case usagecleanuptask.FieldCanceledBy: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field canceled_by", values[i]) + } else if value.Valid { + _m.CanceledBy = new(int64) + *_m.CanceledBy = value.Int64 + } + case usagecleanuptask.FieldCanceledAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field canceled_at", values[i]) + } else if value.Valid { + _m.CanceledAt = new(time.Time) + *_m.CanceledAt = value.Time + } + case usagecleanuptask.FieldStartedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field started_at", values[i]) + } else if value.Valid { + _m.StartedAt = new(time.Time) + *_m.StartedAt = value.Time + } + case usagecleanuptask.FieldFinishedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field finished_at", values[i]) + } else if value.Valid { + _m.FinishedAt = new(time.Time) + *_m.FinishedAt = value.Time + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the UsageCleanupTask. +// This includes values selected through modifiers, order, etc. +func (_m *UsageCleanupTask) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// Update returns a builder for updating this UsageCleanupTask. +// Note that you need to call UsageCleanupTask.Unwrap() before calling this method if this UsageCleanupTask +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *UsageCleanupTask) Update() *UsageCleanupTaskUpdateOne { + return NewUsageCleanupTaskClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the UsageCleanupTask entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *UsageCleanupTask) Unwrap() *UsageCleanupTask { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: UsageCleanupTask is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. +func (_m *UsageCleanupTask) String() string { + var builder strings.Builder + builder.WriteString("UsageCleanupTask(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("status=") + builder.WriteString(_m.Status) + builder.WriteString(", ") + builder.WriteString("filters=") + builder.WriteString(fmt.Sprintf("%v", _m.Filters)) + builder.WriteString(", ") + builder.WriteString("created_by=") + builder.WriteString(fmt.Sprintf("%v", _m.CreatedBy)) + builder.WriteString(", ") + builder.WriteString("deleted_rows=") + builder.WriteString(fmt.Sprintf("%v", _m.DeletedRows)) + builder.WriteString(", ") + if v := _m.ErrorMessage; v != nil { + builder.WriteString("error_message=") + builder.WriteString(*v) + } + builder.WriteString(", ") + if v := _m.CanceledBy; v != nil { + builder.WriteString("canceled_by=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + if v := _m.CanceledAt; v != nil { + builder.WriteString("canceled_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + if v := _m.StartedAt; v != nil { + builder.WriteString("started_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + if v := _m.FinishedAt; v != nil { + builder.WriteString("finished_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteByte(')') + return builder.String() +} + +// UsageCleanupTasks is a parsable slice of UsageCleanupTask. +type UsageCleanupTasks []*UsageCleanupTask diff --git a/backend/ent/usagecleanuptask/usagecleanuptask.go b/backend/ent/usagecleanuptask/usagecleanuptask.go new file mode 100644 index 00000000..a8ddd9a0 --- /dev/null +++ b/backend/ent/usagecleanuptask/usagecleanuptask.go @@ -0,0 +1,137 @@ +// Code generated by ent, DO NOT EDIT. + +package usagecleanuptask + +import ( + "time" + + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the usagecleanuptask type in the database. + Label = "usage_cleanup_task" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldStatus holds the string denoting the status field in the database. + FieldStatus = "status" + // FieldFilters holds the string denoting the filters field in the database. + FieldFilters = "filters" + // FieldCreatedBy holds the string denoting the created_by field in the database. + FieldCreatedBy = "created_by" + // FieldDeletedRows holds the string denoting the deleted_rows field in the database. + FieldDeletedRows = "deleted_rows" + // FieldErrorMessage holds the string denoting the error_message field in the database. + FieldErrorMessage = "error_message" + // FieldCanceledBy holds the string denoting the canceled_by field in the database. + FieldCanceledBy = "canceled_by" + // FieldCanceledAt holds the string denoting the canceled_at field in the database. + FieldCanceledAt = "canceled_at" + // FieldStartedAt holds the string denoting the started_at field in the database. + FieldStartedAt = "started_at" + // FieldFinishedAt holds the string denoting the finished_at field in the database. + FieldFinishedAt = "finished_at" + // Table holds the table name of the usagecleanuptask in the database. + Table = "usage_cleanup_tasks" +) + +// Columns holds all SQL columns for usagecleanuptask fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldStatus, + FieldFilters, + FieldCreatedBy, + FieldDeletedRows, + FieldErrorMessage, + FieldCanceledBy, + FieldCanceledAt, + FieldStartedAt, + FieldFinishedAt, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // StatusValidator is a validator for the "status" field. It is called by the builders before save. + StatusValidator func(string) error + // DefaultDeletedRows holds the default value on creation for the "deleted_rows" field. + DefaultDeletedRows int64 +) + +// OrderOption defines the ordering options for the UsageCleanupTask queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByStatus orders the results by the status field. +func ByStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatus, opts...).ToFunc() +} + +// ByCreatedBy orders the results by the created_by field. +func ByCreatedBy(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedBy, opts...).ToFunc() +} + +// ByDeletedRows orders the results by the deleted_rows field. +func ByDeletedRows(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDeletedRows, opts...).ToFunc() +} + +// ByErrorMessage orders the results by the error_message field. +func ByErrorMessage(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldErrorMessage, opts...).ToFunc() +} + +// ByCanceledBy orders the results by the canceled_by field. +func ByCanceledBy(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCanceledBy, opts...).ToFunc() +} + +// ByCanceledAt orders the results by the canceled_at field. +func ByCanceledAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCanceledAt, opts...).ToFunc() +} + +// ByStartedAt orders the results by the started_at field. +func ByStartedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStartedAt, opts...).ToFunc() +} + +// ByFinishedAt orders the results by the finished_at field. +func ByFinishedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldFinishedAt, opts...).ToFunc() +} diff --git a/backend/ent/usagecleanuptask/where.go b/backend/ent/usagecleanuptask/where.go new file mode 100644 index 00000000..99e790ca --- /dev/null +++ b/backend/ent/usagecleanuptask/where.go @@ -0,0 +1,620 @@ +// Code generated by ent, DO NOT EDIT. + +package usagecleanuptask + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLTE(FieldID, id)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// Status applies equality check predicate on the "status" field. It's identical to StatusEQ. +func Status(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldStatus, v)) +} + +// CreatedBy applies equality check predicate on the "created_by" field. It's identical to CreatedByEQ. +func CreatedBy(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldCreatedBy, v)) +} + +// DeletedRows applies equality check predicate on the "deleted_rows" field. It's identical to DeletedRowsEQ. +func DeletedRows(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldDeletedRows, v)) +} + +// ErrorMessage applies equality check predicate on the "error_message" field. It's identical to ErrorMessageEQ. +func ErrorMessage(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldErrorMessage, v)) +} + +// CanceledBy applies equality check predicate on the "canceled_by" field. It's identical to CanceledByEQ. +func CanceledBy(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldCanceledBy, v)) +} + +// CanceledAt applies equality check predicate on the "canceled_at" field. It's identical to CanceledAtEQ. +func CanceledAt(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldCanceledAt, v)) +} + +// StartedAt applies equality check predicate on the "started_at" field. It's identical to StartedAtEQ. +func StartedAt(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldStartedAt, v)) +} + +// FinishedAt applies equality check predicate on the "finished_at" field. It's identical to FinishedAtEQ. +func FinishedAt(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldFinishedAt, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// StatusEQ applies the EQ predicate on the "status" field. +func StatusEQ(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldStatus, v)) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. +func StatusNEQ(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNEQ(FieldStatus, v)) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIn(FieldStatus, vs...)) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotIn(FieldStatus, vs...)) +} + +// StatusGT applies the GT predicate on the "status" field. +func StatusGT(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGT(FieldStatus, v)) +} + +// StatusGTE applies the GTE predicate on the "status" field. +func StatusGTE(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGTE(FieldStatus, v)) +} + +// StatusLT applies the LT predicate on the "status" field. +func StatusLT(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLT(FieldStatus, v)) +} + +// StatusLTE applies the LTE predicate on the "status" field. +func StatusLTE(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLTE(FieldStatus, v)) +} + +// StatusContains applies the Contains predicate on the "status" field. +func StatusContains(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldContains(FieldStatus, v)) +} + +// StatusHasPrefix applies the HasPrefix predicate on the "status" field. +func StatusHasPrefix(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldHasPrefix(FieldStatus, v)) +} + +// StatusHasSuffix applies the HasSuffix predicate on the "status" field. +func StatusHasSuffix(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldHasSuffix(FieldStatus, v)) +} + +// StatusEqualFold applies the EqualFold predicate on the "status" field. +func StatusEqualFold(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEqualFold(FieldStatus, v)) +} + +// StatusContainsFold applies the ContainsFold predicate on the "status" field. +func StatusContainsFold(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldContainsFold(FieldStatus, v)) +} + +// CreatedByEQ applies the EQ predicate on the "created_by" field. +func CreatedByEQ(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldCreatedBy, v)) +} + +// CreatedByNEQ applies the NEQ predicate on the "created_by" field. +func CreatedByNEQ(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNEQ(FieldCreatedBy, v)) +} + +// CreatedByIn applies the In predicate on the "created_by" field. +func CreatedByIn(vs ...int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIn(FieldCreatedBy, vs...)) +} + +// CreatedByNotIn applies the NotIn predicate on the "created_by" field. +func CreatedByNotIn(vs ...int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotIn(FieldCreatedBy, vs...)) +} + +// CreatedByGT applies the GT predicate on the "created_by" field. +func CreatedByGT(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGT(FieldCreatedBy, v)) +} + +// CreatedByGTE applies the GTE predicate on the "created_by" field. +func CreatedByGTE(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGTE(FieldCreatedBy, v)) +} + +// CreatedByLT applies the LT predicate on the "created_by" field. +func CreatedByLT(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLT(FieldCreatedBy, v)) +} + +// CreatedByLTE applies the LTE predicate on the "created_by" field. +func CreatedByLTE(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLTE(FieldCreatedBy, v)) +} + +// DeletedRowsEQ applies the EQ predicate on the "deleted_rows" field. +func DeletedRowsEQ(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldDeletedRows, v)) +} + +// DeletedRowsNEQ applies the NEQ predicate on the "deleted_rows" field. +func DeletedRowsNEQ(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNEQ(FieldDeletedRows, v)) +} + +// DeletedRowsIn applies the In predicate on the "deleted_rows" field. +func DeletedRowsIn(vs ...int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIn(FieldDeletedRows, vs...)) +} + +// DeletedRowsNotIn applies the NotIn predicate on the "deleted_rows" field. +func DeletedRowsNotIn(vs ...int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotIn(FieldDeletedRows, vs...)) +} + +// DeletedRowsGT applies the GT predicate on the "deleted_rows" field. +func DeletedRowsGT(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGT(FieldDeletedRows, v)) +} + +// DeletedRowsGTE applies the GTE predicate on the "deleted_rows" field. +func DeletedRowsGTE(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGTE(FieldDeletedRows, v)) +} + +// DeletedRowsLT applies the LT predicate on the "deleted_rows" field. +func DeletedRowsLT(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLT(FieldDeletedRows, v)) +} + +// DeletedRowsLTE applies the LTE predicate on the "deleted_rows" field. +func DeletedRowsLTE(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLTE(FieldDeletedRows, v)) +} + +// ErrorMessageEQ applies the EQ predicate on the "error_message" field. +func ErrorMessageEQ(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldErrorMessage, v)) +} + +// ErrorMessageNEQ applies the NEQ predicate on the "error_message" field. +func ErrorMessageNEQ(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNEQ(FieldErrorMessage, v)) +} + +// ErrorMessageIn applies the In predicate on the "error_message" field. +func ErrorMessageIn(vs ...string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIn(FieldErrorMessage, vs...)) +} + +// ErrorMessageNotIn applies the NotIn predicate on the "error_message" field. +func ErrorMessageNotIn(vs ...string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotIn(FieldErrorMessage, vs...)) +} + +// ErrorMessageGT applies the GT predicate on the "error_message" field. +func ErrorMessageGT(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGT(FieldErrorMessage, v)) +} + +// ErrorMessageGTE applies the GTE predicate on the "error_message" field. +func ErrorMessageGTE(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGTE(FieldErrorMessage, v)) +} + +// ErrorMessageLT applies the LT predicate on the "error_message" field. +func ErrorMessageLT(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLT(FieldErrorMessage, v)) +} + +// ErrorMessageLTE applies the LTE predicate on the "error_message" field. +func ErrorMessageLTE(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLTE(FieldErrorMessage, v)) +} + +// ErrorMessageContains applies the Contains predicate on the "error_message" field. +func ErrorMessageContains(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldContains(FieldErrorMessage, v)) +} + +// ErrorMessageHasPrefix applies the HasPrefix predicate on the "error_message" field. +func ErrorMessageHasPrefix(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldHasPrefix(FieldErrorMessage, v)) +} + +// ErrorMessageHasSuffix applies the HasSuffix predicate on the "error_message" field. +func ErrorMessageHasSuffix(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldHasSuffix(FieldErrorMessage, v)) +} + +// ErrorMessageIsNil applies the IsNil predicate on the "error_message" field. +func ErrorMessageIsNil() predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIsNull(FieldErrorMessage)) +} + +// ErrorMessageNotNil applies the NotNil predicate on the "error_message" field. +func ErrorMessageNotNil() predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotNull(FieldErrorMessage)) +} + +// ErrorMessageEqualFold applies the EqualFold predicate on the "error_message" field. +func ErrorMessageEqualFold(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEqualFold(FieldErrorMessage, v)) +} + +// ErrorMessageContainsFold applies the ContainsFold predicate on the "error_message" field. +func ErrorMessageContainsFold(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldContainsFold(FieldErrorMessage, v)) +} + +// CanceledByEQ applies the EQ predicate on the "canceled_by" field. +func CanceledByEQ(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldCanceledBy, v)) +} + +// CanceledByNEQ applies the NEQ predicate on the "canceled_by" field. +func CanceledByNEQ(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNEQ(FieldCanceledBy, v)) +} + +// CanceledByIn applies the In predicate on the "canceled_by" field. +func CanceledByIn(vs ...int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIn(FieldCanceledBy, vs...)) +} + +// CanceledByNotIn applies the NotIn predicate on the "canceled_by" field. +func CanceledByNotIn(vs ...int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotIn(FieldCanceledBy, vs...)) +} + +// CanceledByGT applies the GT predicate on the "canceled_by" field. +func CanceledByGT(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGT(FieldCanceledBy, v)) +} + +// CanceledByGTE applies the GTE predicate on the "canceled_by" field. +func CanceledByGTE(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGTE(FieldCanceledBy, v)) +} + +// CanceledByLT applies the LT predicate on the "canceled_by" field. +func CanceledByLT(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLT(FieldCanceledBy, v)) +} + +// CanceledByLTE applies the LTE predicate on the "canceled_by" field. +func CanceledByLTE(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLTE(FieldCanceledBy, v)) +} + +// CanceledByIsNil applies the IsNil predicate on the "canceled_by" field. +func CanceledByIsNil() predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIsNull(FieldCanceledBy)) +} + +// CanceledByNotNil applies the NotNil predicate on the "canceled_by" field. +func CanceledByNotNil() predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotNull(FieldCanceledBy)) +} + +// CanceledAtEQ applies the EQ predicate on the "canceled_at" field. +func CanceledAtEQ(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldCanceledAt, v)) +} + +// CanceledAtNEQ applies the NEQ predicate on the "canceled_at" field. +func CanceledAtNEQ(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNEQ(FieldCanceledAt, v)) +} + +// CanceledAtIn applies the In predicate on the "canceled_at" field. +func CanceledAtIn(vs ...time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIn(FieldCanceledAt, vs...)) +} + +// CanceledAtNotIn applies the NotIn predicate on the "canceled_at" field. +func CanceledAtNotIn(vs ...time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotIn(FieldCanceledAt, vs...)) +} + +// CanceledAtGT applies the GT predicate on the "canceled_at" field. +func CanceledAtGT(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGT(FieldCanceledAt, v)) +} + +// CanceledAtGTE applies the GTE predicate on the "canceled_at" field. +func CanceledAtGTE(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGTE(FieldCanceledAt, v)) +} + +// CanceledAtLT applies the LT predicate on the "canceled_at" field. +func CanceledAtLT(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLT(FieldCanceledAt, v)) +} + +// CanceledAtLTE applies the LTE predicate on the "canceled_at" field. +func CanceledAtLTE(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLTE(FieldCanceledAt, v)) +} + +// CanceledAtIsNil applies the IsNil predicate on the "canceled_at" field. +func CanceledAtIsNil() predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIsNull(FieldCanceledAt)) +} + +// CanceledAtNotNil applies the NotNil predicate on the "canceled_at" field. +func CanceledAtNotNil() predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotNull(FieldCanceledAt)) +} + +// StartedAtEQ applies the EQ predicate on the "started_at" field. +func StartedAtEQ(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldStartedAt, v)) +} + +// StartedAtNEQ applies the NEQ predicate on the "started_at" field. +func StartedAtNEQ(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNEQ(FieldStartedAt, v)) +} + +// StartedAtIn applies the In predicate on the "started_at" field. +func StartedAtIn(vs ...time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIn(FieldStartedAt, vs...)) +} + +// StartedAtNotIn applies the NotIn predicate on the "started_at" field. +func StartedAtNotIn(vs ...time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotIn(FieldStartedAt, vs...)) +} + +// StartedAtGT applies the GT predicate on the "started_at" field. +func StartedAtGT(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGT(FieldStartedAt, v)) +} + +// StartedAtGTE applies the GTE predicate on the "started_at" field. +func StartedAtGTE(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGTE(FieldStartedAt, v)) +} + +// StartedAtLT applies the LT predicate on the "started_at" field. +func StartedAtLT(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLT(FieldStartedAt, v)) +} + +// StartedAtLTE applies the LTE predicate on the "started_at" field. +func StartedAtLTE(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLTE(FieldStartedAt, v)) +} + +// StartedAtIsNil applies the IsNil predicate on the "started_at" field. +func StartedAtIsNil() predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIsNull(FieldStartedAt)) +} + +// StartedAtNotNil applies the NotNil predicate on the "started_at" field. +func StartedAtNotNil() predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotNull(FieldStartedAt)) +} + +// FinishedAtEQ applies the EQ predicate on the "finished_at" field. +func FinishedAtEQ(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldFinishedAt, v)) +} + +// FinishedAtNEQ applies the NEQ predicate on the "finished_at" field. +func FinishedAtNEQ(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNEQ(FieldFinishedAt, v)) +} + +// FinishedAtIn applies the In predicate on the "finished_at" field. +func FinishedAtIn(vs ...time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIn(FieldFinishedAt, vs...)) +} + +// FinishedAtNotIn applies the NotIn predicate on the "finished_at" field. +func FinishedAtNotIn(vs ...time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotIn(FieldFinishedAt, vs...)) +} + +// FinishedAtGT applies the GT predicate on the "finished_at" field. +func FinishedAtGT(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGT(FieldFinishedAt, v)) +} + +// FinishedAtGTE applies the GTE predicate on the "finished_at" field. +func FinishedAtGTE(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGTE(FieldFinishedAt, v)) +} + +// FinishedAtLT applies the LT predicate on the "finished_at" field. +func FinishedAtLT(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLT(FieldFinishedAt, v)) +} + +// FinishedAtLTE applies the LTE predicate on the "finished_at" field. +func FinishedAtLTE(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLTE(FieldFinishedAt, v)) +} + +// FinishedAtIsNil applies the IsNil predicate on the "finished_at" field. +func FinishedAtIsNil() predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIsNull(FieldFinishedAt)) +} + +// FinishedAtNotNil applies the NotNil predicate on the "finished_at" field. +func FinishedAtNotNil() predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotNull(FieldFinishedAt)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.UsageCleanupTask) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.UsageCleanupTask) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.UsageCleanupTask) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.NotPredicates(p)) +} diff --git a/backend/ent/usagecleanuptask_create.go b/backend/ent/usagecleanuptask_create.go new file mode 100644 index 00000000..0b1dcff5 --- /dev/null +++ b/backend/ent/usagecleanuptask_create.go @@ -0,0 +1,1190 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/usagecleanuptask" +) + +// UsageCleanupTaskCreate is the builder for creating a UsageCleanupTask entity. +type UsageCleanupTaskCreate struct { + config + mutation *UsageCleanupTaskMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetCreatedAt sets the "created_at" field. +func (_c *UsageCleanupTaskCreate) SetCreatedAt(v time.Time) *UsageCleanupTaskCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *UsageCleanupTaskCreate) SetNillableCreatedAt(v *time.Time) *UsageCleanupTaskCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetUpdatedAt sets the "updated_at" field. +func (_c *UsageCleanupTaskCreate) SetUpdatedAt(v time.Time) *UsageCleanupTaskCreate { + _c.mutation.SetUpdatedAt(v) + return _c +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (_c *UsageCleanupTaskCreate) SetNillableUpdatedAt(v *time.Time) *UsageCleanupTaskCreate { + if v != nil { + _c.SetUpdatedAt(*v) + } + return _c +} + +// SetStatus sets the "status" field. +func (_c *UsageCleanupTaskCreate) SetStatus(v string) *UsageCleanupTaskCreate { + _c.mutation.SetStatus(v) + return _c +} + +// SetFilters sets the "filters" field. +func (_c *UsageCleanupTaskCreate) SetFilters(v json.RawMessage) *UsageCleanupTaskCreate { + _c.mutation.SetFilters(v) + return _c +} + +// SetCreatedBy sets the "created_by" field. +func (_c *UsageCleanupTaskCreate) SetCreatedBy(v int64) *UsageCleanupTaskCreate { + _c.mutation.SetCreatedBy(v) + return _c +} + +// SetDeletedRows sets the "deleted_rows" field. +func (_c *UsageCleanupTaskCreate) SetDeletedRows(v int64) *UsageCleanupTaskCreate { + _c.mutation.SetDeletedRows(v) + return _c +} + +// SetNillableDeletedRows sets the "deleted_rows" field if the given value is not nil. +func (_c *UsageCleanupTaskCreate) SetNillableDeletedRows(v *int64) *UsageCleanupTaskCreate { + if v != nil { + _c.SetDeletedRows(*v) + } + return _c +} + +// SetErrorMessage sets the "error_message" field. +func (_c *UsageCleanupTaskCreate) SetErrorMessage(v string) *UsageCleanupTaskCreate { + _c.mutation.SetErrorMessage(v) + return _c +} + +// SetNillableErrorMessage sets the "error_message" field if the given value is not nil. +func (_c *UsageCleanupTaskCreate) SetNillableErrorMessage(v *string) *UsageCleanupTaskCreate { + if v != nil { + _c.SetErrorMessage(*v) + } + return _c +} + +// SetCanceledBy sets the "canceled_by" field. +func (_c *UsageCleanupTaskCreate) SetCanceledBy(v int64) *UsageCleanupTaskCreate { + _c.mutation.SetCanceledBy(v) + return _c +} + +// SetNillableCanceledBy sets the "canceled_by" field if the given value is not nil. +func (_c *UsageCleanupTaskCreate) SetNillableCanceledBy(v *int64) *UsageCleanupTaskCreate { + if v != nil { + _c.SetCanceledBy(*v) + } + return _c +} + +// SetCanceledAt sets the "canceled_at" field. +func (_c *UsageCleanupTaskCreate) SetCanceledAt(v time.Time) *UsageCleanupTaskCreate { + _c.mutation.SetCanceledAt(v) + return _c +} + +// SetNillableCanceledAt sets the "canceled_at" field if the given value is not nil. +func (_c *UsageCleanupTaskCreate) SetNillableCanceledAt(v *time.Time) *UsageCleanupTaskCreate { + if v != nil { + _c.SetCanceledAt(*v) + } + return _c +} + +// SetStartedAt sets the "started_at" field. +func (_c *UsageCleanupTaskCreate) SetStartedAt(v time.Time) *UsageCleanupTaskCreate { + _c.mutation.SetStartedAt(v) + return _c +} + +// SetNillableStartedAt sets the "started_at" field if the given value is not nil. +func (_c *UsageCleanupTaskCreate) SetNillableStartedAt(v *time.Time) *UsageCleanupTaskCreate { + if v != nil { + _c.SetStartedAt(*v) + } + return _c +} + +// SetFinishedAt sets the "finished_at" field. +func (_c *UsageCleanupTaskCreate) SetFinishedAt(v time.Time) *UsageCleanupTaskCreate { + _c.mutation.SetFinishedAt(v) + return _c +} + +// SetNillableFinishedAt sets the "finished_at" field if the given value is not nil. +func (_c *UsageCleanupTaskCreate) SetNillableFinishedAt(v *time.Time) *UsageCleanupTaskCreate { + if v != nil { + _c.SetFinishedAt(*v) + } + return _c +} + +// Mutation returns the UsageCleanupTaskMutation object of the builder. +func (_c *UsageCleanupTaskCreate) Mutation() *UsageCleanupTaskMutation { + return _c.mutation +} + +// Save creates the UsageCleanupTask in the database. +func (_c *UsageCleanupTaskCreate) Save(ctx context.Context) (*UsageCleanupTask, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *UsageCleanupTaskCreate) SaveX(ctx context.Context) *UsageCleanupTask { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *UsageCleanupTaskCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *UsageCleanupTaskCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *UsageCleanupTaskCreate) defaults() { + if _, ok := _c.mutation.CreatedAt(); !ok { + v := usagecleanuptask.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + v := usagecleanuptask.DefaultUpdatedAt() + _c.mutation.SetUpdatedAt(v) + } + if _, ok := _c.mutation.DeletedRows(); !ok { + v := usagecleanuptask.DefaultDeletedRows + _c.mutation.SetDeletedRows(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_c *UsageCleanupTaskCreate) check() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "UsageCleanupTask.created_at"`)} + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "UsageCleanupTask.updated_at"`)} + } + if _, ok := _c.mutation.Status(); !ok { + return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "UsageCleanupTask.status"`)} + } + if v, ok := _c.mutation.Status(); ok { + if err := usagecleanuptask.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "UsageCleanupTask.status": %w`, err)} + } + } + if _, ok := _c.mutation.Filters(); !ok { + return &ValidationError{Name: "filters", err: errors.New(`ent: missing required field "UsageCleanupTask.filters"`)} + } + if _, ok := _c.mutation.CreatedBy(); !ok { + return &ValidationError{Name: "created_by", err: errors.New(`ent: missing required field "UsageCleanupTask.created_by"`)} + } + if _, ok := _c.mutation.DeletedRows(); !ok { + return &ValidationError{Name: "deleted_rows", err: errors.New(`ent: missing required field "UsageCleanupTask.deleted_rows"`)} + } + return nil +} + +func (_c *UsageCleanupTaskCreate) sqlSave(ctx context.Context) (*UsageCleanupTask, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *UsageCleanupTaskCreate) createSpec() (*UsageCleanupTask, *sqlgraph.CreateSpec) { + var ( + _node = &UsageCleanupTask{config: _c.config} + _spec = sqlgraph.NewCreateSpec(usagecleanuptask.Table, sqlgraph.NewFieldSpec(usagecleanuptask.FieldID, field.TypeInt64)) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(usagecleanuptask.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := _c.mutation.UpdatedAt(); ok { + _spec.SetField(usagecleanuptask.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if value, ok := _c.mutation.Status(); ok { + _spec.SetField(usagecleanuptask.FieldStatus, field.TypeString, value) + _node.Status = value + } + if value, ok := _c.mutation.Filters(); ok { + _spec.SetField(usagecleanuptask.FieldFilters, field.TypeJSON, value) + _node.Filters = value + } + if value, ok := _c.mutation.CreatedBy(); ok { + _spec.SetField(usagecleanuptask.FieldCreatedBy, field.TypeInt64, value) + _node.CreatedBy = value + } + if value, ok := _c.mutation.DeletedRows(); ok { + _spec.SetField(usagecleanuptask.FieldDeletedRows, field.TypeInt64, value) + _node.DeletedRows = value + } + if value, ok := _c.mutation.ErrorMessage(); ok { + _spec.SetField(usagecleanuptask.FieldErrorMessage, field.TypeString, value) + _node.ErrorMessage = &value + } + if value, ok := _c.mutation.CanceledBy(); ok { + _spec.SetField(usagecleanuptask.FieldCanceledBy, field.TypeInt64, value) + _node.CanceledBy = &value + } + if value, ok := _c.mutation.CanceledAt(); ok { + _spec.SetField(usagecleanuptask.FieldCanceledAt, field.TypeTime, value) + _node.CanceledAt = &value + } + if value, ok := _c.mutation.StartedAt(); ok { + _spec.SetField(usagecleanuptask.FieldStartedAt, field.TypeTime, value) + _node.StartedAt = &value + } + if value, ok := _c.mutation.FinishedAt(); ok { + _spec.SetField(usagecleanuptask.FieldFinishedAt, field.TypeTime, value) + _node.FinishedAt = &value + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.UsageCleanupTask.Create(). +// SetCreatedAt(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.UsageCleanupTaskUpsert) { +// SetCreatedAt(v+v). +// }). +// Exec(ctx) +func (_c *UsageCleanupTaskCreate) OnConflict(opts ...sql.ConflictOption) *UsageCleanupTaskUpsertOne { + _c.conflict = opts + return &UsageCleanupTaskUpsertOne{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.UsageCleanupTask.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *UsageCleanupTaskCreate) OnConflictColumns(columns ...string) *UsageCleanupTaskUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &UsageCleanupTaskUpsertOne{ + create: _c, + } +} + +type ( + // UsageCleanupTaskUpsertOne is the builder for "upsert"-ing + // one UsageCleanupTask node. + UsageCleanupTaskUpsertOne struct { + create *UsageCleanupTaskCreate + } + + // UsageCleanupTaskUpsert is the "OnConflict" setter. + UsageCleanupTaskUpsert struct { + *sql.UpdateSet + } +) + +// SetUpdatedAt sets the "updated_at" field. +func (u *UsageCleanupTaskUpsert) SetUpdatedAt(v time.Time) *UsageCleanupTaskUpsert { + u.Set(usagecleanuptask.FieldUpdatedAt, v) + return u +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsert) UpdateUpdatedAt() *UsageCleanupTaskUpsert { + u.SetExcluded(usagecleanuptask.FieldUpdatedAt) + return u +} + +// SetStatus sets the "status" field. +func (u *UsageCleanupTaskUpsert) SetStatus(v string) *UsageCleanupTaskUpsert { + u.Set(usagecleanuptask.FieldStatus, v) + return u +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsert) UpdateStatus() *UsageCleanupTaskUpsert { + u.SetExcluded(usagecleanuptask.FieldStatus) + return u +} + +// SetFilters sets the "filters" field. +func (u *UsageCleanupTaskUpsert) SetFilters(v json.RawMessage) *UsageCleanupTaskUpsert { + u.Set(usagecleanuptask.FieldFilters, v) + return u +} + +// UpdateFilters sets the "filters" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsert) UpdateFilters() *UsageCleanupTaskUpsert { + u.SetExcluded(usagecleanuptask.FieldFilters) + return u +} + +// SetCreatedBy sets the "created_by" field. +func (u *UsageCleanupTaskUpsert) SetCreatedBy(v int64) *UsageCleanupTaskUpsert { + u.Set(usagecleanuptask.FieldCreatedBy, v) + return u +} + +// UpdateCreatedBy sets the "created_by" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsert) UpdateCreatedBy() *UsageCleanupTaskUpsert { + u.SetExcluded(usagecleanuptask.FieldCreatedBy) + return u +} + +// AddCreatedBy adds v to the "created_by" field. +func (u *UsageCleanupTaskUpsert) AddCreatedBy(v int64) *UsageCleanupTaskUpsert { + u.Add(usagecleanuptask.FieldCreatedBy, v) + return u +} + +// SetDeletedRows sets the "deleted_rows" field. +func (u *UsageCleanupTaskUpsert) SetDeletedRows(v int64) *UsageCleanupTaskUpsert { + u.Set(usagecleanuptask.FieldDeletedRows, v) + return u +} + +// UpdateDeletedRows sets the "deleted_rows" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsert) UpdateDeletedRows() *UsageCleanupTaskUpsert { + u.SetExcluded(usagecleanuptask.FieldDeletedRows) + return u +} + +// AddDeletedRows adds v to the "deleted_rows" field. +func (u *UsageCleanupTaskUpsert) AddDeletedRows(v int64) *UsageCleanupTaskUpsert { + u.Add(usagecleanuptask.FieldDeletedRows, v) + return u +} + +// SetErrorMessage sets the "error_message" field. +func (u *UsageCleanupTaskUpsert) SetErrorMessage(v string) *UsageCleanupTaskUpsert { + u.Set(usagecleanuptask.FieldErrorMessage, v) + return u +} + +// UpdateErrorMessage sets the "error_message" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsert) UpdateErrorMessage() *UsageCleanupTaskUpsert { + u.SetExcluded(usagecleanuptask.FieldErrorMessage) + return u +} + +// ClearErrorMessage clears the value of the "error_message" field. +func (u *UsageCleanupTaskUpsert) ClearErrorMessage() *UsageCleanupTaskUpsert { + u.SetNull(usagecleanuptask.FieldErrorMessage) + return u +} + +// SetCanceledBy sets the "canceled_by" field. +func (u *UsageCleanupTaskUpsert) SetCanceledBy(v int64) *UsageCleanupTaskUpsert { + u.Set(usagecleanuptask.FieldCanceledBy, v) + return u +} + +// UpdateCanceledBy sets the "canceled_by" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsert) UpdateCanceledBy() *UsageCleanupTaskUpsert { + u.SetExcluded(usagecleanuptask.FieldCanceledBy) + return u +} + +// AddCanceledBy adds v to the "canceled_by" field. +func (u *UsageCleanupTaskUpsert) AddCanceledBy(v int64) *UsageCleanupTaskUpsert { + u.Add(usagecleanuptask.FieldCanceledBy, v) + return u +} + +// ClearCanceledBy clears the value of the "canceled_by" field. +func (u *UsageCleanupTaskUpsert) ClearCanceledBy() *UsageCleanupTaskUpsert { + u.SetNull(usagecleanuptask.FieldCanceledBy) + return u +} + +// SetCanceledAt sets the "canceled_at" field. +func (u *UsageCleanupTaskUpsert) SetCanceledAt(v time.Time) *UsageCleanupTaskUpsert { + u.Set(usagecleanuptask.FieldCanceledAt, v) + return u +} + +// UpdateCanceledAt sets the "canceled_at" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsert) UpdateCanceledAt() *UsageCleanupTaskUpsert { + u.SetExcluded(usagecleanuptask.FieldCanceledAt) + return u +} + +// ClearCanceledAt clears the value of the "canceled_at" field. +func (u *UsageCleanupTaskUpsert) ClearCanceledAt() *UsageCleanupTaskUpsert { + u.SetNull(usagecleanuptask.FieldCanceledAt) + return u +} + +// SetStartedAt sets the "started_at" field. +func (u *UsageCleanupTaskUpsert) SetStartedAt(v time.Time) *UsageCleanupTaskUpsert { + u.Set(usagecleanuptask.FieldStartedAt, v) + return u +} + +// UpdateStartedAt sets the "started_at" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsert) UpdateStartedAt() *UsageCleanupTaskUpsert { + u.SetExcluded(usagecleanuptask.FieldStartedAt) + return u +} + +// ClearStartedAt clears the value of the "started_at" field. +func (u *UsageCleanupTaskUpsert) ClearStartedAt() *UsageCleanupTaskUpsert { + u.SetNull(usagecleanuptask.FieldStartedAt) + return u +} + +// SetFinishedAt sets the "finished_at" field. +func (u *UsageCleanupTaskUpsert) SetFinishedAt(v time.Time) *UsageCleanupTaskUpsert { + u.Set(usagecleanuptask.FieldFinishedAt, v) + return u +} + +// UpdateFinishedAt sets the "finished_at" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsert) UpdateFinishedAt() *UsageCleanupTaskUpsert { + u.SetExcluded(usagecleanuptask.FieldFinishedAt) + return u +} + +// ClearFinishedAt clears the value of the "finished_at" field. +func (u *UsageCleanupTaskUpsert) ClearFinishedAt() *UsageCleanupTaskUpsert { + u.SetNull(usagecleanuptask.FieldFinishedAt) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.UsageCleanupTask.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *UsageCleanupTaskUpsertOne) UpdateNewValues() *UsageCleanupTaskUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(usagecleanuptask.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.UsageCleanupTask.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *UsageCleanupTaskUpsertOne) Ignore() *UsageCleanupTaskUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *UsageCleanupTaskUpsertOne) DoNothing() *UsageCleanupTaskUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the UsageCleanupTaskCreate.OnConflict +// documentation for more info. +func (u *UsageCleanupTaskUpsertOne) Update(set func(*UsageCleanupTaskUpsert)) *UsageCleanupTaskUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&UsageCleanupTaskUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *UsageCleanupTaskUpsertOne) SetUpdatedAt(v time.Time) *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertOne) UpdateUpdatedAt() *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetStatus sets the "status" field. +func (u *UsageCleanupTaskUpsertOne) SetStatus(v string) *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertOne) UpdateStatus() *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateStatus() + }) +} + +// SetFilters sets the "filters" field. +func (u *UsageCleanupTaskUpsertOne) SetFilters(v json.RawMessage) *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetFilters(v) + }) +} + +// UpdateFilters sets the "filters" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertOne) UpdateFilters() *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateFilters() + }) +} + +// SetCreatedBy sets the "created_by" field. +func (u *UsageCleanupTaskUpsertOne) SetCreatedBy(v int64) *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetCreatedBy(v) + }) +} + +// AddCreatedBy adds v to the "created_by" field. +func (u *UsageCleanupTaskUpsertOne) AddCreatedBy(v int64) *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.AddCreatedBy(v) + }) +} + +// UpdateCreatedBy sets the "created_by" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertOne) UpdateCreatedBy() *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateCreatedBy() + }) +} + +// SetDeletedRows sets the "deleted_rows" field. +func (u *UsageCleanupTaskUpsertOne) SetDeletedRows(v int64) *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetDeletedRows(v) + }) +} + +// AddDeletedRows adds v to the "deleted_rows" field. +func (u *UsageCleanupTaskUpsertOne) AddDeletedRows(v int64) *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.AddDeletedRows(v) + }) +} + +// UpdateDeletedRows sets the "deleted_rows" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertOne) UpdateDeletedRows() *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateDeletedRows() + }) +} + +// SetErrorMessage sets the "error_message" field. +func (u *UsageCleanupTaskUpsertOne) SetErrorMessage(v string) *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetErrorMessage(v) + }) +} + +// UpdateErrorMessage sets the "error_message" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertOne) UpdateErrorMessage() *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateErrorMessage() + }) +} + +// ClearErrorMessage clears the value of the "error_message" field. +func (u *UsageCleanupTaskUpsertOne) ClearErrorMessage() *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.ClearErrorMessage() + }) +} + +// SetCanceledBy sets the "canceled_by" field. +func (u *UsageCleanupTaskUpsertOne) SetCanceledBy(v int64) *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetCanceledBy(v) + }) +} + +// AddCanceledBy adds v to the "canceled_by" field. +func (u *UsageCleanupTaskUpsertOne) AddCanceledBy(v int64) *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.AddCanceledBy(v) + }) +} + +// UpdateCanceledBy sets the "canceled_by" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertOne) UpdateCanceledBy() *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateCanceledBy() + }) +} + +// ClearCanceledBy clears the value of the "canceled_by" field. +func (u *UsageCleanupTaskUpsertOne) ClearCanceledBy() *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.ClearCanceledBy() + }) +} + +// SetCanceledAt sets the "canceled_at" field. +func (u *UsageCleanupTaskUpsertOne) SetCanceledAt(v time.Time) *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetCanceledAt(v) + }) +} + +// UpdateCanceledAt sets the "canceled_at" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertOne) UpdateCanceledAt() *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateCanceledAt() + }) +} + +// ClearCanceledAt clears the value of the "canceled_at" field. +func (u *UsageCleanupTaskUpsertOne) ClearCanceledAt() *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.ClearCanceledAt() + }) +} + +// SetStartedAt sets the "started_at" field. +func (u *UsageCleanupTaskUpsertOne) SetStartedAt(v time.Time) *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetStartedAt(v) + }) +} + +// UpdateStartedAt sets the "started_at" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertOne) UpdateStartedAt() *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateStartedAt() + }) +} + +// ClearStartedAt clears the value of the "started_at" field. +func (u *UsageCleanupTaskUpsertOne) ClearStartedAt() *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.ClearStartedAt() + }) +} + +// SetFinishedAt sets the "finished_at" field. +func (u *UsageCleanupTaskUpsertOne) SetFinishedAt(v time.Time) *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetFinishedAt(v) + }) +} + +// UpdateFinishedAt sets the "finished_at" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertOne) UpdateFinishedAt() *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateFinishedAt() + }) +} + +// ClearFinishedAt clears the value of the "finished_at" field. +func (u *UsageCleanupTaskUpsertOne) ClearFinishedAt() *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.ClearFinishedAt() + }) +} + +// Exec executes the query. +func (u *UsageCleanupTaskUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for UsageCleanupTaskCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *UsageCleanupTaskUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. +func (u *UsageCleanupTaskUpsertOne) ID(ctx context.Context) (id int64, err error) { + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *UsageCleanupTaskUpsertOne) IDX(ctx context.Context) int64 { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// UsageCleanupTaskCreateBulk is the builder for creating many UsageCleanupTask entities in bulk. +type UsageCleanupTaskCreateBulk struct { + config + err error + builders []*UsageCleanupTaskCreate + conflict []sql.ConflictOption +} + +// Save creates the UsageCleanupTask entities in the database. +func (_c *UsageCleanupTaskCreateBulk) Save(ctx context.Context) ([]*UsageCleanupTask, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*UsageCleanupTask, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*UsageCleanupTaskMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = _c.conflict + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int64(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *UsageCleanupTaskCreateBulk) SaveX(ctx context.Context) []*UsageCleanupTask { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *UsageCleanupTaskCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *UsageCleanupTaskCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.UsageCleanupTask.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.UsageCleanupTaskUpsert) { +// SetCreatedAt(v+v). +// }). +// Exec(ctx) +func (_c *UsageCleanupTaskCreateBulk) OnConflict(opts ...sql.ConflictOption) *UsageCleanupTaskUpsertBulk { + _c.conflict = opts + return &UsageCleanupTaskUpsertBulk{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.UsageCleanupTask.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *UsageCleanupTaskCreateBulk) OnConflictColumns(columns ...string) *UsageCleanupTaskUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &UsageCleanupTaskUpsertBulk{ + create: _c, + } +} + +// UsageCleanupTaskUpsertBulk is the builder for "upsert"-ing +// a bulk of UsageCleanupTask nodes. +type UsageCleanupTaskUpsertBulk struct { + create *UsageCleanupTaskCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.UsageCleanupTask.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *UsageCleanupTaskUpsertBulk) UpdateNewValues() *UsageCleanupTaskUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(usagecleanuptask.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.UsageCleanupTask.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *UsageCleanupTaskUpsertBulk) Ignore() *UsageCleanupTaskUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *UsageCleanupTaskUpsertBulk) DoNothing() *UsageCleanupTaskUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the UsageCleanupTaskCreateBulk.OnConflict +// documentation for more info. +func (u *UsageCleanupTaskUpsertBulk) Update(set func(*UsageCleanupTaskUpsert)) *UsageCleanupTaskUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&UsageCleanupTaskUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *UsageCleanupTaskUpsertBulk) SetUpdatedAt(v time.Time) *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertBulk) UpdateUpdatedAt() *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetStatus sets the "status" field. +func (u *UsageCleanupTaskUpsertBulk) SetStatus(v string) *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertBulk) UpdateStatus() *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateStatus() + }) +} + +// SetFilters sets the "filters" field. +func (u *UsageCleanupTaskUpsertBulk) SetFilters(v json.RawMessage) *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetFilters(v) + }) +} + +// UpdateFilters sets the "filters" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertBulk) UpdateFilters() *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateFilters() + }) +} + +// SetCreatedBy sets the "created_by" field. +func (u *UsageCleanupTaskUpsertBulk) SetCreatedBy(v int64) *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetCreatedBy(v) + }) +} + +// AddCreatedBy adds v to the "created_by" field. +func (u *UsageCleanupTaskUpsertBulk) AddCreatedBy(v int64) *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.AddCreatedBy(v) + }) +} + +// UpdateCreatedBy sets the "created_by" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertBulk) UpdateCreatedBy() *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateCreatedBy() + }) +} + +// SetDeletedRows sets the "deleted_rows" field. +func (u *UsageCleanupTaskUpsertBulk) SetDeletedRows(v int64) *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetDeletedRows(v) + }) +} + +// AddDeletedRows adds v to the "deleted_rows" field. +func (u *UsageCleanupTaskUpsertBulk) AddDeletedRows(v int64) *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.AddDeletedRows(v) + }) +} + +// UpdateDeletedRows sets the "deleted_rows" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertBulk) UpdateDeletedRows() *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateDeletedRows() + }) +} + +// SetErrorMessage sets the "error_message" field. +func (u *UsageCleanupTaskUpsertBulk) SetErrorMessage(v string) *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetErrorMessage(v) + }) +} + +// UpdateErrorMessage sets the "error_message" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertBulk) UpdateErrorMessage() *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateErrorMessage() + }) +} + +// ClearErrorMessage clears the value of the "error_message" field. +func (u *UsageCleanupTaskUpsertBulk) ClearErrorMessage() *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.ClearErrorMessage() + }) +} + +// SetCanceledBy sets the "canceled_by" field. +func (u *UsageCleanupTaskUpsertBulk) SetCanceledBy(v int64) *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetCanceledBy(v) + }) +} + +// AddCanceledBy adds v to the "canceled_by" field. +func (u *UsageCleanupTaskUpsertBulk) AddCanceledBy(v int64) *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.AddCanceledBy(v) + }) +} + +// UpdateCanceledBy sets the "canceled_by" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertBulk) UpdateCanceledBy() *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateCanceledBy() + }) +} + +// ClearCanceledBy clears the value of the "canceled_by" field. +func (u *UsageCleanupTaskUpsertBulk) ClearCanceledBy() *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.ClearCanceledBy() + }) +} + +// SetCanceledAt sets the "canceled_at" field. +func (u *UsageCleanupTaskUpsertBulk) SetCanceledAt(v time.Time) *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetCanceledAt(v) + }) +} + +// UpdateCanceledAt sets the "canceled_at" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertBulk) UpdateCanceledAt() *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateCanceledAt() + }) +} + +// ClearCanceledAt clears the value of the "canceled_at" field. +func (u *UsageCleanupTaskUpsertBulk) ClearCanceledAt() *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.ClearCanceledAt() + }) +} + +// SetStartedAt sets the "started_at" field. +func (u *UsageCleanupTaskUpsertBulk) SetStartedAt(v time.Time) *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetStartedAt(v) + }) +} + +// UpdateStartedAt sets the "started_at" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertBulk) UpdateStartedAt() *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateStartedAt() + }) +} + +// ClearStartedAt clears the value of the "started_at" field. +func (u *UsageCleanupTaskUpsertBulk) ClearStartedAt() *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.ClearStartedAt() + }) +} + +// SetFinishedAt sets the "finished_at" field. +func (u *UsageCleanupTaskUpsertBulk) SetFinishedAt(v time.Time) *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetFinishedAt(v) + }) +} + +// UpdateFinishedAt sets the "finished_at" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertBulk) UpdateFinishedAt() *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateFinishedAt() + }) +} + +// ClearFinishedAt clears the value of the "finished_at" field. +func (u *UsageCleanupTaskUpsertBulk) ClearFinishedAt() *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.ClearFinishedAt() + }) +} + +// Exec executes the query. +func (u *UsageCleanupTaskUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the UsageCleanupTaskCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for UsageCleanupTaskCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *UsageCleanupTaskUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/usagecleanuptask_delete.go b/backend/ent/usagecleanuptask_delete.go new file mode 100644 index 00000000..158555f7 --- /dev/null +++ b/backend/ent/usagecleanuptask_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/usagecleanuptask" +) + +// UsageCleanupTaskDelete is the builder for deleting a UsageCleanupTask entity. +type UsageCleanupTaskDelete struct { + config + hooks []Hook + mutation *UsageCleanupTaskMutation +} + +// Where appends a list predicates to the UsageCleanupTaskDelete builder. +func (_d *UsageCleanupTaskDelete) Where(ps ...predicate.UsageCleanupTask) *UsageCleanupTaskDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *UsageCleanupTaskDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *UsageCleanupTaskDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *UsageCleanupTaskDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(usagecleanuptask.Table, sqlgraph.NewFieldSpec(usagecleanuptask.FieldID, field.TypeInt64)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// UsageCleanupTaskDeleteOne is the builder for deleting a single UsageCleanupTask entity. +type UsageCleanupTaskDeleteOne struct { + _d *UsageCleanupTaskDelete +} + +// Where appends a list predicates to the UsageCleanupTaskDelete builder. +func (_d *UsageCleanupTaskDeleteOne) Where(ps ...predicate.UsageCleanupTask) *UsageCleanupTaskDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *UsageCleanupTaskDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{usagecleanuptask.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *UsageCleanupTaskDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/usagecleanuptask_query.go b/backend/ent/usagecleanuptask_query.go new file mode 100644 index 00000000..9d8d5410 --- /dev/null +++ b/backend/ent/usagecleanuptask_query.go @@ -0,0 +1,564 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/usagecleanuptask" +) + +// UsageCleanupTaskQuery is the builder for querying UsageCleanupTask entities. +type UsageCleanupTaskQuery struct { + config + ctx *QueryContext + order []usagecleanuptask.OrderOption + inters []Interceptor + predicates []predicate.UsageCleanupTask + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the UsageCleanupTaskQuery builder. +func (_q *UsageCleanupTaskQuery) Where(ps ...predicate.UsageCleanupTask) *UsageCleanupTaskQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *UsageCleanupTaskQuery) Limit(limit int) *UsageCleanupTaskQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *UsageCleanupTaskQuery) Offset(offset int) *UsageCleanupTaskQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *UsageCleanupTaskQuery) Unique(unique bool) *UsageCleanupTaskQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *UsageCleanupTaskQuery) Order(o ...usagecleanuptask.OrderOption) *UsageCleanupTaskQuery { + _q.order = append(_q.order, o...) + return _q +} + +// First returns the first UsageCleanupTask entity from the query. +// Returns a *NotFoundError when no UsageCleanupTask was found. +func (_q *UsageCleanupTaskQuery) First(ctx context.Context) (*UsageCleanupTask, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{usagecleanuptask.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *UsageCleanupTaskQuery) FirstX(ctx context.Context) *UsageCleanupTask { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first UsageCleanupTask ID from the query. +// Returns a *NotFoundError when no UsageCleanupTask ID was found. +func (_q *UsageCleanupTaskQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{usagecleanuptask.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *UsageCleanupTaskQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single UsageCleanupTask entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one UsageCleanupTask entity is found. +// Returns a *NotFoundError when no UsageCleanupTask entities are found. +func (_q *UsageCleanupTaskQuery) Only(ctx context.Context) (*UsageCleanupTask, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{usagecleanuptask.Label} + default: + return nil, &NotSingularError{usagecleanuptask.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *UsageCleanupTaskQuery) OnlyX(ctx context.Context) *UsageCleanupTask { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only UsageCleanupTask ID in the query. +// Returns a *NotSingularError when more than one UsageCleanupTask ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *UsageCleanupTaskQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{usagecleanuptask.Label} + default: + err = &NotSingularError{usagecleanuptask.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *UsageCleanupTaskQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of UsageCleanupTasks. +func (_q *UsageCleanupTaskQuery) All(ctx context.Context) ([]*UsageCleanupTask, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*UsageCleanupTask, *UsageCleanupTaskQuery]() + return withInterceptors[[]*UsageCleanupTask](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *UsageCleanupTaskQuery) AllX(ctx context.Context) []*UsageCleanupTask { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of UsageCleanupTask IDs. +func (_q *UsageCleanupTaskQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(usagecleanuptask.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *UsageCleanupTaskQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *UsageCleanupTaskQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*UsageCleanupTaskQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *UsageCleanupTaskQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *UsageCleanupTaskQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *UsageCleanupTaskQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the UsageCleanupTaskQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *UsageCleanupTaskQuery) Clone() *UsageCleanupTaskQuery { + if _q == nil { + return nil + } + return &UsageCleanupTaskQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]usagecleanuptask.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.UsageCleanupTask{}, _q.predicates...), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.UsageCleanupTask.Query(). +// GroupBy(usagecleanuptask.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *UsageCleanupTaskQuery) GroupBy(field string, fields ...string) *UsageCleanupTaskGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &UsageCleanupTaskGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = usagecleanuptask.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.UsageCleanupTask.Query(). +// Select(usagecleanuptask.FieldCreatedAt). +// Scan(ctx, &v) +func (_q *UsageCleanupTaskQuery) Select(fields ...string) *UsageCleanupTaskSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &UsageCleanupTaskSelect{UsageCleanupTaskQuery: _q} + sbuild.label = usagecleanuptask.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a UsageCleanupTaskSelect configured with the given aggregations. +func (_q *UsageCleanupTaskQuery) Aggregate(fns ...AggregateFunc) *UsageCleanupTaskSelect { + return _q.Select().Aggregate(fns...) +} + +func (_q *UsageCleanupTaskQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !usagecleanuptask.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *UsageCleanupTaskQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*UsageCleanupTask, error) { + var ( + nodes = []*UsageCleanupTask{} + _spec = _q.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*UsageCleanupTask).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &UsageCleanupTask{config: _q.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (_q *UsageCleanupTaskQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *UsageCleanupTaskQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(usagecleanuptask.Table, usagecleanuptask.Columns, sqlgraph.NewFieldSpec(usagecleanuptask.FieldID, field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, usagecleanuptask.FieldID) + for i := range fields { + if fields[i] != usagecleanuptask.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *UsageCleanupTaskQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(usagecleanuptask.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = usagecleanuptask.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, m := range _q.modifiers { + m(selector) + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (_q *UsageCleanupTaskQuery) ForUpdate(opts ...sql.LockOption) *UsageCleanupTaskQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (_q *UsageCleanupTaskQuery) ForShare(opts ...sql.LockOption) *UsageCleanupTaskQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + +// UsageCleanupTaskGroupBy is the group-by builder for UsageCleanupTask entities. +type UsageCleanupTaskGroupBy struct { + selector + build *UsageCleanupTaskQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *UsageCleanupTaskGroupBy) Aggregate(fns ...AggregateFunc) *UsageCleanupTaskGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. +func (_g *UsageCleanupTaskGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UsageCleanupTaskQuery, *UsageCleanupTaskGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *UsageCleanupTaskGroupBy) sqlScan(ctx context.Context, root *UsageCleanupTaskQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// UsageCleanupTaskSelect is the builder for selecting fields of UsageCleanupTask entities. +type UsageCleanupTaskSelect struct { + *UsageCleanupTaskQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *UsageCleanupTaskSelect) Aggregate(fns ...AggregateFunc) *UsageCleanupTaskSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. +func (_s *UsageCleanupTaskSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UsageCleanupTaskQuery, *UsageCleanupTaskSelect](ctx, _s.UsageCleanupTaskQuery, _s, _s.inters, v) +} + +func (_s *UsageCleanupTaskSelect) sqlScan(ctx context.Context, root *UsageCleanupTaskQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/usagecleanuptask_update.go b/backend/ent/usagecleanuptask_update.go new file mode 100644 index 00000000..604202c6 --- /dev/null +++ b/backend/ent/usagecleanuptask_update.go @@ -0,0 +1,702 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/dialect/sql/sqljson" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/usagecleanuptask" +) + +// UsageCleanupTaskUpdate is the builder for updating UsageCleanupTask entities. +type UsageCleanupTaskUpdate struct { + config + hooks []Hook + mutation *UsageCleanupTaskMutation +} + +// Where appends a list predicates to the UsageCleanupTaskUpdate builder. +func (_u *UsageCleanupTaskUpdate) Where(ps ...predicate.UsageCleanupTask) *UsageCleanupTaskUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *UsageCleanupTaskUpdate) SetUpdatedAt(v time.Time) *UsageCleanupTaskUpdate { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetStatus sets the "status" field. +func (_u *UsageCleanupTaskUpdate) SetStatus(v string) *UsageCleanupTaskUpdate { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdate) SetNillableStatus(v *string) *UsageCleanupTaskUpdate { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetFilters sets the "filters" field. +func (_u *UsageCleanupTaskUpdate) SetFilters(v json.RawMessage) *UsageCleanupTaskUpdate { + _u.mutation.SetFilters(v) + return _u +} + +// AppendFilters appends value to the "filters" field. +func (_u *UsageCleanupTaskUpdate) AppendFilters(v json.RawMessage) *UsageCleanupTaskUpdate { + _u.mutation.AppendFilters(v) + return _u +} + +// SetCreatedBy sets the "created_by" field. +func (_u *UsageCleanupTaskUpdate) SetCreatedBy(v int64) *UsageCleanupTaskUpdate { + _u.mutation.ResetCreatedBy() + _u.mutation.SetCreatedBy(v) + return _u +} + +// SetNillableCreatedBy sets the "created_by" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdate) SetNillableCreatedBy(v *int64) *UsageCleanupTaskUpdate { + if v != nil { + _u.SetCreatedBy(*v) + } + return _u +} + +// AddCreatedBy adds value to the "created_by" field. +func (_u *UsageCleanupTaskUpdate) AddCreatedBy(v int64) *UsageCleanupTaskUpdate { + _u.mutation.AddCreatedBy(v) + return _u +} + +// SetDeletedRows sets the "deleted_rows" field. +func (_u *UsageCleanupTaskUpdate) SetDeletedRows(v int64) *UsageCleanupTaskUpdate { + _u.mutation.ResetDeletedRows() + _u.mutation.SetDeletedRows(v) + return _u +} + +// SetNillableDeletedRows sets the "deleted_rows" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdate) SetNillableDeletedRows(v *int64) *UsageCleanupTaskUpdate { + if v != nil { + _u.SetDeletedRows(*v) + } + return _u +} + +// AddDeletedRows adds value to the "deleted_rows" field. +func (_u *UsageCleanupTaskUpdate) AddDeletedRows(v int64) *UsageCleanupTaskUpdate { + _u.mutation.AddDeletedRows(v) + return _u +} + +// SetErrorMessage sets the "error_message" field. +func (_u *UsageCleanupTaskUpdate) SetErrorMessage(v string) *UsageCleanupTaskUpdate { + _u.mutation.SetErrorMessage(v) + return _u +} + +// SetNillableErrorMessage sets the "error_message" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdate) SetNillableErrorMessage(v *string) *UsageCleanupTaskUpdate { + if v != nil { + _u.SetErrorMessage(*v) + } + return _u +} + +// ClearErrorMessage clears the value of the "error_message" field. +func (_u *UsageCleanupTaskUpdate) ClearErrorMessage() *UsageCleanupTaskUpdate { + _u.mutation.ClearErrorMessage() + return _u +} + +// SetCanceledBy sets the "canceled_by" field. +func (_u *UsageCleanupTaskUpdate) SetCanceledBy(v int64) *UsageCleanupTaskUpdate { + _u.mutation.ResetCanceledBy() + _u.mutation.SetCanceledBy(v) + return _u +} + +// SetNillableCanceledBy sets the "canceled_by" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdate) SetNillableCanceledBy(v *int64) *UsageCleanupTaskUpdate { + if v != nil { + _u.SetCanceledBy(*v) + } + return _u +} + +// AddCanceledBy adds value to the "canceled_by" field. +func (_u *UsageCleanupTaskUpdate) AddCanceledBy(v int64) *UsageCleanupTaskUpdate { + _u.mutation.AddCanceledBy(v) + return _u +} + +// ClearCanceledBy clears the value of the "canceled_by" field. +func (_u *UsageCleanupTaskUpdate) ClearCanceledBy() *UsageCleanupTaskUpdate { + _u.mutation.ClearCanceledBy() + return _u +} + +// SetCanceledAt sets the "canceled_at" field. +func (_u *UsageCleanupTaskUpdate) SetCanceledAt(v time.Time) *UsageCleanupTaskUpdate { + _u.mutation.SetCanceledAt(v) + return _u +} + +// SetNillableCanceledAt sets the "canceled_at" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdate) SetNillableCanceledAt(v *time.Time) *UsageCleanupTaskUpdate { + if v != nil { + _u.SetCanceledAt(*v) + } + return _u +} + +// ClearCanceledAt clears the value of the "canceled_at" field. +func (_u *UsageCleanupTaskUpdate) ClearCanceledAt() *UsageCleanupTaskUpdate { + _u.mutation.ClearCanceledAt() + return _u +} + +// SetStartedAt sets the "started_at" field. +func (_u *UsageCleanupTaskUpdate) SetStartedAt(v time.Time) *UsageCleanupTaskUpdate { + _u.mutation.SetStartedAt(v) + return _u +} + +// SetNillableStartedAt sets the "started_at" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdate) SetNillableStartedAt(v *time.Time) *UsageCleanupTaskUpdate { + if v != nil { + _u.SetStartedAt(*v) + } + return _u +} + +// ClearStartedAt clears the value of the "started_at" field. +func (_u *UsageCleanupTaskUpdate) ClearStartedAt() *UsageCleanupTaskUpdate { + _u.mutation.ClearStartedAt() + return _u +} + +// SetFinishedAt sets the "finished_at" field. +func (_u *UsageCleanupTaskUpdate) SetFinishedAt(v time.Time) *UsageCleanupTaskUpdate { + _u.mutation.SetFinishedAt(v) + return _u +} + +// SetNillableFinishedAt sets the "finished_at" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdate) SetNillableFinishedAt(v *time.Time) *UsageCleanupTaskUpdate { + if v != nil { + _u.SetFinishedAt(*v) + } + return _u +} + +// ClearFinishedAt clears the value of the "finished_at" field. +func (_u *UsageCleanupTaskUpdate) ClearFinishedAt() *UsageCleanupTaskUpdate { + _u.mutation.ClearFinishedAt() + return _u +} + +// Mutation returns the UsageCleanupTaskMutation object of the builder. +func (_u *UsageCleanupTaskUpdate) Mutation() *UsageCleanupTaskMutation { + return _u.mutation +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *UsageCleanupTaskUpdate) Save(ctx context.Context) (int, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *UsageCleanupTaskUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *UsageCleanupTaskUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *UsageCleanupTaskUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *UsageCleanupTaskUpdate) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := usagecleanuptask.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *UsageCleanupTaskUpdate) check() error { + if v, ok := _u.mutation.Status(); ok { + if err := usagecleanuptask.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "UsageCleanupTask.status": %w`, err)} + } + } + return nil +} + +func (_u *UsageCleanupTaskUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(usagecleanuptask.Table, usagecleanuptask.Columns, sqlgraph.NewFieldSpec(usagecleanuptask.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(usagecleanuptask.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(usagecleanuptask.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.Filters(); ok { + _spec.SetField(usagecleanuptask.FieldFilters, field.TypeJSON, value) + } + if value, ok := _u.mutation.AppendedFilters(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, usagecleanuptask.FieldFilters, value) + }) + } + if value, ok := _u.mutation.CreatedBy(); ok { + _spec.SetField(usagecleanuptask.FieldCreatedBy, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedCreatedBy(); ok { + _spec.AddField(usagecleanuptask.FieldCreatedBy, field.TypeInt64, value) + } + if value, ok := _u.mutation.DeletedRows(); ok { + _spec.SetField(usagecleanuptask.FieldDeletedRows, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedDeletedRows(); ok { + _spec.AddField(usagecleanuptask.FieldDeletedRows, field.TypeInt64, value) + } + if value, ok := _u.mutation.ErrorMessage(); ok { + _spec.SetField(usagecleanuptask.FieldErrorMessage, field.TypeString, value) + } + if _u.mutation.ErrorMessageCleared() { + _spec.ClearField(usagecleanuptask.FieldErrorMessage, field.TypeString) + } + if value, ok := _u.mutation.CanceledBy(); ok { + _spec.SetField(usagecleanuptask.FieldCanceledBy, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedCanceledBy(); ok { + _spec.AddField(usagecleanuptask.FieldCanceledBy, field.TypeInt64, value) + } + if _u.mutation.CanceledByCleared() { + _spec.ClearField(usagecleanuptask.FieldCanceledBy, field.TypeInt64) + } + if value, ok := _u.mutation.CanceledAt(); ok { + _spec.SetField(usagecleanuptask.FieldCanceledAt, field.TypeTime, value) + } + if _u.mutation.CanceledAtCleared() { + _spec.ClearField(usagecleanuptask.FieldCanceledAt, field.TypeTime) + } + if value, ok := _u.mutation.StartedAt(); ok { + _spec.SetField(usagecleanuptask.FieldStartedAt, field.TypeTime, value) + } + if _u.mutation.StartedAtCleared() { + _spec.ClearField(usagecleanuptask.FieldStartedAt, field.TypeTime) + } + if value, ok := _u.mutation.FinishedAt(); ok { + _spec.SetField(usagecleanuptask.FieldFinishedAt, field.TypeTime, value) + } + if _u.mutation.FinishedAtCleared() { + _spec.ClearField(usagecleanuptask.FieldFinishedAt, field.TypeTime) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{usagecleanuptask.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// UsageCleanupTaskUpdateOne is the builder for updating a single UsageCleanupTask entity. +type UsageCleanupTaskUpdateOne struct { + config + fields []string + hooks []Hook + mutation *UsageCleanupTaskMutation +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *UsageCleanupTaskUpdateOne) SetUpdatedAt(v time.Time) *UsageCleanupTaskUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetStatus sets the "status" field. +func (_u *UsageCleanupTaskUpdateOne) SetStatus(v string) *UsageCleanupTaskUpdateOne { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdateOne) SetNillableStatus(v *string) *UsageCleanupTaskUpdateOne { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetFilters sets the "filters" field. +func (_u *UsageCleanupTaskUpdateOne) SetFilters(v json.RawMessage) *UsageCleanupTaskUpdateOne { + _u.mutation.SetFilters(v) + return _u +} + +// AppendFilters appends value to the "filters" field. +func (_u *UsageCleanupTaskUpdateOne) AppendFilters(v json.RawMessage) *UsageCleanupTaskUpdateOne { + _u.mutation.AppendFilters(v) + return _u +} + +// SetCreatedBy sets the "created_by" field. +func (_u *UsageCleanupTaskUpdateOne) SetCreatedBy(v int64) *UsageCleanupTaskUpdateOne { + _u.mutation.ResetCreatedBy() + _u.mutation.SetCreatedBy(v) + return _u +} + +// SetNillableCreatedBy sets the "created_by" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdateOne) SetNillableCreatedBy(v *int64) *UsageCleanupTaskUpdateOne { + if v != nil { + _u.SetCreatedBy(*v) + } + return _u +} + +// AddCreatedBy adds value to the "created_by" field. +func (_u *UsageCleanupTaskUpdateOne) AddCreatedBy(v int64) *UsageCleanupTaskUpdateOne { + _u.mutation.AddCreatedBy(v) + return _u +} + +// SetDeletedRows sets the "deleted_rows" field. +func (_u *UsageCleanupTaskUpdateOne) SetDeletedRows(v int64) *UsageCleanupTaskUpdateOne { + _u.mutation.ResetDeletedRows() + _u.mutation.SetDeletedRows(v) + return _u +} + +// SetNillableDeletedRows sets the "deleted_rows" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdateOne) SetNillableDeletedRows(v *int64) *UsageCleanupTaskUpdateOne { + if v != nil { + _u.SetDeletedRows(*v) + } + return _u +} + +// AddDeletedRows adds value to the "deleted_rows" field. +func (_u *UsageCleanupTaskUpdateOne) AddDeletedRows(v int64) *UsageCleanupTaskUpdateOne { + _u.mutation.AddDeletedRows(v) + return _u +} + +// SetErrorMessage sets the "error_message" field. +func (_u *UsageCleanupTaskUpdateOne) SetErrorMessage(v string) *UsageCleanupTaskUpdateOne { + _u.mutation.SetErrorMessage(v) + return _u +} + +// SetNillableErrorMessage sets the "error_message" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdateOne) SetNillableErrorMessage(v *string) *UsageCleanupTaskUpdateOne { + if v != nil { + _u.SetErrorMessage(*v) + } + return _u +} + +// ClearErrorMessage clears the value of the "error_message" field. +func (_u *UsageCleanupTaskUpdateOne) ClearErrorMessage() *UsageCleanupTaskUpdateOne { + _u.mutation.ClearErrorMessage() + return _u +} + +// SetCanceledBy sets the "canceled_by" field. +func (_u *UsageCleanupTaskUpdateOne) SetCanceledBy(v int64) *UsageCleanupTaskUpdateOne { + _u.mutation.ResetCanceledBy() + _u.mutation.SetCanceledBy(v) + return _u +} + +// SetNillableCanceledBy sets the "canceled_by" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdateOne) SetNillableCanceledBy(v *int64) *UsageCleanupTaskUpdateOne { + if v != nil { + _u.SetCanceledBy(*v) + } + return _u +} + +// AddCanceledBy adds value to the "canceled_by" field. +func (_u *UsageCleanupTaskUpdateOne) AddCanceledBy(v int64) *UsageCleanupTaskUpdateOne { + _u.mutation.AddCanceledBy(v) + return _u +} + +// ClearCanceledBy clears the value of the "canceled_by" field. +func (_u *UsageCleanupTaskUpdateOne) ClearCanceledBy() *UsageCleanupTaskUpdateOne { + _u.mutation.ClearCanceledBy() + return _u +} + +// SetCanceledAt sets the "canceled_at" field. +func (_u *UsageCleanupTaskUpdateOne) SetCanceledAt(v time.Time) *UsageCleanupTaskUpdateOne { + _u.mutation.SetCanceledAt(v) + return _u +} + +// SetNillableCanceledAt sets the "canceled_at" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdateOne) SetNillableCanceledAt(v *time.Time) *UsageCleanupTaskUpdateOne { + if v != nil { + _u.SetCanceledAt(*v) + } + return _u +} + +// ClearCanceledAt clears the value of the "canceled_at" field. +func (_u *UsageCleanupTaskUpdateOne) ClearCanceledAt() *UsageCleanupTaskUpdateOne { + _u.mutation.ClearCanceledAt() + return _u +} + +// SetStartedAt sets the "started_at" field. +func (_u *UsageCleanupTaskUpdateOne) SetStartedAt(v time.Time) *UsageCleanupTaskUpdateOne { + _u.mutation.SetStartedAt(v) + return _u +} + +// SetNillableStartedAt sets the "started_at" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdateOne) SetNillableStartedAt(v *time.Time) *UsageCleanupTaskUpdateOne { + if v != nil { + _u.SetStartedAt(*v) + } + return _u +} + +// ClearStartedAt clears the value of the "started_at" field. +func (_u *UsageCleanupTaskUpdateOne) ClearStartedAt() *UsageCleanupTaskUpdateOne { + _u.mutation.ClearStartedAt() + return _u +} + +// SetFinishedAt sets the "finished_at" field. +func (_u *UsageCleanupTaskUpdateOne) SetFinishedAt(v time.Time) *UsageCleanupTaskUpdateOne { + _u.mutation.SetFinishedAt(v) + return _u +} + +// SetNillableFinishedAt sets the "finished_at" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdateOne) SetNillableFinishedAt(v *time.Time) *UsageCleanupTaskUpdateOne { + if v != nil { + _u.SetFinishedAt(*v) + } + return _u +} + +// ClearFinishedAt clears the value of the "finished_at" field. +func (_u *UsageCleanupTaskUpdateOne) ClearFinishedAt() *UsageCleanupTaskUpdateOne { + _u.mutation.ClearFinishedAt() + return _u +} + +// Mutation returns the UsageCleanupTaskMutation object of the builder. +func (_u *UsageCleanupTaskUpdateOne) Mutation() *UsageCleanupTaskMutation { + return _u.mutation +} + +// Where appends a list predicates to the UsageCleanupTaskUpdate builder. +func (_u *UsageCleanupTaskUpdateOne) Where(ps ...predicate.UsageCleanupTask) *UsageCleanupTaskUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *UsageCleanupTaskUpdateOne) Select(field string, fields ...string) *UsageCleanupTaskUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated UsageCleanupTask entity. +func (_u *UsageCleanupTaskUpdateOne) Save(ctx context.Context) (*UsageCleanupTask, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *UsageCleanupTaskUpdateOne) SaveX(ctx context.Context) *UsageCleanupTask { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *UsageCleanupTaskUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *UsageCleanupTaskUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *UsageCleanupTaskUpdateOne) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := usagecleanuptask.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *UsageCleanupTaskUpdateOne) check() error { + if v, ok := _u.mutation.Status(); ok { + if err := usagecleanuptask.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "UsageCleanupTask.status": %w`, err)} + } + } + return nil +} + +func (_u *UsageCleanupTaskUpdateOne) sqlSave(ctx context.Context) (_node *UsageCleanupTask, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(usagecleanuptask.Table, usagecleanuptask.Columns, sqlgraph.NewFieldSpec(usagecleanuptask.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "UsageCleanupTask.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, usagecleanuptask.FieldID) + for _, f := range fields { + if !usagecleanuptask.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != usagecleanuptask.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(usagecleanuptask.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(usagecleanuptask.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.Filters(); ok { + _spec.SetField(usagecleanuptask.FieldFilters, field.TypeJSON, value) + } + if value, ok := _u.mutation.AppendedFilters(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, usagecleanuptask.FieldFilters, value) + }) + } + if value, ok := _u.mutation.CreatedBy(); ok { + _spec.SetField(usagecleanuptask.FieldCreatedBy, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedCreatedBy(); ok { + _spec.AddField(usagecleanuptask.FieldCreatedBy, field.TypeInt64, value) + } + if value, ok := _u.mutation.DeletedRows(); ok { + _spec.SetField(usagecleanuptask.FieldDeletedRows, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedDeletedRows(); ok { + _spec.AddField(usagecleanuptask.FieldDeletedRows, field.TypeInt64, value) + } + if value, ok := _u.mutation.ErrorMessage(); ok { + _spec.SetField(usagecleanuptask.FieldErrorMessage, field.TypeString, value) + } + if _u.mutation.ErrorMessageCleared() { + _spec.ClearField(usagecleanuptask.FieldErrorMessage, field.TypeString) + } + if value, ok := _u.mutation.CanceledBy(); ok { + _spec.SetField(usagecleanuptask.FieldCanceledBy, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedCanceledBy(); ok { + _spec.AddField(usagecleanuptask.FieldCanceledBy, field.TypeInt64, value) + } + if _u.mutation.CanceledByCleared() { + _spec.ClearField(usagecleanuptask.FieldCanceledBy, field.TypeInt64) + } + if value, ok := _u.mutation.CanceledAt(); ok { + _spec.SetField(usagecleanuptask.FieldCanceledAt, field.TypeTime, value) + } + if _u.mutation.CanceledAtCleared() { + _spec.ClearField(usagecleanuptask.FieldCanceledAt, field.TypeTime) + } + if value, ok := _u.mutation.StartedAt(); ok { + _spec.SetField(usagecleanuptask.FieldStartedAt, field.TypeTime, value) + } + if _u.mutation.StartedAtCleared() { + _spec.ClearField(usagecleanuptask.FieldStartedAt, field.TypeTime) + } + if value, ok := _u.mutation.FinishedAt(); ok { + _spec.SetField(usagecleanuptask.FieldFinishedAt, field.TypeTime, value) + } + if _u.mutation.FinishedAtCleared() { + _spec.ClearField(usagecleanuptask.FieldFinishedAt, field.TypeTime) + } + _node = &UsageCleanupTask{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{usagecleanuptask.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/go.mod b/backend/go.mod index 9ebae69e..fd429b07 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -98,6 +98,7 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/morikuni/aec v1.0.0 // indirect + github.com/ncruces/go-strftime v1.0.0 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.1 // indirect @@ -108,6 +109,7 @@ require ( github.com/quic-go/qpack v0.6.0 // indirect github.com/quic-go/quic-go v0.57.1 // indirect github.com/refraction-networking/utls v1.8.1 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/rivo/uniseg v0.2.0 // indirect github.com/robfig/cron/v3 v3.0.1 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect @@ -140,7 +142,7 @@ require ( go.uber.org/automaxprocs v1.6.0 // indirect go.uber.org/multierr v1.9.0 // indirect golang.org/x/arch v0.3.0 // indirect - golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect + golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect golang.org/x/mod v0.30.0 // indirect golang.org/x/sys v0.39.0 // indirect golang.org/x/text v0.32.0 // indirect @@ -149,4 +151,8 @@ require ( google.golang.org/grpc v1.75.1 // indirect google.golang.org/protobuf v1.36.10 // indirect gopkg.in/ini.v1 v1.67.0 // indirect + modernc.org/libc v1.67.6 // indirect + modernc.org/mathutil v1.7.1 // indirect + modernc.org/memory v1.11.0 // indirect + modernc.org/sqlite v1.44.1 // indirect ) diff --git a/backend/go.sum b/backend/go.sum index 4496603d..aa10718c 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -200,6 +200,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w= +github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -225,6 +227,8 @@ github.com/redis/go-redis/v9 v9.17.2 h1:P2EGsA4qVIM3Pp+aPocCJ7DguDHhqrXNhVcEp4Vi github.com/redis/go-redis/v9 v9.17.2/go.mod h1:u410H11HMLoB+TP67dz8rL9s6QW2j76l0//kSOd3370= github.com/refraction-networking/utls v1.8.1 h1:yNY1kapmQU8JeM1sSw2H2asfTIwWxIkrMJI0pRUOCAo= github.com/refraction-networking/utls v1.8.1/go.mod h1:jkSOEkLqn+S/jtpEHPOsVv/4V4EVnelwbMQl4vCWXAM= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= @@ -339,6 +343,8 @@ golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= +golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY= +golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70= golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= @@ -366,6 +372,7 @@ golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= golang.org/x/tools/go/expect v0.1.0-deprecated h1:jY2C5HGYR5lqex3gEniOQL0r7Dq5+VGVgY1nudX5lXY= golang.org/x/tools/go/expect v0.1.0-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= +golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM= golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -388,4 +395,12 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +modernc.org/libc v1.67.6 h1:eVOQvpModVLKOdT+LvBPjdQqfrZq+pC39BygcT+E7OI= +modernc.org/libc v1.67.6/go.mod h1:JAhxUVlolfYDErnwiqaLvUqc8nfb2r6S6slAgZOnaiE= +modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= +modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= +modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI= +modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw= +modernc.org/sqlite v1.44.1 h1:qybx/rNpfQipX/t47OxbHmkkJuv2JWifCMH8SVUiDas= +modernc.org/sqlite v1.44.1/go.mod h1:CzbrU2lSB1DKUusvwGz7rqEKIq+NUd8GWuBBZDs9/nA= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/backend/internal/handler/admin/usage_cleanup_handler_test.go b/backend/internal/handler/admin/usage_cleanup_handler_test.go index d8684c39..ed1c7cc2 100644 --- a/backend/internal/handler/admin/usage_cleanup_handler_test.go +++ b/backend/internal/handler/admin/usage_cleanup_handler_test.go @@ -3,8 +3,8 @@ package admin import ( "bytes" "context" - "encoding/json" "database/sql" + "encoding/json" "errors" "net/http" "net/http/httptest" diff --git a/backend/internal/repository/usage_cleanup_repo.go b/backend/internal/repository/usage_cleanup_repo.go index b6dfa42a..9c021357 100644 --- a/backend/internal/repository/usage_cleanup_repo.go +++ b/backend/internal/repository/usage_cleanup_repo.go @@ -7,43 +7,41 @@ import ( "errors" "fmt" "strings" + "time" + dbent "github.com/Wei-Shaw/sub2api/ent" + dbusagecleanuptask "github.com/Wei-Shaw/sub2api/ent/usagecleanuptask" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" "github.com/Wei-Shaw/sub2api/internal/service" ) type usageCleanupRepository struct { - sql sqlExecutor + client *dbent.Client + sql sqlExecutor } -func NewUsageCleanupRepository(sqlDB *sql.DB) service.UsageCleanupRepository { - return &usageCleanupRepository{sql: sqlDB} +func NewUsageCleanupRepository(client *dbent.Client, sqlDB *sql.DB) service.UsageCleanupRepository { + return newUsageCleanupRepositoryWithSQL(client, sqlDB) +} + +func newUsageCleanupRepositoryWithSQL(client *dbent.Client, sqlq sqlExecutor) *usageCleanupRepository { + return &usageCleanupRepository{client: client, sql: sqlq} } func (r *usageCleanupRepository) CreateTask(ctx context.Context, task *service.UsageCleanupTask) error { if task == nil { return nil } - filtersJSON, err := json.Marshal(task.Filters) - if err != nil { - return fmt.Errorf("marshal cleanup filters: %w", err) + if r.client != nil { + return r.createTaskWithEnt(ctx, task) } - query := ` - INSERT INTO usage_cleanup_tasks ( - status, - filters, - created_by, - deleted_rows - ) VALUES ($1, $2, $3, $4) - RETURNING id, created_at, updated_at - ` - if err := scanSingleRow(ctx, r.sql, query, []any{task.Status, filtersJSON, task.CreatedBy, task.DeletedRows}, &task.ID, &task.CreatedAt, &task.UpdatedAt); err != nil { - return err - } - return nil + return r.createTaskWithSQL(ctx, task) } func (r *usageCleanupRepository) ListTasks(ctx context.Context, params pagination.PaginationParams) ([]service.UsageCleanupTask, *pagination.PaginationResult, error) { + if r.client != nil { + return r.listTasksWithEnt(ctx, params) + } var total int64 if err := scanSingleRow(ctx, r.sql, "SELECT COUNT(*) FROM usage_cleanup_tasks", nil, &total); err != nil { return nil, nil, err @@ -57,14 +55,14 @@ func (r *usageCleanupRepository) ListTasks(ctx context.Context, params paginatio canceled_by, canceled_at, started_at, finished_at, created_at, updated_at FROM usage_cleanup_tasks - ORDER BY created_at DESC + ORDER BY created_at DESC, id DESC LIMIT $1 OFFSET $2 ` rows, err := r.sql.QueryContext(ctx, query, params.Limit(), params.Offset()) if err != nil { return nil, nil, err } - defer rows.Close() + defer func() { _ = rows.Close() }() tasks := make([]service.UsageCleanupTask, 0) for rows.Next() { @@ -194,6 +192,9 @@ func (r *usageCleanupRepository) ClaimNextPendingTask(ctx context.Context, stale } func (r *usageCleanupRepository) GetTaskStatus(ctx context.Context, taskID int64) (string, error) { + if r.client != nil { + return r.getTaskStatusWithEnt(ctx, taskID) + } var status string if err := scanSingleRow(ctx, r.sql, "SELECT status FROM usage_cleanup_tasks WHERE id = $1", []any{taskID}, &status); err != nil { return "", err @@ -202,6 +203,9 @@ func (r *usageCleanupRepository) GetTaskStatus(ctx context.Context, taskID int64 } func (r *usageCleanupRepository) UpdateTaskProgress(ctx context.Context, taskID int64, deletedRows int64) error { + if r.client != nil { + return r.updateTaskProgressWithEnt(ctx, taskID, deletedRows) + } query := ` UPDATE usage_cleanup_tasks SET deleted_rows = $1, @@ -213,6 +217,9 @@ func (r *usageCleanupRepository) UpdateTaskProgress(ctx context.Context, taskID } func (r *usageCleanupRepository) CancelTask(ctx context.Context, taskID int64, canceledBy int64) (bool, error) { + if r.client != nil { + return r.cancelTaskWithEnt(ctx, taskID, canceledBy) + } query := ` UPDATE usage_cleanup_tasks SET status = $1, @@ -243,6 +250,9 @@ func (r *usageCleanupRepository) CancelTask(ctx context.Context, taskID int64, c } func (r *usageCleanupRepository) MarkTaskSucceeded(ctx context.Context, taskID int64, deletedRows int64) error { + if r.client != nil { + return r.markTaskSucceededWithEnt(ctx, taskID, deletedRows) + } query := ` UPDATE usage_cleanup_tasks SET status = $1, @@ -256,6 +266,9 @@ func (r *usageCleanupRepository) MarkTaskSucceeded(ctx context.Context, taskID i } func (r *usageCleanupRepository) MarkTaskFailed(ctx context.Context, taskID int64, deletedRows int64, errorMsg string) error { + if r.client != nil { + return r.markTaskFailedWithEnt(ctx, taskID, deletedRows, errorMsg) + } query := ` UPDATE usage_cleanup_tasks SET status = $1, @@ -295,7 +308,7 @@ func (r *usageCleanupRepository) DeleteUsageLogsBatch(ctx context.Context, filte if err != nil { return 0, err } - defer rows.Close() + defer func() { _ = rows.Close() }() var deleted int64 for rows.Next() { @@ -357,7 +370,182 @@ func buildUsageCleanupWhere(filters service.UsageCleanupFilters) (string, []any) if filters.BillingType != nil { conditions = append(conditions, fmt.Sprintf("billing_type = $%d", idx)) args = append(args, *filters.BillingType) - idx++ } return strings.Join(conditions, " AND "), args } + +func (r *usageCleanupRepository) createTaskWithEnt(ctx context.Context, task *service.UsageCleanupTask) error { + client := clientFromContext(ctx, r.client) + filtersJSON, err := json.Marshal(task.Filters) + if err != nil { + return fmt.Errorf("marshal cleanup filters: %w", err) + } + created, err := client.UsageCleanupTask. + Create(). + SetStatus(task.Status). + SetFilters(json.RawMessage(filtersJSON)). + SetCreatedBy(task.CreatedBy). + SetDeletedRows(task.DeletedRows). + Save(ctx) + if err != nil { + return err + } + task.ID = created.ID + task.CreatedAt = created.CreatedAt + task.UpdatedAt = created.UpdatedAt + return nil +} + +func (r *usageCleanupRepository) createTaskWithSQL(ctx context.Context, task *service.UsageCleanupTask) error { + filtersJSON, err := json.Marshal(task.Filters) + if err != nil { + return fmt.Errorf("marshal cleanup filters: %w", err) + } + query := ` + INSERT INTO usage_cleanup_tasks ( + status, + filters, + created_by, + deleted_rows + ) VALUES ($1, $2, $3, $4) + RETURNING id, created_at, updated_at + ` + if err := scanSingleRow(ctx, r.sql, query, []any{task.Status, filtersJSON, task.CreatedBy, task.DeletedRows}, &task.ID, &task.CreatedAt, &task.UpdatedAt); err != nil { + return err + } + return nil +} + +func (r *usageCleanupRepository) listTasksWithEnt(ctx context.Context, params pagination.PaginationParams) ([]service.UsageCleanupTask, *pagination.PaginationResult, error) { + client := clientFromContext(ctx, r.client) + query := client.UsageCleanupTask.Query() + total, err := query.Clone().Count(ctx) + if err != nil { + return nil, nil, err + } + if total == 0 { + return []service.UsageCleanupTask{}, paginationResultFromTotal(0, params), nil + } + rows, err := query. + Order(dbent.Desc(dbusagecleanuptask.FieldCreatedAt), dbent.Desc(dbusagecleanuptask.FieldID)). + Offset(params.Offset()). + Limit(params.Limit()). + All(ctx) + if err != nil { + return nil, nil, err + } + tasks := make([]service.UsageCleanupTask, 0, len(rows)) + for _, row := range rows { + task, err := usageCleanupTaskFromEnt(row) + if err != nil { + return nil, nil, err + } + tasks = append(tasks, task) + } + return tasks, paginationResultFromTotal(int64(total), params), nil +} + +func (r *usageCleanupRepository) getTaskStatusWithEnt(ctx context.Context, taskID int64) (string, error) { + client := clientFromContext(ctx, r.client) + task, err := client.UsageCleanupTask.Query(). + Where(dbusagecleanuptask.IDEQ(taskID)). + Only(ctx) + if err != nil { + if dbent.IsNotFound(err) { + return "", sql.ErrNoRows + } + return "", err + } + return task.Status, nil +} + +func (r *usageCleanupRepository) updateTaskProgressWithEnt(ctx context.Context, taskID int64, deletedRows int64) error { + client := clientFromContext(ctx, r.client) + now := time.Now() + _, err := client.UsageCleanupTask.Update(). + Where(dbusagecleanuptask.IDEQ(taskID)). + SetDeletedRows(deletedRows). + SetUpdatedAt(now). + Save(ctx) + return err +} + +func (r *usageCleanupRepository) cancelTaskWithEnt(ctx context.Context, taskID int64, canceledBy int64) (bool, error) { + client := clientFromContext(ctx, r.client) + now := time.Now() + affected, err := client.UsageCleanupTask.Update(). + Where( + dbusagecleanuptask.IDEQ(taskID), + dbusagecleanuptask.StatusIn(service.UsageCleanupStatusPending, service.UsageCleanupStatusRunning), + ). + SetStatus(service.UsageCleanupStatusCanceled). + SetCanceledBy(canceledBy). + SetCanceledAt(now). + SetFinishedAt(now). + ClearErrorMessage(). + SetUpdatedAt(now). + Save(ctx) + if err != nil { + return false, err + } + return affected > 0, nil +} + +func (r *usageCleanupRepository) markTaskSucceededWithEnt(ctx context.Context, taskID int64, deletedRows int64) error { + client := clientFromContext(ctx, r.client) + now := time.Now() + _, err := client.UsageCleanupTask.Update(). + Where(dbusagecleanuptask.IDEQ(taskID)). + SetStatus(service.UsageCleanupStatusSucceeded). + SetDeletedRows(deletedRows). + SetFinishedAt(now). + SetUpdatedAt(now). + Save(ctx) + return err +} + +func (r *usageCleanupRepository) markTaskFailedWithEnt(ctx context.Context, taskID int64, deletedRows int64, errorMsg string) error { + client := clientFromContext(ctx, r.client) + now := time.Now() + _, err := client.UsageCleanupTask.Update(). + Where(dbusagecleanuptask.IDEQ(taskID)). + SetStatus(service.UsageCleanupStatusFailed). + SetDeletedRows(deletedRows). + SetErrorMessage(errorMsg). + SetFinishedAt(now). + SetUpdatedAt(now). + Save(ctx) + return err +} + +func usageCleanupTaskFromEnt(row *dbent.UsageCleanupTask) (service.UsageCleanupTask, error) { + task := service.UsageCleanupTask{ + ID: row.ID, + Status: row.Status, + CreatedBy: row.CreatedBy, + DeletedRows: row.DeletedRows, + CreatedAt: row.CreatedAt, + UpdatedAt: row.UpdatedAt, + } + if len(row.Filters) > 0 { + if err := json.Unmarshal(row.Filters, &task.Filters); err != nil { + return service.UsageCleanupTask{}, fmt.Errorf("parse cleanup filters: %w", err) + } + } + if row.ErrorMessage != nil { + task.ErrorMsg = row.ErrorMessage + } + if row.CanceledBy != nil { + task.CanceledBy = row.CanceledBy + } + if row.CanceledAt != nil { + task.CanceledAt = row.CanceledAt + } + if row.StartedAt != nil { + task.StartedAt = row.StartedAt + } + if row.FinishedAt != nil { + task.FinishedAt = row.FinishedAt + } + return task, nil +} diff --git a/backend/internal/repository/usage_cleanup_repo_ent_test.go b/backend/internal/repository/usage_cleanup_repo_ent_test.go new file mode 100644 index 00000000..6c20b2b9 --- /dev/null +++ b/backend/internal/repository/usage_cleanup_repo_ent_test.go @@ -0,0 +1,251 @@ +package repository + +import ( + "context" + "database/sql" + "encoding/json" + "testing" + "time" + + dbent "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/ent/enttest" + dbusagecleanuptask "github.com/Wei-Shaw/sub2api/ent/usagecleanuptask" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/stretchr/testify/require" + + "entgo.io/ent/dialect" + entsql "entgo.io/ent/dialect/sql" + _ "modernc.org/sqlite" +) + +func newUsageCleanupEntRepo(t *testing.T) (*usageCleanupRepository, *dbent.Client) { + t.Helper() + db, err := sql.Open("sqlite", "file:usage_cleanup?mode=memory&cache=shared") + require.NoError(t, err) + t.Cleanup(func() { _ = db.Close() }) + _, err = db.Exec("PRAGMA foreign_keys = ON") + require.NoError(t, err) + + drv := entsql.OpenDB(dialect.SQLite, db) + client := enttest.NewClient(t, enttest.WithOptions(dbent.Driver(drv))) + t.Cleanup(func() { _ = client.Close() }) + + repo := &usageCleanupRepository{client: client, sql: db} + return repo, client +} + +func TestUsageCleanupRepositoryEntCreateAndList(t *testing.T) { + repo, _ := newUsageCleanupEntRepo(t) + + start := time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC) + end := start.Add(24 * time.Hour) + task := &service.UsageCleanupTask{ + Status: service.UsageCleanupStatusPending, + Filters: service.UsageCleanupFilters{StartTime: start, EndTime: end}, + CreatedBy: 9, + } + require.NoError(t, repo.CreateTask(context.Background(), task)) + require.NotZero(t, task.ID) + + task2 := &service.UsageCleanupTask{ + Status: service.UsageCleanupStatusRunning, + Filters: service.UsageCleanupFilters{StartTime: start.Add(-24 * time.Hour), EndTime: end.Add(-24 * time.Hour)}, + CreatedBy: 10, + } + require.NoError(t, repo.CreateTask(context.Background(), task2)) + + tasks, result, err := repo.ListTasks(context.Background(), pagination.PaginationParams{Page: 1, PageSize: 10}) + require.NoError(t, err) + require.Len(t, tasks, 2) + require.Equal(t, int64(2), result.Total) + require.Greater(t, tasks[0].ID, tasks[1].ID) + require.Equal(t, start, tasks[1].Filters.StartTime) + require.Equal(t, end, tasks[1].Filters.EndTime) +} + +func TestUsageCleanupRepositoryEntListEmpty(t *testing.T) { + repo, _ := newUsageCleanupEntRepo(t) + + tasks, result, err := repo.ListTasks(context.Background(), pagination.PaginationParams{Page: 1, PageSize: 10}) + require.NoError(t, err) + require.Empty(t, tasks) + require.Equal(t, int64(0), result.Total) +} + +func TestUsageCleanupRepositoryEntGetStatusAndProgress(t *testing.T) { + repo, client := newUsageCleanupEntRepo(t) + + task := &service.UsageCleanupTask{ + Status: service.UsageCleanupStatusPending, + Filters: service.UsageCleanupFilters{StartTime: time.Now().UTC(), EndTime: time.Now().UTC().Add(time.Hour)}, + CreatedBy: 3, + } + require.NoError(t, repo.CreateTask(context.Background(), task)) + + status, err := repo.GetTaskStatus(context.Background(), task.ID) + require.NoError(t, err) + require.Equal(t, service.UsageCleanupStatusPending, status) + + _, err = repo.GetTaskStatus(context.Background(), task.ID+99) + require.ErrorIs(t, err, sql.ErrNoRows) + + require.NoError(t, repo.UpdateTaskProgress(context.Background(), task.ID, 42)) + loaded, err := client.UsageCleanupTask.Get(context.Background(), task.ID) + require.NoError(t, err) + require.Equal(t, int64(42), loaded.DeletedRows) +} + +func TestUsageCleanupRepositoryEntCancelAndFinish(t *testing.T) { + repo, client := newUsageCleanupEntRepo(t) + + task := &service.UsageCleanupTask{ + Status: service.UsageCleanupStatusPending, + Filters: service.UsageCleanupFilters{StartTime: time.Now().UTC(), EndTime: time.Now().UTC().Add(time.Hour)}, + CreatedBy: 5, + } + require.NoError(t, repo.CreateTask(context.Background(), task)) + + ok, err := repo.CancelTask(context.Background(), task.ID, 7) + require.NoError(t, err) + require.True(t, ok) + + loaded, err := client.UsageCleanupTask.Get(context.Background(), task.ID) + require.NoError(t, err) + require.Equal(t, service.UsageCleanupStatusCanceled, loaded.Status) + require.NotNil(t, loaded.CanceledBy) + require.NotNil(t, loaded.CanceledAt) + require.NotNil(t, loaded.FinishedAt) + + loaded.Status = service.UsageCleanupStatusSucceeded + _, err = client.UsageCleanupTask.Update().Where(dbusagecleanuptask.IDEQ(task.ID)).SetStatus(loaded.Status).Save(context.Background()) + require.NoError(t, err) + + ok, err = repo.CancelTask(context.Background(), task.ID, 7) + require.NoError(t, err) + require.False(t, ok) +} + +func TestUsageCleanupRepositoryEntCancelError(t *testing.T) { + repo, client := newUsageCleanupEntRepo(t) + + task := &service.UsageCleanupTask{ + Status: service.UsageCleanupStatusPending, + Filters: service.UsageCleanupFilters{StartTime: time.Now().UTC(), EndTime: time.Now().UTC().Add(time.Hour)}, + CreatedBy: 5, + } + require.NoError(t, repo.CreateTask(context.Background(), task)) + + require.NoError(t, client.Close()) + _, err := repo.CancelTask(context.Background(), task.ID, 7) + require.Error(t, err) +} + +func TestUsageCleanupRepositoryEntMarkResults(t *testing.T) { + repo, client := newUsageCleanupEntRepo(t) + + task := &service.UsageCleanupTask{ + Status: service.UsageCleanupStatusRunning, + Filters: service.UsageCleanupFilters{StartTime: time.Now().UTC(), EndTime: time.Now().UTC().Add(time.Hour)}, + CreatedBy: 12, + } + require.NoError(t, repo.CreateTask(context.Background(), task)) + + require.NoError(t, repo.MarkTaskSucceeded(context.Background(), task.ID, 6)) + loaded, err := client.UsageCleanupTask.Get(context.Background(), task.ID) + require.NoError(t, err) + require.Equal(t, service.UsageCleanupStatusSucceeded, loaded.Status) + require.Equal(t, int64(6), loaded.DeletedRows) + require.NotNil(t, loaded.FinishedAt) + + task2 := &service.UsageCleanupTask{ + Status: service.UsageCleanupStatusRunning, + Filters: service.UsageCleanupFilters{StartTime: time.Now().UTC(), EndTime: time.Now().UTC().Add(time.Hour)}, + CreatedBy: 12, + } + require.NoError(t, repo.CreateTask(context.Background(), task2)) + + require.NoError(t, repo.MarkTaskFailed(context.Background(), task2.ID, 4, "boom")) + loaded2, err := client.UsageCleanupTask.Get(context.Background(), task2.ID) + require.NoError(t, err) + require.Equal(t, service.UsageCleanupStatusFailed, loaded2.Status) + require.Equal(t, "boom", *loaded2.ErrorMessage) +} + +func TestUsageCleanupRepositoryEntInvalidStatus(t *testing.T) { + repo, _ := newUsageCleanupEntRepo(t) + + task := &service.UsageCleanupTask{ + Status: "invalid", + Filters: service.UsageCleanupFilters{StartTime: time.Now().UTC(), EndTime: time.Now().UTC().Add(time.Hour)}, + CreatedBy: 1, + } + require.Error(t, repo.CreateTask(context.Background(), task)) +} + +func TestUsageCleanupRepositoryEntListInvalidFilters(t *testing.T) { + repo, client := newUsageCleanupEntRepo(t) + + now := time.Now().UTC() + driver, ok := client.Driver().(*entsql.Driver) + require.True(t, ok) + _, err := driver.DB().ExecContext( + context.Background(), + `INSERT INTO usage_cleanup_tasks (status, filters, created_by, deleted_rows, created_at, updated_at) + VALUES (?, ?, ?, ?, ?, ?)`, + service.UsageCleanupStatusPending, + []byte("invalid-json"), + int64(1), + int64(0), + now, + now, + ) + require.NoError(t, err) + + _, _, err = repo.ListTasks(context.Background(), pagination.PaginationParams{Page: 1, PageSize: 10}) + require.Error(t, err) +} + +func TestUsageCleanupTaskFromEntFull(t *testing.T) { + start := time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC) + end := start.Add(24 * time.Hour) + errMsg := "failed" + canceledBy := int64(2) + canceledAt := start.Add(time.Minute) + startedAt := start.Add(2 * time.Minute) + finishedAt := start.Add(3 * time.Minute) + filters := service.UsageCleanupFilters{StartTime: start, EndTime: end} + filtersJSON, err := json.Marshal(filters) + require.NoError(t, err) + + task, err := usageCleanupTaskFromEnt(&dbent.UsageCleanupTask{ + ID: 10, + Status: service.UsageCleanupStatusFailed, + Filters: filtersJSON, + CreatedBy: 11, + DeletedRows: 7, + ErrorMessage: &errMsg, + CanceledBy: &canceledBy, + CanceledAt: &canceledAt, + StartedAt: &startedAt, + FinishedAt: &finishedAt, + CreatedAt: start, + UpdatedAt: end, + }) + require.NoError(t, err) + require.Equal(t, int64(10), task.ID) + require.Equal(t, service.UsageCleanupStatusFailed, task.Status) + require.NotNil(t, task.ErrorMsg) + require.NotNil(t, task.CanceledBy) + require.NotNil(t, task.CanceledAt) + require.NotNil(t, task.StartedAt) + require.NotNil(t, task.FinishedAt) +} + +func TestUsageCleanupTaskFromEntInvalidFilters(t *testing.T) { + task, err := usageCleanupTaskFromEnt(&dbent.UsageCleanupTask{ + Filters: json.RawMessage("invalid-json"), + }) + require.Error(t, err) + require.Empty(t, task) +} diff --git a/backend/internal/repository/usage_cleanup_repo_test.go b/backend/internal/repository/usage_cleanup_repo_test.go index e5582709..0ca30ec7 100644 --- a/backend/internal/repository/usage_cleanup_repo_test.go +++ b/backend/internal/repository/usage_cleanup_repo_test.go @@ -23,7 +23,7 @@ func newSQLMock(t *testing.T) (*sql.DB, sqlmock.Sqlmock) { func TestNewUsageCleanupRepository(t *testing.T) { db, _ := newSQLMock(t) - repo := NewUsageCleanupRepository(db) + repo := NewUsageCleanupRepository(nil, db) require.NotNil(t, repo) } @@ -146,6 +146,21 @@ func TestUsageCleanupRepositoryListTasks(t *testing.T) { require.NoError(t, mock.ExpectationsWereMet()) } +func TestUsageCleanupRepositoryListTasksQueryError(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + mock.ExpectQuery("SELECT COUNT\\(\\*\\) FROM usage_cleanup_tasks"). + WillReturnRows(sqlmock.NewRows([]string{"count"}).AddRow(int64(2))) + mock.ExpectQuery("SELECT id, status, filters, created_by, deleted_rows, error_message"). + WithArgs(20, 0). + WillReturnError(sql.ErrConnDone) + + _, _, err := repo.ListTasks(context.Background(), pagination.PaginationParams{Page: 1, PageSize: 20}) + require.Error(t, err) + require.NoError(t, mock.ExpectationsWereMet()) +} + func TestUsageCleanupRepositoryListTasksInvalidFilters(t *testing.T) { db, mock := newSQLMock(t) repo := &usageCleanupRepository{sql: db} @@ -320,6 +335,19 @@ func TestUsageCleanupRepositoryGetTaskStatus(t *testing.T) { require.NoError(t, mock.ExpectationsWereMet()) } +func TestUsageCleanupRepositoryGetTaskStatusQueryError(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + mock.ExpectQuery("SELECT status FROM usage_cleanup_tasks"). + WithArgs(int64(9)). + WillReturnError(sql.ErrConnDone) + + _, err := repo.GetTaskStatus(context.Background(), 9) + require.Error(t, err) + require.NoError(t, mock.ExpectationsWereMet()) +} + func TestUsageCleanupRepositoryUpdateTaskProgress(t *testing.T) { db, mock := newSQLMock(t) repo := &usageCleanupRepository{sql: db} @@ -347,6 +375,20 @@ func TestUsageCleanupRepositoryCancelTask(t *testing.T) { require.NoError(t, mock.ExpectationsWereMet()) } +func TestUsageCleanupRepositoryCancelTaskNoRows(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + mock.ExpectQuery("UPDATE usage_cleanup_tasks"). + WithArgs(service.UsageCleanupStatusCanceled, int64(6), int64(9), service.UsageCleanupStatusPending, service.UsageCleanupStatusRunning). + WillReturnRows(sqlmock.NewRows([]string{"id"})) + + ok, err := repo.CancelTask(context.Background(), 6, 9) + require.NoError(t, err) + require.False(t, ok) + require.NoError(t, mock.ExpectationsWereMet()) +} + func TestUsageCleanupRepositoryDeleteUsageLogsBatchMissingRange(t *testing.T) { db, _ := newSQLMock(t) repo := &usageCleanupRepository{sql: db} diff --git a/backend/internal/service/dashboard_aggregation_service.go b/backend/internal/service/dashboard_aggregation_service.go index 8f7e8144..10c68868 100644 --- a/backend/internal/service/dashboard_aggregation_service.go +++ b/backend/internal/service/dashboard_aggregation_service.go @@ -20,7 +20,7 @@ var ( // ErrDashboardBackfillDisabled 当配置禁用回填时返回。 ErrDashboardBackfillDisabled = errors.New("仪表盘聚合回填已禁用") // ErrDashboardBackfillTooLarge 当回填跨度超过限制时返回。 - ErrDashboardBackfillTooLarge = errors.New("回填时间跨度过大") + ErrDashboardBackfillTooLarge = errors.New("回填时间跨度过大") errDashboardAggregationRunning = errors.New("聚合作业正在运行") ) diff --git a/backend/internal/service/usage_cleanup_service.go b/backend/internal/service/usage_cleanup_service.go index 8ca02cfc..37f6d375 100644 --- a/backend/internal/service/usage_cleanup_service.go +++ b/backend/internal/service/usage_cleanup_service.go @@ -151,20 +151,24 @@ func (s *UsageCleanupService) CreateTask(ctx context.Context, filters UsageClean } func (s *UsageCleanupService) runOnce() { - if !atomic.CompareAndSwapInt32(&s.running, 0, 1) { + svc := s + if svc == nil { + return + } + if !atomic.CompareAndSwapInt32(&svc.running, 0, 1) { log.Printf("[UsageCleanup] run_once skipped: already_running=true") return } - defer atomic.StoreInt32(&s.running, 0) + defer atomic.StoreInt32(&svc.running, 0) parent := context.Background() - if s != nil && s.workerCtx != nil { - parent = s.workerCtx + if svc.workerCtx != nil { + parent = svc.workerCtx } - ctx, cancel := context.WithTimeout(parent, s.taskTimeout()) + ctx, cancel := context.WithTimeout(parent, svc.taskTimeout()) defer cancel() - task, err := s.repo.ClaimNextPendingTask(ctx, int64(s.taskTimeout().Seconds())) + task, err := svc.repo.ClaimNextPendingTask(ctx, int64(svc.taskTimeout().Seconds())) if err != nil { log.Printf("[UsageCleanup] claim pending task failed: %v", err) return @@ -175,7 +179,7 @@ func (s *UsageCleanupService) runOnce() { } log.Printf("[UsageCleanup] task claimed: task=%d status=%s created_by=%d deleted_rows=%d %s", task.ID, task.Status, task.CreatedBy, task.DeletedRows, describeUsageCleanupFilters(task.Filters)) - s.executeTask(ctx, task) + svc.executeTask(ctx, task) } func (s *UsageCleanupService) executeTask(ctx context.Context, task *UsageCleanupTask) { diff --git a/backend/internal/service/usage_cleanup_service_test.go b/backend/internal/service/usage_cleanup_service_test.go index 37d3eb19..05c423bc 100644 --- a/backend/internal/service/usage_cleanup_service_test.go +++ b/backend/internal/service/usage_cleanup_service_test.go @@ -46,8 +46,45 @@ type cleanupRepoStub struct { markSucceeded []cleanupMarkCall markFailed []cleanupMarkCall statusByID map[int64]string + statusErr error progressCalls []cleanupMarkCall + updateErr error cancelCalls []int64 + cancelErr error + cancelResult *bool + markFailedErr error +} + +type dashboardRepoStub struct { + recomputeErr error +} + +func (s *dashboardRepoStub) AggregateRange(ctx context.Context, start, end time.Time) error { + return nil +} + +func (s *dashboardRepoStub) RecomputeRange(ctx context.Context, start, end time.Time) error { + return s.recomputeErr +} + +func (s *dashboardRepoStub) GetAggregationWatermark(ctx context.Context) (time.Time, error) { + return time.Time{}, nil +} + +func (s *dashboardRepoStub) UpdateAggregationWatermark(ctx context.Context, aggregatedAt time.Time) error { + return nil +} + +func (s *dashboardRepoStub) CleanupAggregates(ctx context.Context, hourlyCutoff, dailyCutoff time.Time) error { + return nil +} + +func (s *dashboardRepoStub) CleanupUsageLogs(ctx context.Context, cutoff time.Time) error { + return nil +} + +func (s *dashboardRepoStub) EnsureUsageLogsPartitions(ctx context.Context, now time.Time) error { + return nil } func (s *cleanupRepoStub) CreateTask(ctx context.Context, task *UsageCleanupTask) error { @@ -100,6 +137,9 @@ func (s *cleanupRepoStub) ClaimNextPendingTask(ctx context.Context, staleRunning func (s *cleanupRepoStub) GetTaskStatus(ctx context.Context, taskID int64) (string, error) { s.mu.Lock() defer s.mu.Unlock() + if s.statusErr != nil { + return "", s.statusErr + } if s.statusByID == nil { return "", sql.ErrNoRows } @@ -114,6 +154,9 @@ func (s *cleanupRepoStub) UpdateTaskProgress(ctx context.Context, taskID int64, s.mu.Lock() defer s.mu.Unlock() s.progressCalls = append(s.progressCalls, cleanupMarkCall{taskID: taskID, deletedRows: deletedRows}) + if s.updateErr != nil { + return s.updateErr + } return nil } @@ -121,6 +164,19 @@ func (s *cleanupRepoStub) CancelTask(ctx context.Context, taskID int64, canceled s.mu.Lock() defer s.mu.Unlock() s.cancelCalls = append(s.cancelCalls, taskID) + if s.cancelErr != nil { + return false, s.cancelErr + } + if s.cancelResult != nil { + ok := *s.cancelResult + if ok { + if s.statusByID == nil { + s.statusByID = map[int64]string{} + } + s.statusByID[taskID] = UsageCleanupStatusCanceled + } + return ok, nil + } if s.statusByID == nil { s.statusByID = map[int64]string{} } @@ -151,6 +207,9 @@ func (s *cleanupRepoStub) MarkTaskFailed(ctx context.Context, taskID int64, dele s.statusByID = map[int64]string{} } s.statusByID[taskID] = UsageCleanupStatusFailed + if s.markFailedErr != nil { + return s.markFailedErr + } return nil } @@ -266,9 +325,11 @@ func TestUsageCleanupServiceCreateTaskRepoError(t *testing.T) { } func TestUsageCleanupServiceRunOnceSuccess(t *testing.T) { + start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + end := start.Add(2 * time.Hour) repo := &cleanupRepoStub{ claimQueue: []*UsageCleanupTask{ - {ID: 5, Filters: UsageCleanupFilters{StartTime: time.Now(), EndTime: time.Now().Add(2 * time.Hour)}}, + {ID: 5, Filters: UsageCleanupFilters{StartTime: start, EndTime: end}}, }, deleteQueue: []cleanupDeleteResponse{ {deleted: 2}, @@ -288,6 +349,9 @@ func TestUsageCleanupServiceRunOnceSuccess(t *testing.T) { require.Empty(t, repo.markFailed) require.Equal(t, int64(5), repo.markSucceeded[0].taskID) require.Equal(t, int64(5), repo.markSucceeded[0].deletedRows) + require.Equal(t, 2, repo.deleteCalls[0].limit) + require.Equal(t, start, repo.deleteCalls[0].filters.StartTime) + require.Equal(t, end, repo.deleteCalls[0].filters.EndTime) } func TestUsageCleanupServiceRunOnceClaimError(t *testing.T) { @@ -336,6 +400,293 @@ func TestUsageCleanupServiceExecuteTaskFailed(t *testing.T) { require.Equal(t, 500, len(repo.markFailed[0].errMsg)) } +func TestUsageCleanupServiceExecuteTaskProgressError(t *testing.T) { + repo := &cleanupRepoStub{ + deleteQueue: []cleanupDeleteResponse{ + {deleted: 2}, + {deleted: 0}, + }, + updateErr: errors.New("update failed"), + } + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, BatchSize: 2}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + task := &UsageCleanupTask{ + ID: 8, + Filters: UsageCleanupFilters{ + StartTime: time.Now().UTC(), + EndTime: time.Now().UTC().Add(time.Hour), + }, + } + + svc.executeTask(context.Background(), task) + + repo.mu.Lock() + defer repo.mu.Unlock() + require.Len(t, repo.markSucceeded, 1) + require.Empty(t, repo.markFailed) + require.Len(t, repo.progressCalls, 1) +} + +func TestUsageCleanupServiceExecuteTaskDeleteCanceled(t *testing.T) { + repo := &cleanupRepoStub{ + deleteQueue: []cleanupDeleteResponse{ + {err: context.Canceled}, + }, + } + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, BatchSize: 2}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + task := &UsageCleanupTask{ + ID: 12, + Filters: UsageCleanupFilters{ + StartTime: time.Now().UTC(), + EndTime: time.Now().UTC().Add(time.Hour), + }, + } + + svc.executeTask(context.Background(), task) + + repo.mu.Lock() + defer repo.mu.Unlock() + require.Empty(t, repo.markSucceeded) + require.Empty(t, repo.markFailed) +} + +func TestUsageCleanupServiceExecuteTaskContextCanceled(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, BatchSize: 2}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + task := &UsageCleanupTask{ + ID: 9, + Filters: UsageCleanupFilters{ + StartTime: time.Now().UTC(), + EndTime: time.Now().UTC().Add(time.Hour), + }, + } + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + svc.executeTask(ctx, task) + + repo.mu.Lock() + defer repo.mu.Unlock() + require.Empty(t, repo.markSucceeded) + require.Empty(t, repo.markFailed) + require.Empty(t, repo.deleteCalls) +} + +func TestUsageCleanupServiceExecuteTaskMarkFailedUpdateError(t *testing.T) { + repo := &cleanupRepoStub{ + deleteQueue: []cleanupDeleteResponse{ + {err: errors.New("boom")}, + }, + markFailedErr: errors.New("update failed"), + } + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, BatchSize: 2}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + task := &UsageCleanupTask{ + ID: 13, + Filters: UsageCleanupFilters{ + StartTime: time.Now().UTC(), + EndTime: time.Now().UTC().Add(time.Hour), + }, + } + + svc.executeTask(context.Background(), task) + + repo.mu.Lock() + defer repo.mu.Unlock() + require.Len(t, repo.markFailed, 1) + require.Equal(t, int64(13), repo.markFailed[0].taskID) +} + +func TestUsageCleanupServiceExecuteTaskDashboardRecomputeError(t *testing.T) { + repo := &cleanupRepoStub{ + deleteQueue: []cleanupDeleteResponse{ + {deleted: 0}, + }, + } + dashboard := NewDashboardAggregationService(&dashboardRepoStub{}, nil, &config.Config{ + DashboardAgg: config.DashboardAggregationConfig{Enabled: false}, + }) + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, BatchSize: 2}} + svc := NewUsageCleanupService(repo, nil, dashboard, cfg) + task := &UsageCleanupTask{ + ID: 14, + Filters: UsageCleanupFilters{ + StartTime: time.Now().UTC(), + EndTime: time.Now().UTC().Add(time.Hour), + }, + } + + svc.executeTask(context.Background(), task) + + repo.mu.Lock() + defer repo.mu.Unlock() + require.Len(t, repo.markSucceeded, 1) +} + +func TestUsageCleanupServiceExecuteTaskDashboardRecomputeSuccess(t *testing.T) { + repo := &cleanupRepoStub{ + deleteQueue: []cleanupDeleteResponse{ + {deleted: 0}, + }, + } + dashboard := NewDashboardAggregationService(&dashboardRepoStub{}, nil, &config.Config{ + DashboardAgg: config.DashboardAggregationConfig{Enabled: true}, + }) + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, BatchSize: 2}} + svc := NewUsageCleanupService(repo, nil, dashboard, cfg) + task := &UsageCleanupTask{ + ID: 15, + Filters: UsageCleanupFilters{ + StartTime: time.Now().UTC(), + EndTime: time.Now().UTC().Add(time.Hour), + }, + } + + svc.executeTask(context.Background(), task) + + repo.mu.Lock() + defer repo.mu.Unlock() + require.Len(t, repo.markSucceeded, 1) +} + +func TestUsageCleanupServiceExecuteTaskCanceled(t *testing.T) { + repo := &cleanupRepoStub{ + statusByID: map[int64]string{ + 3: UsageCleanupStatusCanceled, + }, + } + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, BatchSize: 2}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + task := &UsageCleanupTask{ + ID: 3, + Filters: UsageCleanupFilters{ + StartTime: time.Now().UTC(), + EndTime: time.Now().UTC().Add(time.Hour), + }, + } + + svc.executeTask(context.Background(), task) + + repo.mu.Lock() + defer repo.mu.Unlock() + require.Empty(t, repo.deleteCalls) + require.Empty(t, repo.markSucceeded) + require.Empty(t, repo.markFailed) +} + +func TestUsageCleanupServiceCancelTaskSuccess(t *testing.T) { + repo := &cleanupRepoStub{ + statusByID: map[int64]string{ + 5: UsageCleanupStatusPending, + }, + } + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + + err := svc.CancelTask(context.Background(), 5, 9) + require.NoError(t, err) + + repo.mu.Lock() + defer repo.mu.Unlock() + require.Equal(t, UsageCleanupStatusCanceled, repo.statusByID[5]) + require.Len(t, repo.cancelCalls, 1) +} + +func TestUsageCleanupServiceCancelTaskDisabled(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: false}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + + err := svc.CancelTask(context.Background(), 1, 2) + require.Error(t, err) + require.Equal(t, http.StatusServiceUnavailable, infraerrors.Code(err)) + require.Equal(t, "USAGE_CLEANUP_DISABLED", infraerrors.Reason(err)) +} + +func TestUsageCleanupServiceCancelTaskNotFound(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + + err := svc.CancelTask(context.Background(), 999, 1) + require.Error(t, err) + require.Equal(t, http.StatusNotFound, infraerrors.Code(err)) + require.Equal(t, "USAGE_CLEANUP_TASK_NOT_FOUND", infraerrors.Reason(err)) +} + +func TestUsageCleanupServiceCancelTaskStatusError(t *testing.T) { + repo := &cleanupRepoStub{statusErr: errors.New("status broken")} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + + err := svc.CancelTask(context.Background(), 7, 1) + require.Error(t, err) + require.Contains(t, err.Error(), "status broken") +} + +func TestUsageCleanupServiceCancelTaskConflict(t *testing.T) { + repo := &cleanupRepoStub{ + statusByID: map[int64]string{ + 7: UsageCleanupStatusSucceeded, + }, + } + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + + err := svc.CancelTask(context.Background(), 7, 1) + require.Error(t, err) + require.Equal(t, http.StatusConflict, infraerrors.Code(err)) + require.Equal(t, "USAGE_CLEANUP_CANCEL_CONFLICT", infraerrors.Reason(err)) +} + +func TestUsageCleanupServiceCancelTaskRepoConflict(t *testing.T) { + shouldCancel := false + repo := &cleanupRepoStub{ + statusByID: map[int64]string{ + 7: UsageCleanupStatusPending, + }, + cancelResult: &shouldCancel, + } + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + + err := svc.CancelTask(context.Background(), 7, 1) + require.Error(t, err) + require.Equal(t, http.StatusConflict, infraerrors.Code(err)) + require.Equal(t, "USAGE_CLEANUP_CANCEL_CONFLICT", infraerrors.Reason(err)) +} + +func TestUsageCleanupServiceCancelTaskRepoError(t *testing.T) { + repo := &cleanupRepoStub{ + statusByID: map[int64]string{ + 7: UsageCleanupStatusPending, + }, + cancelErr: errors.New("cancel failed"), + } + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + + err := svc.CancelTask(context.Background(), 7, 1) + require.Error(t, err) + require.Contains(t, err.Error(), "cancel failed") +} + +func TestUsageCleanupServiceCancelTaskInvalidCanceller(t *testing.T) { + repo := &cleanupRepoStub{ + statusByID: map[int64]string{ + 7: UsageCleanupStatusRunning, + }, + } + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + + err := svc.CancelTask(context.Background(), 7, 0) + require.Error(t, err) + require.Equal(t, "USAGE_CLEANUP_INVALID_CANCELLER", infraerrors.Reason(err)) +} + func TestUsageCleanupServiceListTasks(t *testing.T) { repo := &cleanupRepoStub{ listTasks: []UsageCleanupTask{{ID: 1}, {ID: 2}}, @@ -418,3 +769,47 @@ func TestSanitizeUsageCleanupFiltersModelEmpty(t *testing.T) { require.Nil(t, filters.GroupID) require.Nil(t, filters.Model) } + +func TestDescribeUsageCleanupFiltersAllFields(t *testing.T) { + start := time.Date(2024, 2, 1, 10, 0, 0, 0, time.UTC) + end := start.Add(2 * time.Hour) + userID := int64(1) + apiKeyID := int64(2) + accountID := int64(3) + groupID := int64(4) + model := " gpt-4 " + stream := true + billingType := int8(2) + filters := UsageCleanupFilters{ + StartTime: start, + EndTime: end, + UserID: &userID, + APIKeyID: &apiKeyID, + AccountID: &accountID, + GroupID: &groupID, + Model: &model, + Stream: &stream, + BillingType: &billingType, + } + + desc := describeUsageCleanupFilters(filters) + require.Equal(t, "start=2024-02-01T10:00:00Z end=2024-02-01T12:00:00Z user_id=1 api_key_id=2 account_id=3 group_id=4 model=gpt-4 stream=true billing_type=2", desc) +} + +func TestUsageCleanupServiceIsTaskCanceledNotFound(t *testing.T) { + repo := &cleanupRepoStub{} + svc := NewUsageCleanupService(repo, nil, nil, &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}}) + + canceled, err := svc.isTaskCanceled(context.Background(), 9) + require.NoError(t, err) + require.False(t, canceled) +} + +func TestUsageCleanupServiceIsTaskCanceledError(t *testing.T) { + repo := &cleanupRepoStub{statusErr: errors.New("status err")} + svc := NewUsageCleanupService(repo, nil, nil, &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}}) + + _, err := svc.isTaskCanceled(context.Background(), 9) + require.Error(t, err) + require.Contains(t, err.Error(), "status err") +} diff --git a/frontend/src/components/admin/usage/UsageCleanupDialog.vue b/frontend/src/components/admin/usage/UsageCleanupDialog.vue index 4cd562e8..91a43ecd 100644 --- a/frontend/src/components/admin/usage/UsageCleanupDialog.vue +++ b/frontend/src/components/admin/usage/UsageCleanupDialog.vue @@ -219,7 +219,7 @@ const loadTasks = async () => { if (!props.show) return tasksLoading.value = true try { - const res = await adminUsageAPI.listCleanupTasks({ page: 1, page_size: 10 }) + const res = await adminUsageAPI.listCleanupTasks({ page: 1, page_size: 5 }) tasks.value = res.items || [] } catch (error) { console.error('Failed to load cleanup tasks:', error) From 771baa66ee34812691b8a28047e702113aeada42 Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Sun, 18 Jan 2026 14:31:22 +0800 Subject: [PATCH 052/147] =?UTF-8?q?feat(=E7=95=8C=E9=9D=A2):=20=E4=BC=98?= =?UTF-8?q?=E5=8C=96=E5=88=86=E9=A1=B5=E8=B7=B3=E8=BD=AC=E4=B8=8E=E9=A1=B5?= =?UTF-8?q?=E5=A4=A7=E5=B0=8F=E6=98=BE=E7=A4=BA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 分页组件支持隐藏每页条数选择器并新增跳转页输入 清理任务列表启用跳转页并固定每页 5 条 补充中英文分页文案 --- .../admin/usage/UsageCleanupDialog.vue | 43 ++++++++++++++++++- frontend/src/components/common/Pagination.vue | 38 ++++++++++++++-- frontend/src/i18n/locales/en.ts | 5 ++- frontend/src/i18n/locales/zh.ts | 5 ++- 4 files changed, 85 insertions(+), 6 deletions(-) diff --git a/frontend/src/components/admin/usage/UsageCleanupDialog.vue b/frontend/src/components/admin/usage/UsageCleanupDialog.vue index 91a43ecd..d5e81e72 100644 --- a/frontend/src/components/admin/usage/UsageCleanupDialog.vue +++ b/frontend/src/components/admin/usage/UsageCleanupDialog.vue @@ -66,6 +66,19 @@ + + @@ -108,6 +121,7 @@ import { useI18n } from 'vue-i18n' import { useAppStore } from '@/stores/app' import BaseDialog from '@/components/common/BaseDialog.vue' import ConfirmDialog from '@/components/common/ConfirmDialog.vue' +import Pagination from '@/components/common/Pagination.vue' import UsageFilters from '@/components/admin/usage/UsageFilters.vue' import { adminUsageAPI } from '@/api/admin/usage' import type { AdminUsageQueryParams, UsageCleanupTask, CreateUsageCleanupTaskRequest } from '@/api/admin/usage' @@ -131,6 +145,9 @@ const localEndDate = ref('') const tasks = ref([]) const tasksLoading = ref(false) +const tasksPage = ref(1) +const tasksPageSize = ref(5) +const tasksTotal = ref(0) const submitting = ref(false) const confirmVisible = ref(false) const cancelConfirmVisible = ref(false) @@ -146,6 +163,8 @@ const resetFilters = () => { localEndDate.value = props.endDate localFilters.value.start_date = localStartDate.value localFilters.value.end_date = localEndDate.value + tasksPage.value = 1 + tasksTotal.value = 0 } const startPolling = () => { @@ -219,8 +238,18 @@ const loadTasks = async () => { if (!props.show) return tasksLoading.value = true try { - const res = await adminUsageAPI.listCleanupTasks({ page: 1, page_size: 5 }) + const res = await adminUsageAPI.listCleanupTasks({ + page: tasksPage.value, + page_size: tasksPageSize.value + }) tasks.value = res.items || [] + tasksTotal.value = res.total || 0 + if (res.page) { + tasksPage.value = res.page + } + if (res.page_size) { + tasksPageSize.value = res.page_size + } } catch (error) { console.error('Failed to load cleanup tasks:', error) appStore.showError(t('admin.usage.cleanup.loadFailed')) @@ -229,6 +258,18 @@ const loadTasks = async () => { } } +const handleTaskPageChange = (page: number) => { + tasksPage.value = page + loadTasks() +} + +const handleTaskPageSizeChange = (size: number) => { + if (!Number.isFinite(size) || size <= 0) return + tasksPageSize.value = size + tasksPage.value = 1 + loadTasks() +} + const openConfirm = () => { confirmVisible.value = true } diff --git a/frontend/src/components/common/Pagination.vue b/frontend/src/components/common/Pagination.vue index 728bc0d3..3365a186 100644 --- a/frontend/src/components/common/Pagination.vue +++ b/frontend/src/components/common/Pagination.vue @@ -37,7 +37,7 @@

-
+
{{ t('pagination.perPage') }}: @@ -49,6 +49,22 @@ />
+ +
+ {{ t('pagination.jumpTo') }} + + +
@@ -102,7 +118,7 @@ + + +
+
+

%s

+
+
+

密码重置请求

+

您已请求重置密码。请点击下方按钮设置新密码:

+ 重置密码 +
+

此链接将在 30 分钟后失效。

+

如果您没有请求重置密码,请忽略此邮件。您的密码将保持不变。

+
+ +
+ +
+ + +`, siteName, resetURL, resetURL) +} diff --git a/backend/internal/service/setting_service.go b/backend/internal/service/setting_service.go index d77dd30d..c2ef9a5d 100644 --- a/backend/internal/service/setting_service.go +++ b/backend/internal/service/setting_service.go @@ -61,6 +61,7 @@ func (s *SettingService) GetPublicSettings(ctx context.Context) (*PublicSettings SettingKeyRegistrationEnabled, SettingKeyEmailVerifyEnabled, SettingKeyPromoCodeEnabled, + SettingKeyPasswordResetEnabled, SettingKeyTurnstileEnabled, SettingKeyTurnstileSiteKey, SettingKeySiteName, @@ -86,21 +87,26 @@ func (s *SettingService) GetPublicSettings(ctx context.Context) (*PublicSettings linuxDoEnabled = s.cfg != nil && s.cfg.LinuxDo.Enabled } + // Password reset requires email verification to be enabled + emailVerifyEnabled := settings[SettingKeyEmailVerifyEnabled] == "true" + passwordResetEnabled := emailVerifyEnabled && settings[SettingKeyPasswordResetEnabled] == "true" + return &PublicSettings{ - RegistrationEnabled: settings[SettingKeyRegistrationEnabled] == "true", - EmailVerifyEnabled: settings[SettingKeyEmailVerifyEnabled] == "true", - PromoCodeEnabled: settings[SettingKeyPromoCodeEnabled] != "false", // 默认启用 - TurnstileEnabled: settings[SettingKeyTurnstileEnabled] == "true", - TurnstileSiteKey: settings[SettingKeyTurnstileSiteKey], - SiteName: s.getStringOrDefault(settings, SettingKeySiteName, "Sub2API"), - SiteLogo: settings[SettingKeySiteLogo], - SiteSubtitle: s.getStringOrDefault(settings, SettingKeySiteSubtitle, "Subscription to API Conversion Platform"), - APIBaseURL: settings[SettingKeyAPIBaseURL], - ContactInfo: settings[SettingKeyContactInfo], - DocURL: settings[SettingKeyDocURL], - HomeContent: settings[SettingKeyHomeContent], - HideCcsImportButton: settings[SettingKeyHideCcsImportButton] == "true", - LinuxDoOAuthEnabled: linuxDoEnabled, + RegistrationEnabled: settings[SettingKeyRegistrationEnabled] == "true", + EmailVerifyEnabled: emailVerifyEnabled, + PromoCodeEnabled: settings[SettingKeyPromoCodeEnabled] != "false", // 默认启用 + PasswordResetEnabled: passwordResetEnabled, + TurnstileEnabled: settings[SettingKeyTurnstileEnabled] == "true", + TurnstileSiteKey: settings[SettingKeyTurnstileSiteKey], + SiteName: s.getStringOrDefault(settings, SettingKeySiteName, "Sub2API"), + SiteLogo: settings[SettingKeySiteLogo], + SiteSubtitle: s.getStringOrDefault(settings, SettingKeySiteSubtitle, "Subscription to API Conversion Platform"), + APIBaseURL: settings[SettingKeyAPIBaseURL], + ContactInfo: settings[SettingKeyContactInfo], + DocURL: settings[SettingKeyDocURL], + HomeContent: settings[SettingKeyHomeContent], + HideCcsImportButton: settings[SettingKeyHideCcsImportButton] == "true", + LinuxDoOAuthEnabled: linuxDoEnabled, }, nil } @@ -125,37 +131,39 @@ func (s *SettingService) GetPublicSettingsForInjection(ctx context.Context) (any // Return a struct that matches the frontend's expected format return &struct { - RegistrationEnabled bool `json:"registration_enabled"` - EmailVerifyEnabled bool `json:"email_verify_enabled"` - PromoCodeEnabled bool `json:"promo_code_enabled"` - TurnstileEnabled bool `json:"turnstile_enabled"` - TurnstileSiteKey string `json:"turnstile_site_key,omitempty"` - SiteName string `json:"site_name"` - SiteLogo string `json:"site_logo,omitempty"` - SiteSubtitle string `json:"site_subtitle,omitempty"` - APIBaseURL string `json:"api_base_url,omitempty"` - ContactInfo string `json:"contact_info,omitempty"` - DocURL string `json:"doc_url,omitempty"` - HomeContent string `json:"home_content,omitempty"` - HideCcsImportButton bool `json:"hide_ccs_import_button"` - LinuxDoOAuthEnabled bool `json:"linuxdo_oauth_enabled"` - Version string `json:"version,omitempty"` + RegistrationEnabled bool `json:"registration_enabled"` + EmailVerifyEnabled bool `json:"email_verify_enabled"` + PromoCodeEnabled bool `json:"promo_code_enabled"` + PasswordResetEnabled bool `json:"password_reset_enabled"` + TurnstileEnabled bool `json:"turnstile_enabled"` + TurnstileSiteKey string `json:"turnstile_site_key,omitempty"` + SiteName string `json:"site_name"` + SiteLogo string `json:"site_logo,omitempty"` + SiteSubtitle string `json:"site_subtitle,omitempty"` + APIBaseURL string `json:"api_base_url,omitempty"` + ContactInfo string `json:"contact_info,omitempty"` + DocURL string `json:"doc_url,omitempty"` + HomeContent string `json:"home_content,omitempty"` + HideCcsImportButton bool `json:"hide_ccs_import_button"` + LinuxDoOAuthEnabled bool `json:"linuxdo_oauth_enabled"` + Version string `json:"version,omitempty"` }{ - RegistrationEnabled: settings.RegistrationEnabled, - EmailVerifyEnabled: settings.EmailVerifyEnabled, - PromoCodeEnabled: settings.PromoCodeEnabled, - TurnstileEnabled: settings.TurnstileEnabled, - TurnstileSiteKey: settings.TurnstileSiteKey, - SiteName: settings.SiteName, - SiteLogo: settings.SiteLogo, - SiteSubtitle: settings.SiteSubtitle, - APIBaseURL: settings.APIBaseURL, - ContactInfo: settings.ContactInfo, - DocURL: settings.DocURL, - HomeContent: settings.HomeContent, - HideCcsImportButton: settings.HideCcsImportButton, - LinuxDoOAuthEnabled: settings.LinuxDoOAuthEnabled, - Version: s.version, + RegistrationEnabled: settings.RegistrationEnabled, + EmailVerifyEnabled: settings.EmailVerifyEnabled, + PromoCodeEnabled: settings.PromoCodeEnabled, + PasswordResetEnabled: settings.PasswordResetEnabled, + TurnstileEnabled: settings.TurnstileEnabled, + TurnstileSiteKey: settings.TurnstileSiteKey, + SiteName: settings.SiteName, + SiteLogo: settings.SiteLogo, + SiteSubtitle: settings.SiteSubtitle, + APIBaseURL: settings.APIBaseURL, + ContactInfo: settings.ContactInfo, + DocURL: settings.DocURL, + HomeContent: settings.HomeContent, + HideCcsImportButton: settings.HideCcsImportButton, + LinuxDoOAuthEnabled: settings.LinuxDoOAuthEnabled, + Version: s.version, }, nil } @@ -167,6 +175,7 @@ func (s *SettingService) UpdateSettings(ctx context.Context, settings *SystemSet updates[SettingKeyRegistrationEnabled] = strconv.FormatBool(settings.RegistrationEnabled) updates[SettingKeyEmailVerifyEnabled] = strconv.FormatBool(settings.EmailVerifyEnabled) updates[SettingKeyPromoCodeEnabled] = strconv.FormatBool(settings.PromoCodeEnabled) + updates[SettingKeyPasswordResetEnabled] = strconv.FormatBool(settings.PasswordResetEnabled) // 邮件服务设置(只有非空才更新密码) updates[SettingKeySMTPHost] = settings.SMTPHost @@ -262,6 +271,20 @@ func (s *SettingService) IsPromoCodeEnabled(ctx context.Context) bool { return value != "false" } +// IsPasswordResetEnabled 检查是否启用密码重置功能 +// 要求:必须同时开启邮件验证 +func (s *SettingService) IsPasswordResetEnabled(ctx context.Context) bool { + // Password reset requires email verification to be enabled + if !s.IsEmailVerifyEnabled(ctx) { + return false + } + value, err := s.settingRepo.GetValue(ctx, SettingKeyPasswordResetEnabled) + if err != nil { + return false // 默认关闭 + } + return value == "true" +} + // GetSiteName 获取网站名称 func (s *SettingService) GetSiteName(ctx context.Context) string { value, err := s.settingRepo.GetValue(ctx, SettingKeySiteName) @@ -340,10 +363,12 @@ func (s *SettingService) InitializeDefaultSettings(ctx context.Context) error { // parseSettings 解析设置到结构体 func (s *SettingService) parseSettings(settings map[string]string) *SystemSettings { + emailVerifyEnabled := settings[SettingKeyEmailVerifyEnabled] == "true" result := &SystemSettings{ RegistrationEnabled: settings[SettingKeyRegistrationEnabled] == "true", - EmailVerifyEnabled: settings[SettingKeyEmailVerifyEnabled] == "true", + EmailVerifyEnabled: emailVerifyEnabled, PromoCodeEnabled: settings[SettingKeyPromoCodeEnabled] != "false", // 默认启用 + PasswordResetEnabled: emailVerifyEnabled && settings[SettingKeyPasswordResetEnabled] == "true", SMTPHost: settings[SettingKeySMTPHost], SMTPUsername: settings[SettingKeySMTPUsername], SMTPFrom: settings[SettingKeySMTPFrom], diff --git a/backend/internal/service/settings_view.go b/backend/internal/service/settings_view.go index 919344e5..427e64d2 100644 --- a/backend/internal/service/settings_view.go +++ b/backend/internal/service/settings_view.go @@ -1,9 +1,10 @@ package service type SystemSettings struct { - RegistrationEnabled bool - EmailVerifyEnabled bool - PromoCodeEnabled bool + RegistrationEnabled bool + EmailVerifyEnabled bool + PromoCodeEnabled bool + PasswordResetEnabled bool SMTPHost string SMTPPort int @@ -57,21 +58,22 @@ type SystemSettings struct { } type PublicSettings struct { - RegistrationEnabled bool - EmailVerifyEnabled bool - PromoCodeEnabled bool - TurnstileEnabled bool - TurnstileSiteKey string - SiteName string - SiteLogo string - SiteSubtitle string - APIBaseURL string - ContactInfo string - DocURL string - HomeContent string - HideCcsImportButton bool - LinuxDoOAuthEnabled bool - Version string + RegistrationEnabled bool + EmailVerifyEnabled bool + PromoCodeEnabled bool + PasswordResetEnabled bool + TurnstileEnabled bool + TurnstileSiteKey string + SiteName string + SiteLogo string + SiteSubtitle string + APIBaseURL string + ContactInfo string + DocURL string + HomeContent string + HideCcsImportButton bool + LinuxDoOAuthEnabled bool + Version string } // StreamTimeoutSettings 流超时处理配置(仅控制超时后的处理方式,超时判定由网关配置控制) diff --git a/frontend/src/api/admin/settings.ts b/frontend/src/api/admin/settings.ts index 6e2ade00..76624bb9 100644 --- a/frontend/src/api/admin/settings.ts +++ b/frontend/src/api/admin/settings.ts @@ -13,6 +13,7 @@ export interface SystemSettings { registration_enabled: boolean email_verify_enabled: boolean promo_code_enabled: boolean + password_reset_enabled: boolean // Default settings default_balance: number default_concurrency: number @@ -66,6 +67,7 @@ export interface UpdateSettingsRequest { registration_enabled?: boolean email_verify_enabled?: boolean promo_code_enabled?: boolean + password_reset_enabled?: boolean default_balance?: number default_concurrency?: number site_name?: string diff --git a/frontend/src/api/auth.ts b/frontend/src/api/auth.ts index fddc23ef..8c669c3e 100644 --- a/frontend/src/api/auth.ts +++ b/frontend/src/api/auth.ts @@ -133,6 +133,57 @@ export async function validatePromoCode(code: string): Promise { + const { data } = await apiClient.post('/auth/forgot-password', request) + return data +} + +/** + * Reset password request + */ +export interface ResetPasswordRequest { + email: string + token: string + new_password: string +} + +/** + * Reset password response + */ +export interface ResetPasswordResponse { + message: string +} + +/** + * Reset password with token + * @param request - Email, token, and new password + * @returns Response with message + */ +export async function resetPassword(request: ResetPasswordRequest): Promise { + const { data } = await apiClient.post('/auth/reset-password', request) + return data +} + export const authAPI = { login, register, @@ -144,7 +195,9 @@ export const authAPI = { clearAuthToken, getPublicSettings, sendVerifyCode, - validatePromoCode + validatePromoCode, + forgotPassword, + resetPassword } export default authAPI diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts index e293491b..1880e4cc 100644 --- a/frontend/src/i18n/locales/en.ts +++ b/frontend/src/i18n/locales/en.ts @@ -271,7 +271,36 @@ export default { code: 'Code', state: 'State', fullUrl: 'Full URL' - } + }, + // Forgot password + forgotPassword: 'Forgot password?', + forgotPasswordTitle: 'Reset Your Password', + forgotPasswordHint: 'Enter your email address and we will send you a link to reset your password.', + sendResetLink: 'Send Reset Link', + sendingResetLink: 'Sending...', + sendResetLinkFailed: 'Failed to send reset link. Please try again.', + resetEmailSent: 'Reset Link Sent', + resetEmailSentHint: 'If an account exists with this email, you will receive a password reset link shortly. Please check your inbox and spam folder.', + backToLogin: 'Back to Login', + rememberedPassword: 'Remembered your password?', + // Reset password + resetPasswordTitle: 'Set New Password', + resetPasswordHint: 'Enter your new password below.', + newPassword: 'New Password', + newPasswordPlaceholder: 'Enter your new password', + confirmPassword: 'Confirm Password', + confirmPasswordPlaceholder: 'Confirm your new password', + confirmPasswordRequired: 'Please confirm your password', + passwordsDoNotMatch: 'Passwords do not match', + resetPassword: 'Reset Password', + resettingPassword: 'Resetting...', + resetPasswordFailed: 'Failed to reset password. Please try again.', + passwordResetSuccess: 'Password Reset Successful', + passwordResetSuccessHint: 'Your password has been reset. You can now sign in with your new password.', + invalidResetLink: 'Invalid Reset Link', + invalidResetLinkHint: 'This password reset link is invalid or has expired. Please request a new one.', + requestNewResetLink: 'Request New Reset Link', + invalidOrExpiredToken: 'The password reset link is invalid or has expired. Please request a new one.' }, // Dashboard @@ -2743,7 +2772,9 @@ export default { emailVerification: 'Email Verification', emailVerificationHint: 'Require email verification for new registrations', promoCode: 'Promo Code', - promoCodeHint: 'Allow users to use promo codes during registration' + promoCodeHint: 'Allow users to use promo codes during registration', + passwordReset: 'Password Reset', + passwordResetHint: 'Allow users to reset their password via email' }, turnstile: { title: 'Cloudflare Turnstile', diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index dbeb3819..49b24abb 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -268,7 +268,36 @@ export default { code: '授权码', state: '状态', fullUrl: '完整URL' - } + }, + // 忘记密码 + forgotPassword: '忘记密码?', + forgotPasswordTitle: '重置密码', + forgotPasswordHint: '输入您的邮箱地址,我们将向您发送密码重置链接。', + sendResetLink: '发送重置链接', + sendingResetLink: '发送中...', + sendResetLinkFailed: '发送重置链接失败,请重试。', + resetEmailSent: '重置链接已发送', + resetEmailSentHint: '如果该邮箱已注册,您将很快收到密码重置链接。请检查您的收件箱和垃圾邮件文件夹。', + backToLogin: '返回登录', + rememberedPassword: '想起密码了?', + // 重置密码 + resetPasswordTitle: '设置新密码', + resetPasswordHint: '请在下方输入您的新密码。', + newPassword: '新密码', + newPasswordPlaceholder: '输入新密码', + confirmPassword: '确认密码', + confirmPasswordPlaceholder: '再次输入新密码', + confirmPasswordRequired: '请确认您的密码', + passwordsDoNotMatch: '两次输入的密码不一致', + resetPassword: '重置密码', + resettingPassword: '重置中...', + resetPasswordFailed: '重置密码失败,请重试。', + passwordResetSuccess: '密码重置成功', + passwordResetSuccessHint: '您的密码已重置。现在可以使用新密码登录。', + invalidResetLink: '无效的重置链接', + invalidResetLinkHint: '此密码重置链接无效或已过期。请重新请求一个新链接。', + requestNewResetLink: '请求新的重置链接', + invalidOrExpiredToken: '密码重置链接无效或已过期。请重新请求一个新链接。' }, // Dashboard @@ -2896,7 +2925,9 @@ export default { emailVerification: '邮箱验证', emailVerificationHint: '新用户注册时需要验证邮箱', promoCode: '优惠码', - promoCodeHint: '允许用户在注册时使用优惠码' + promoCodeHint: '允许用户在注册时使用优惠码', + passwordReset: '忘记密码', + passwordResetHint: '允许用户通过邮箱重置密码' }, turnstile: { title: 'Cloudflare Turnstile', diff --git a/frontend/src/router/index.ts b/frontend/src/router/index.ts index 96127063..31c489b4 100644 --- a/frontend/src/router/index.ts +++ b/frontend/src/router/index.ts @@ -79,6 +79,24 @@ const routes: RouteRecordRaw[] = [ title: 'LinuxDo OAuth Callback' } }, + { + path: '/forgot-password', + name: 'ForgotPassword', + component: () => import('@/views/auth/ForgotPasswordView.vue'), + meta: { + requiresAuth: false, + title: 'Forgot Password' + } + }, + { + path: '/reset-password', + name: 'ResetPassword', + component: () => import('@/views/auth/ResetPasswordView.vue'), + meta: { + requiresAuth: false, + title: 'Reset Password' + } + }, // ==================== User Routes ==================== { diff --git a/frontend/src/stores/app.ts b/frontend/src/stores/app.ts index 9c4db599..c5a1ffc6 100644 --- a/frontend/src/stores/app.ts +++ b/frontend/src/stores/app.ts @@ -313,6 +313,7 @@ export const useAppStore = defineStore('app', () => { registration_enabled: false, email_verify_enabled: false, promo_code_enabled: true, + password_reset_enabled: false, turnstile_enabled: false, turnstile_site_key: '', site_name: siteName.value, diff --git a/frontend/src/types/index.ts b/frontend/src/types/index.ts index 37c9f030..6f4b3e50 100644 --- a/frontend/src/types/index.ts +++ b/frontend/src/types/index.ts @@ -71,6 +71,7 @@ export interface PublicSettings { registration_enabled: boolean email_verify_enabled: boolean promo_code_enabled: boolean + password_reset_enabled: boolean turnstile_enabled: boolean turnstile_site_key: string site_name: string diff --git a/frontend/src/views/admin/SettingsView.vue b/frontend/src/views/admin/SettingsView.vue index 7ebca114..ed095254 100644 --- a/frontend/src/views/admin/SettingsView.vue +++ b/frontend/src/views/admin/SettingsView.vue @@ -338,6 +338,22 @@ + + +
+
+ +

+ {{ t('admin.settings.registration.passwordResetHint') }} +

+
+ +
@@ -1029,6 +1045,7 @@ const form = reactive({ registration_enabled: true, email_verify_enabled: false, promo_code_enabled: true, + password_reset_enabled: false, default_balance: 0, default_concurrency: 1, site_name: 'Sub2API', @@ -1152,6 +1169,7 @@ async function saveSettings() { registration_enabled: form.registration_enabled, email_verify_enabled: form.email_verify_enabled, promo_code_enabled: form.promo_code_enabled, + password_reset_enabled: form.password_reset_enabled, default_balance: form.default_balance, default_concurrency: form.default_concurrency, site_name: form.site_name, diff --git a/frontend/src/views/auth/ForgotPasswordView.vue b/frontend/src/views/auth/ForgotPasswordView.vue new file mode 100644 index 00000000..22799a9a --- /dev/null +++ b/frontend/src/views/auth/ForgotPasswordView.vue @@ -0,0 +1,297 @@ + + + + + diff --git a/frontend/src/views/auth/LoginView.vue b/frontend/src/views/auth/LoginView.vue index 6e6cee27..3232048a 100644 --- a/frontend/src/views/auth/LoginView.vue +++ b/frontend/src/views/auth/LoginView.vue @@ -72,9 +72,19 @@ -

- {{ errors.password }} -

+
+

+ {{ errors.password }} +

+ + + {{ t('auth.forgotPassword') }} + +
@@ -184,6 +194,7 @@ const showPassword = ref(false) const turnstileEnabled = ref(false) const turnstileSiteKey = ref('') const linuxdoOAuthEnabled = ref(false) +const passwordResetEnabled = ref(false) // Turnstile const turnstileRef = ref | null>(null) @@ -216,6 +227,7 @@ onMounted(async () => { turnstileEnabled.value = settings.turnstile_enabled turnstileSiteKey.value = settings.turnstile_site_key || '' linuxdoOAuthEnabled.value = settings.linuxdo_oauth_enabled + passwordResetEnabled.value = settings.password_reset_enabled } catch (error) { console.error('Failed to load public settings:', error) } diff --git a/frontend/src/views/auth/ResetPasswordView.vue b/frontend/src/views/auth/ResetPasswordView.vue new file mode 100644 index 00000000..94c153e1 --- /dev/null +++ b/frontend/src/views/auth/ResetPasswordView.vue @@ -0,0 +1,355 @@ + + + + + From 716272a1e2c534bf3d4cbde20c39d2d90acd6de5 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Sat, 24 Jan 2026 23:04:48 +0800 Subject: [PATCH 112/147] =?UTF-8?q?fix(oauth):=20=E5=BD=BB=E5=BA=95?= =?UTF-8?q?=E4=BF=AE=E5=A4=8D=20project=5Fid=20=E4=B8=A2=E5=A4=B1=E9=97=AE?= =?UTF-8?q?=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 根本原因: - BuildAccountCredentials 只在 project_id 非空时才添加该字段 - LoadCodeAssist 失败时返回空字符串 → 新 credentials 不包含 project_id 键 - 普通合并逻辑只保留新 credentials 中不存在的键,无法覆盖空值 解决方案: 1. 在合并后特殊处理 project_id:如果新值为空但旧值非空,保留旧值 2. LoadCodeAssist 失败不再返回错误,只记录警告 3. Token 刷新成功(access_token 已更新)就不应标记账户为 error 改进效果: - 即使 LoadCodeAssist 连续失败,已有的 project_id 也不会丢失 - 避免因临时网络问题将账户误标记为不可用 - 允许在下次刷新时自动重试获取 project_id Co-Authored-By: Claude Sonnet 4.5 --- .../internal/handler/admin/account_handler.go | 23 +++++++++++-------- .../service/antigravity_token_refresher.go | 22 +++++++++++++----- 2 files changed, 30 insertions(+), 15 deletions(-) diff --git a/backend/internal/handler/admin/account_handler.go b/backend/internal/handler/admin/account_handler.go index 188aa0ec..bbf5d026 100644 --- a/backend/internal/handler/admin/account_handler.go +++ b/backend/internal/handler/admin/account_handler.go @@ -547,9 +547,18 @@ func (h *AccountHandler) Refresh(c *gin.Context) { } } - // 如果 project_id 获取失败,先更新凭证,再标记账户为 error + // 特殊处理 project_id:如果新值为空但旧值非空,保留旧值 + // 这确保了即使 LoadCodeAssist 失败,project_id 也不会丢失 + if newProjectID, _ := newCredentials["project_id"].(string); newProjectID == "" { + if oldProjectID := strings.TrimSpace(account.GetCredential("project_id")); oldProjectID != "" { + newCredentials["project_id"] = oldProjectID + } + } + + // 如果 project_id 获取失败,更新凭证但不标记为 error + // LoadCodeAssist 失败可能是临时网络问题,给它机会在下次自动刷新时重试 if tokenInfo.ProjectIDMissing { - // 先更新凭证 + // 先更新凭证(token 本身刷新成功了) _, updateErr := h.adminService.UpdateAccount(c.Request.Context(), accountID, &service.UpdateAccountInput{ Credentials: newCredentials, }) @@ -557,14 +566,10 @@ func (h *AccountHandler) Refresh(c *gin.Context) { response.InternalError(c, "Failed to update credentials: "+updateErr.Error()) return } - // 标记账户为 error - if setErr := h.adminService.SetAccountError(c.Request.Context(), accountID, "missing_project_id: 账户缺少project id,可能无法使用Antigravity"); setErr != nil { - response.InternalError(c, "Failed to set account error: "+setErr.Error()) - return - } + // 不标记为 error,只返回警告信息 response.Success(c, gin.H{ - "message": "Token refreshed but project_id is missing, account marked as error", - "warning": "missing_project_id", + "message": "Token refreshed successfully, but project_id could not be retrieved (will retry automatically)", + "warning": "missing_project_id_temporary", }) return } diff --git a/backend/internal/service/antigravity_token_refresher.go b/backend/internal/service/antigravity_token_refresher.go index 2beb9e84..e33f88d0 100644 --- a/backend/internal/service/antigravity_token_refresher.go +++ b/backend/internal/service/antigravity_token_refresher.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "log" + "strings" "time" ) @@ -56,22 +57,31 @@ func (r *AntigravityTokenRefresher) Refresh(ctx context.Context, account *Accoun } newCredentials := r.antigravityOAuthService.BuildAccountCredentials(tokenInfo) + // 合并旧的 credentials,保留新 credentials 中不存在的字段 for k, v := range account.Credentials { if _, exists := newCredentials[k]; !exists { newCredentials[k] = v } } - // 如果 project_id 获取失败但之前有 project_id,不返回错误(只是临时网络故障) - // 只有真正缺失 project_id(从未获取过)时才返回错误 + // 特殊处理 project_id:如果新值为空但旧值非空,保留旧值 + // 这确保了即使 LoadCodeAssist 失败,project_id 也不会丢失 + if newProjectID, _ := newCredentials["project_id"].(string); newProjectID == "" { + if oldProjectID := strings.TrimSpace(account.GetCredential("project_id")); oldProjectID != "" { + newCredentials["project_id"] = oldProjectID + } + } + + // 如果 project_id 获取失败,只记录警告,不返回错误 + // LoadCodeAssist 失败可能是临时网络问题,应该允许重试而不是立即标记为不可重试错误 + // Token 刷新本身是成功的(access_token 和 refresh_token 已更新) if tokenInfo.ProjectIDMissing { - // 检查是否保留了旧的 project_id if tokenInfo.ProjectID != "" { - // 有旧的 project_id,只是本次获取失败,记录警告但不返回错误 + // 有旧的 project_id,本次获取失败,保留旧值 log.Printf("[AntigravityTokenRefresher] Account %d: LoadCodeAssist 临时失败,保留旧 project_id", account.ID) } else { - // 真正缺失 project_id,返回错误 - return newCredentials, fmt.Errorf("missing_project_id: 账户缺少project id,可能无法使用Antigravity") + // 从未获取过 project_id,本次也失败,但不返回错误以允许下次重试 + log.Printf("[AntigravityTokenRefresher] Account %d: LoadCodeAssist 失败,project_id 缺失,但 token 已更新,将在下次刷新时重试", account.ID) } } From 4ded9e7d49f50de57f531c54ff5eff05f90d3169 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Sat, 24 Jan 2026 23:41:36 +0800 Subject: [PATCH 113/147] =?UTF-8?q?fix(oauth):=20=E4=B8=BA=E5=88=9D?= =?UTF-8?q?=E5=A7=8B=20OAuth=20=E6=8E=88=E6=9D=83=E6=B7=BB=E5=8A=A0=20Load?= =?UTF-8?q?CodeAssist=20=E9=87=8D=E8=AF=95=E6=9C=BA=E5=88=B6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 问题: - 初始授权时 LoadCodeAssist 没有重试机制,失败后直接跳过 - 导致账号创建时就可能缺失 project_id - 之后每次刷新都因为 missing_project_id 报错 修复: - 统一使用 loadProjectIDWithRetry 方法(最多4次尝试) - 初始授权和token刷新使用相同的重试策略 - 保留原注释说明部分账户可能没有 project_id Co-Authored-By: Claude Sonnet 4.5 --- .../internal/service/antigravity_oauth_service.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/backend/internal/service/antigravity_oauth_service.go b/backend/internal/service/antigravity_oauth_service.go index 3850fa57..fa8379ed 100644 --- a/backend/internal/service/antigravity_oauth_service.go +++ b/backend/internal/service/antigravity_oauth_service.go @@ -142,12 +142,13 @@ func (s *AntigravityOAuthService) ExchangeCode(ctx context.Context, input *Antig result.Email = userInfo.Email } - // 获取 project_id(部分账户类型可能没有) - loadResp, _, err := client.LoadCodeAssist(ctx, tokenResp.AccessToken) - if err != nil { - fmt.Printf("[AntigravityOAuth] 警告: 获取 project_id 失败: %v\n", err) - } else if loadResp != nil && loadResp.CloudAICompanionProject != "" { - result.ProjectID = loadResp.CloudAICompanionProject + // 获取 project_id(部分账户类型可能没有),失败时重试 + projectID, loadErr := s.loadProjectIDWithRetry(ctx, tokenResp.AccessToken, proxyURL, 3) + if loadErr != nil { + fmt.Printf("[AntigravityOAuth] 警告: 获取 project_id 失败(重试后): %v\n", loadErr) + result.ProjectIDMissing = true + } else { + result.ProjectID = projectID } return result, nil From 74e05b83eaa175029cc2a25a154bc750045aba99 Mon Sep 17 00:00:00 2001 From: shaw Date: Sun, 25 Jan 2026 13:32:08 +0800 Subject: [PATCH 114/147] =?UTF-8?q?fix(ratelimit):=20=E4=BF=AE=E5=A4=8D=20?= =?UTF-8?q?OpenAI=20=E8=B4=A6=E5=8F=B7=E9=99=90=E6=B5=81=E5=80=92=E8=AE=A1?= =?UTF-8?q?=E6=97=B6=E8=AE=A1=E7=AE=97=E9=94=99=E8=AF=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 解析 x-codex-* 响应头获取正确的重置时间 - 7d 限制用尽时使用 codex_7d_reset_after_seconds - 提取 Normalize() 方法统一窗口规范化逻辑 --- .../service/openai_gateway_service.go | 207 +++++----- backend/internal/service/ratelimit_service.go | 61 +++ .../service/ratelimit_service_openai_test.go | 364 ++++++++++++++++++ 3 files changed, 531 insertions(+), 101 deletions(-) create mode 100644 backend/internal/service/ratelimit_service_openai_test.go diff --git a/backend/internal/service/openai_gateway_service.go b/backend/internal/service/openai_gateway_service.go index 65ba01b3..289a13af 100644 --- a/backend/internal/service/openai_gateway_service.go +++ b/backend/internal/service/openai_gateway_service.go @@ -60,6 +60,92 @@ type OpenAICodexUsageSnapshot struct { UpdatedAt string `json:"updated_at,omitempty"` } +// NormalizedCodexLimits contains normalized 5h/7d rate limit data +type NormalizedCodexLimits struct { + Used5hPercent *float64 + Reset5hSeconds *int + Window5hMinutes *int + Used7dPercent *float64 + Reset7dSeconds *int + Window7dMinutes *int +} + +// Normalize converts primary/secondary fields to canonical 5h/7d fields. +// Strategy: Compare window_minutes to determine which is 5h vs 7d. +// Returns nil if snapshot is nil or has no useful data. +func (s *OpenAICodexUsageSnapshot) Normalize() *NormalizedCodexLimits { + if s == nil { + return nil + } + + result := &NormalizedCodexLimits{} + + primaryMins := 0 + secondaryMins := 0 + hasPrimaryWindow := false + hasSecondaryWindow := false + + if s.PrimaryWindowMinutes != nil { + primaryMins = *s.PrimaryWindowMinutes + hasPrimaryWindow = true + } + if s.SecondaryWindowMinutes != nil { + secondaryMins = *s.SecondaryWindowMinutes + hasSecondaryWindow = true + } + + // Determine mapping based on window_minutes + use5hFromPrimary := false + use7dFromPrimary := false + + if hasPrimaryWindow && hasSecondaryWindow { + // Both known: smaller window is 5h, larger is 7d + if primaryMins < secondaryMins { + use5hFromPrimary = true + } else { + use7dFromPrimary = true + } + } else if hasPrimaryWindow { + // Only primary known: classify by threshold (<=360 min = 6h -> 5h window) + if primaryMins <= 360 { + use5hFromPrimary = true + } else { + use7dFromPrimary = true + } + } else if hasSecondaryWindow { + // Only secondary known: classify by threshold + if secondaryMins <= 360 { + // 5h from secondary, so primary (if any data) is 7d + use7dFromPrimary = true + } else { + // 7d from secondary, so primary (if any data) is 5h + use5hFromPrimary = true + } + } else { + // No window_minutes: fall back to legacy assumption (primary=7d, secondary=5h) + use7dFromPrimary = true + } + + // Assign values + if use5hFromPrimary { + result.Used5hPercent = s.PrimaryUsedPercent + result.Reset5hSeconds = s.PrimaryResetAfterSeconds + result.Window5hMinutes = s.PrimaryWindowMinutes + result.Used7dPercent = s.SecondaryUsedPercent + result.Reset7dSeconds = s.SecondaryResetAfterSeconds + result.Window7dMinutes = s.SecondaryWindowMinutes + } else if use7dFromPrimary { + result.Used7dPercent = s.PrimaryUsedPercent + result.Reset7dSeconds = s.PrimaryResetAfterSeconds + result.Window7dMinutes = s.PrimaryWindowMinutes + result.Used5hPercent = s.SecondaryUsedPercent + result.Reset5hSeconds = s.SecondaryResetAfterSeconds + result.Window5hMinutes = s.SecondaryWindowMinutes + } + + return result +} + // OpenAIUsage represents OpenAI API response usage type OpenAIUsage struct { InputTokens int `json:"input_tokens"` @@ -867,7 +953,7 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco // Extract and save Codex usage snapshot from response headers (for OAuth accounts) if account.Type == AccountTypeOAuth { - if snapshot := extractCodexUsageHeaders(resp.Header); snapshot != nil { + if snapshot := ParseCodexRateLimitHeaders(resp.Header); snapshot != nil { s.updateCodexUsageSnapshot(ctx, account.ID, snapshot) } } @@ -1665,8 +1751,9 @@ func (s *OpenAIGatewayService) RecordUsage(ctx context.Context, input *OpenAIRec return nil } -// extractCodexUsageHeaders extracts Codex usage limits from response headers -func extractCodexUsageHeaders(headers http.Header) *OpenAICodexUsageSnapshot { +// ParseCodexRateLimitHeaders extracts Codex usage limits from response headers. +// Exported for use in ratelimit_service when handling OpenAI 429 responses. +func ParseCodexRateLimitHeaders(headers http.Header) *OpenAICodexUsageSnapshot { snapshot := &OpenAICodexUsageSnapshot{} hasData := false @@ -1740,6 +1827,8 @@ func (s *OpenAIGatewayService) updateCodexUsageSnapshot(ctx context.Context, acc // Convert snapshot to map for merging into Extra updates := make(map[string]any) + + // Save raw primary/secondary fields for debugging/tracing if snapshot.PrimaryUsedPercent != nil { updates["codex_primary_used_percent"] = *snapshot.PrimaryUsedPercent } @@ -1763,109 +1852,25 @@ func (s *OpenAIGatewayService) updateCodexUsageSnapshot(ctx context.Context, acc } updates["codex_usage_updated_at"] = snapshot.UpdatedAt - // Normalize to canonical 5h/7d fields based on window_minutes - // This fixes the issue where OpenAI's primary/secondary naming is reversed - // Strategy: Compare the two windows and assign the smaller one to 5h, larger one to 7d - - // IMPORTANT: We can only reliably determine window type from window_minutes field - // The reset_after_seconds is remaining time, not window size, so it cannot be used for comparison - - var primaryWindowMins, secondaryWindowMins int - var hasPrimaryWindow, hasSecondaryWindow bool - - // Only use window_minutes for reliable window size comparison - if snapshot.PrimaryWindowMinutes != nil { - primaryWindowMins = *snapshot.PrimaryWindowMinutes - hasPrimaryWindow = true - } - - if snapshot.SecondaryWindowMinutes != nil { - secondaryWindowMins = *snapshot.SecondaryWindowMinutes - hasSecondaryWindow = true - } - - // Determine which is 5h and which is 7d - var use5hFromPrimary, use7dFromPrimary bool - var use5hFromSecondary, use7dFromSecondary bool - - if hasPrimaryWindow && hasSecondaryWindow { - // Both window sizes known: compare and assign smaller to 5h, larger to 7d - if primaryWindowMins < secondaryWindowMins { - use5hFromPrimary = true - use7dFromSecondary = true - } else { - use5hFromSecondary = true - use7dFromPrimary = true + // Normalize to canonical 5h/7d fields + if normalized := snapshot.Normalize(); normalized != nil { + if normalized.Used5hPercent != nil { + updates["codex_5h_used_percent"] = *normalized.Used5hPercent } - } else if hasPrimaryWindow { - // Only primary window size known: classify by absolute threshold - if primaryWindowMins <= 360 { - use5hFromPrimary = true - } else { - use7dFromPrimary = true + if normalized.Reset5hSeconds != nil { + updates["codex_5h_reset_after_seconds"] = *normalized.Reset5hSeconds } - } else if hasSecondaryWindow { - // Only secondary window size known: classify by absolute threshold - if secondaryWindowMins <= 360 { - use5hFromSecondary = true - } else { - use7dFromSecondary = true + if normalized.Window5hMinutes != nil { + updates["codex_5h_window_minutes"] = *normalized.Window5hMinutes } - } else { - // No window_minutes available: cannot reliably determine window types - // Fall back to legacy assumption (may be incorrect) - // Assume primary=7d, secondary=5h based on historical observation - if snapshot.SecondaryUsedPercent != nil || snapshot.SecondaryResetAfterSeconds != nil || snapshot.SecondaryWindowMinutes != nil { - use5hFromSecondary = true + if normalized.Used7dPercent != nil { + updates["codex_7d_used_percent"] = *normalized.Used7dPercent } - if snapshot.PrimaryUsedPercent != nil || snapshot.PrimaryResetAfterSeconds != nil || snapshot.PrimaryWindowMinutes != nil { - use7dFromPrimary = true + if normalized.Reset7dSeconds != nil { + updates["codex_7d_reset_after_seconds"] = *normalized.Reset7dSeconds } - } - - // Write canonical 5h fields - if use5hFromPrimary { - if snapshot.PrimaryUsedPercent != nil { - updates["codex_5h_used_percent"] = *snapshot.PrimaryUsedPercent - } - if snapshot.PrimaryResetAfterSeconds != nil { - updates["codex_5h_reset_after_seconds"] = *snapshot.PrimaryResetAfterSeconds - } - if snapshot.PrimaryWindowMinutes != nil { - updates["codex_5h_window_minutes"] = *snapshot.PrimaryWindowMinutes - } - } else if use5hFromSecondary { - if snapshot.SecondaryUsedPercent != nil { - updates["codex_5h_used_percent"] = *snapshot.SecondaryUsedPercent - } - if snapshot.SecondaryResetAfterSeconds != nil { - updates["codex_5h_reset_after_seconds"] = *snapshot.SecondaryResetAfterSeconds - } - if snapshot.SecondaryWindowMinutes != nil { - updates["codex_5h_window_minutes"] = *snapshot.SecondaryWindowMinutes - } - } - - // Write canonical 7d fields - if use7dFromPrimary { - if snapshot.PrimaryUsedPercent != nil { - updates["codex_7d_used_percent"] = *snapshot.PrimaryUsedPercent - } - if snapshot.PrimaryResetAfterSeconds != nil { - updates["codex_7d_reset_after_seconds"] = *snapshot.PrimaryResetAfterSeconds - } - if snapshot.PrimaryWindowMinutes != nil { - updates["codex_7d_window_minutes"] = *snapshot.PrimaryWindowMinutes - } - } else if use7dFromSecondary { - if snapshot.SecondaryUsedPercent != nil { - updates["codex_7d_used_percent"] = *snapshot.SecondaryUsedPercent - } - if snapshot.SecondaryResetAfterSeconds != nil { - updates["codex_7d_reset_after_seconds"] = *snapshot.SecondaryResetAfterSeconds - } - if snapshot.SecondaryWindowMinutes != nil { - updates["codex_7d_window_minutes"] = *snapshot.SecondaryWindowMinutes + if normalized.Window7dMinutes != nil { + updates["codex_7d_window_minutes"] = *normalized.Window7dMinutes } } diff --git a/backend/internal/service/ratelimit_service.go b/backend/internal/service/ratelimit_service.go index 41bd253c..fc8c831c 100644 --- a/backend/internal/service/ratelimit_service.go +++ b/backend/internal/service/ratelimit_service.go @@ -343,6 +343,19 @@ func (s *RateLimitService) handleCustomErrorCode(ctx context.Context, account *A // handle429 处理429限流错误 // 解析响应头获取重置时间,标记账号为限流状态 func (s *RateLimitService) handle429(ctx context.Context, account *Account, headers http.Header, responseBody []byte) { + // OpenAI 平台:解析 x-codex-* 响应头 + if account.Platform == PlatformOpenAI { + if resetAt := s.calculateOpenAI429ResetTime(headers); resetAt != nil { + if err := s.accountRepo.SetRateLimited(ctx, account.ID, *resetAt); err != nil { + slog.Warn("rate_limit_set_failed", "account_id", account.ID, "error", err) + return + } + slog.Info("openai_account_rate_limited", "account_id", account.ID, "reset_at", *resetAt) + return + } + // 如果解析失败,继续使用默认逻辑 + } + // 解析重置时间戳 resetTimestamp := headers.Get("anthropic-ratelimit-unified-reset") if resetTimestamp == "" { @@ -419,6 +432,54 @@ func (s *RateLimitService) shouldScopeClaudeSonnetRateLimit(account *Account, re return strings.Contains(msg, "sonnet") } +// calculateOpenAI429ResetTime 从 OpenAI 429 响应头计算正确的重置时间 +// 返回 nil 表示无法从响应头中确定重置时间 +func (s *RateLimitService) calculateOpenAI429ResetTime(headers http.Header) *time.Time { + snapshot := ParseCodexRateLimitHeaders(headers) + if snapshot == nil { + return nil + } + + normalized := snapshot.Normalize() + if normalized == nil { + return nil + } + + now := time.Now() + + // 判断哪个限制被触发(used_percent >= 100) + is7dExhausted := normalized.Used7dPercent != nil && *normalized.Used7dPercent >= 100 + is5hExhausted := normalized.Used5hPercent != nil && *normalized.Used5hPercent >= 100 + + // 优先使用被触发限制的重置时间 + if is7dExhausted && normalized.Reset7dSeconds != nil { + resetAt := now.Add(time.Duration(*normalized.Reset7dSeconds) * time.Second) + slog.Info("openai_429_7d_limit_exhausted", "reset_after_seconds", *normalized.Reset7dSeconds, "reset_at", resetAt) + return &resetAt + } + if is5hExhausted && normalized.Reset5hSeconds != nil { + resetAt := now.Add(time.Duration(*normalized.Reset5hSeconds) * time.Second) + slog.Info("openai_429_5h_limit_exhausted", "reset_after_seconds", *normalized.Reset5hSeconds, "reset_at", resetAt) + return &resetAt + } + + // 都未达到100%但收到429,使用较长的重置时间 + var maxResetSecs int + if normalized.Reset7dSeconds != nil && *normalized.Reset7dSeconds > maxResetSecs { + maxResetSecs = *normalized.Reset7dSeconds + } + if normalized.Reset5hSeconds != nil && *normalized.Reset5hSeconds > maxResetSecs { + maxResetSecs = *normalized.Reset5hSeconds + } + if maxResetSecs > 0 { + resetAt := now.Add(time.Duration(maxResetSecs) * time.Second) + slog.Info("openai_429_using_max_reset", "max_reset_seconds", maxResetSecs, "reset_at", resetAt) + return &resetAt + } + + return nil +} + // handle529 处理529过载错误 // 根据配置设置过载冷却时间 func (s *RateLimitService) handle529(ctx context.Context, account *Account) { diff --git a/backend/internal/service/ratelimit_service_openai_test.go b/backend/internal/service/ratelimit_service_openai_test.go new file mode 100644 index 00000000..00902068 --- /dev/null +++ b/backend/internal/service/ratelimit_service_openai_test.go @@ -0,0 +1,364 @@ +package service + +import ( + "net/http" + "testing" + "time" +) + +func TestCalculateOpenAI429ResetTime_7dExhausted(t *testing.T) { + svc := &RateLimitService{} + + // Simulate headers when 7d limit is exhausted (100% used) + // Primary = 7d (10080 minutes), Secondary = 5h (300 minutes) + headers := http.Header{} + headers.Set("x-codex-primary-used-percent", "100") + headers.Set("x-codex-primary-reset-after-seconds", "384607") // ~4.5 days + headers.Set("x-codex-primary-window-minutes", "10080") // 7 days + headers.Set("x-codex-secondary-used-percent", "3") + headers.Set("x-codex-secondary-reset-after-seconds", "17369") // ~4.8 hours + headers.Set("x-codex-secondary-window-minutes", "300") // 5 hours + + before := time.Now() + resetAt := svc.calculateOpenAI429ResetTime(headers) + after := time.Now() + + if resetAt == nil { + t.Fatal("expected non-nil resetAt") + } + + // Should be approximately 384607 seconds from now + expectedDuration := 384607 * time.Second + minExpected := before.Add(expectedDuration) + maxExpected := after.Add(expectedDuration) + + if resetAt.Before(minExpected) || resetAt.After(maxExpected) { + t.Errorf("resetAt %v not in expected range [%v, %v]", resetAt, minExpected, maxExpected) + } +} + +func TestCalculateOpenAI429ResetTime_5hExhausted(t *testing.T) { + svc := &RateLimitService{} + + // Simulate headers when 5h limit is exhausted (100% used) + headers := http.Header{} + headers.Set("x-codex-primary-used-percent", "50") + headers.Set("x-codex-primary-reset-after-seconds", "500000") + headers.Set("x-codex-primary-window-minutes", "10080") // 7 days + headers.Set("x-codex-secondary-used-percent", "100") + headers.Set("x-codex-secondary-reset-after-seconds", "3600") // 1 hour + headers.Set("x-codex-secondary-window-minutes", "300") // 5 hours + + before := time.Now() + resetAt := svc.calculateOpenAI429ResetTime(headers) + after := time.Now() + + if resetAt == nil { + t.Fatal("expected non-nil resetAt") + } + + // Should be approximately 3600 seconds from now + expectedDuration := 3600 * time.Second + minExpected := before.Add(expectedDuration) + maxExpected := after.Add(expectedDuration) + + if resetAt.Before(minExpected) || resetAt.After(maxExpected) { + t.Errorf("resetAt %v not in expected range [%v, %v]", resetAt, minExpected, maxExpected) + } +} + +func TestCalculateOpenAI429ResetTime_NeitherExhausted_UsesMax(t *testing.T) { + svc := &RateLimitService{} + + // Neither limit at 100%, should use the longer reset time + headers := http.Header{} + headers.Set("x-codex-primary-used-percent", "80") + headers.Set("x-codex-primary-reset-after-seconds", "100000") + headers.Set("x-codex-primary-window-minutes", "10080") + headers.Set("x-codex-secondary-used-percent", "90") + headers.Set("x-codex-secondary-reset-after-seconds", "5000") + headers.Set("x-codex-secondary-window-minutes", "300") + + before := time.Now() + resetAt := svc.calculateOpenAI429ResetTime(headers) + after := time.Now() + + if resetAt == nil { + t.Fatal("expected non-nil resetAt") + } + + // Should use the max (100000 seconds from 7d window) + expectedDuration := 100000 * time.Second + minExpected := before.Add(expectedDuration) + maxExpected := after.Add(expectedDuration) + + if resetAt.Before(minExpected) || resetAt.After(maxExpected) { + t.Errorf("resetAt %v not in expected range [%v, %v]", resetAt, minExpected, maxExpected) + } +} + +func TestCalculateOpenAI429ResetTime_NoCodexHeaders(t *testing.T) { + svc := &RateLimitService{} + + // No codex headers at all + headers := http.Header{} + headers.Set("content-type", "application/json") + + resetAt := svc.calculateOpenAI429ResetTime(headers) + + if resetAt != nil { + t.Errorf("expected nil resetAt when no codex headers, got %v", resetAt) + } +} + +func TestCalculateOpenAI429ResetTime_ReversedWindowOrder(t *testing.T) { + svc := &RateLimitService{} + + // Test when OpenAI sends primary as 5h and secondary as 7d (reversed) + headers := http.Header{} + headers.Set("x-codex-primary-used-percent", "100") // This is 5h + headers.Set("x-codex-primary-reset-after-seconds", "3600") // 1 hour + headers.Set("x-codex-primary-window-minutes", "300") // 5 hours - smaller! + headers.Set("x-codex-secondary-used-percent", "50") + headers.Set("x-codex-secondary-reset-after-seconds", "500000") + headers.Set("x-codex-secondary-window-minutes", "10080") // 7 days - larger! + + before := time.Now() + resetAt := svc.calculateOpenAI429ResetTime(headers) + after := time.Now() + + if resetAt == nil { + t.Fatal("expected non-nil resetAt") + } + + // Should correctly identify that primary is 5h (smaller window) and use its reset time + expectedDuration := 3600 * time.Second + minExpected := before.Add(expectedDuration) + maxExpected := after.Add(expectedDuration) + + if resetAt.Before(minExpected) || resetAt.After(maxExpected) { + t.Errorf("resetAt %v not in expected range [%v, %v]", resetAt, minExpected, maxExpected) + } +} + +func TestNormalizedCodexLimits(t *testing.T) { + // Test the Normalize() method directly + pUsed := 100.0 + pReset := 384607 + pWindow := 10080 + sUsed := 3.0 + sReset := 17369 + sWindow := 300 + + snapshot := &OpenAICodexUsageSnapshot{ + PrimaryUsedPercent: &pUsed, + PrimaryResetAfterSeconds: &pReset, + PrimaryWindowMinutes: &pWindow, + SecondaryUsedPercent: &sUsed, + SecondaryResetAfterSeconds: &sReset, + SecondaryWindowMinutes: &sWindow, + } + + normalized := snapshot.Normalize() + if normalized == nil { + t.Fatal("expected non-nil normalized") + } + + // Primary has larger window (10080 > 300), so primary should be 7d + if normalized.Used7dPercent == nil || *normalized.Used7dPercent != 100.0 { + t.Errorf("expected Used7dPercent=100, got %v", normalized.Used7dPercent) + } + if normalized.Reset7dSeconds == nil || *normalized.Reset7dSeconds != 384607 { + t.Errorf("expected Reset7dSeconds=384607, got %v", normalized.Reset7dSeconds) + } + if normalized.Used5hPercent == nil || *normalized.Used5hPercent != 3.0 { + t.Errorf("expected Used5hPercent=3, got %v", normalized.Used5hPercent) + } + if normalized.Reset5hSeconds == nil || *normalized.Reset5hSeconds != 17369 { + t.Errorf("expected Reset5hSeconds=17369, got %v", normalized.Reset5hSeconds) + } +} + +func TestNormalizedCodexLimits_OnlyPrimaryData(t *testing.T) { + // Test when only primary has data, no window_minutes + pUsed := 80.0 + pReset := 50000 + + snapshot := &OpenAICodexUsageSnapshot{ + PrimaryUsedPercent: &pUsed, + PrimaryResetAfterSeconds: &pReset, + // No window_minutes, no secondary data + } + + normalized := snapshot.Normalize() + if normalized == nil { + t.Fatal("expected non-nil normalized") + } + + // Legacy assumption: primary=7d, secondary=5h + if normalized.Used7dPercent == nil || *normalized.Used7dPercent != 80.0 { + t.Errorf("expected Used7dPercent=80, got %v", normalized.Used7dPercent) + } + if normalized.Reset7dSeconds == nil || *normalized.Reset7dSeconds != 50000 { + t.Errorf("expected Reset7dSeconds=50000, got %v", normalized.Reset7dSeconds) + } + // Secondary (5h) should be nil + if normalized.Used5hPercent != nil { + t.Errorf("expected Used5hPercent=nil, got %v", *normalized.Used5hPercent) + } + if normalized.Reset5hSeconds != nil { + t.Errorf("expected Reset5hSeconds=nil, got %v", *normalized.Reset5hSeconds) + } +} + +func TestNormalizedCodexLimits_OnlySecondaryData(t *testing.T) { + // Test when only secondary has data, no window_minutes + sUsed := 60.0 + sReset := 3000 + + snapshot := &OpenAICodexUsageSnapshot{ + SecondaryUsedPercent: &sUsed, + SecondaryResetAfterSeconds: &sReset, + // No window_minutes, no primary data + } + + normalized := snapshot.Normalize() + if normalized == nil { + t.Fatal("expected non-nil normalized") + } + + // Legacy assumption: primary=7d, secondary=5h + // So secondary goes to 5h + if normalized.Used5hPercent == nil || *normalized.Used5hPercent != 60.0 { + t.Errorf("expected Used5hPercent=60, got %v", normalized.Used5hPercent) + } + if normalized.Reset5hSeconds == nil || *normalized.Reset5hSeconds != 3000 { + t.Errorf("expected Reset5hSeconds=3000, got %v", normalized.Reset5hSeconds) + } + // Primary (7d) should be nil + if normalized.Used7dPercent != nil { + t.Errorf("expected Used7dPercent=nil, got %v", *normalized.Used7dPercent) + } +} + +func TestNormalizedCodexLimits_BothDataNoWindowMinutes(t *testing.T) { + // Test when both have data but no window_minutes + pUsed := 100.0 + pReset := 400000 + sUsed := 50.0 + sReset := 10000 + + snapshot := &OpenAICodexUsageSnapshot{ + PrimaryUsedPercent: &pUsed, + PrimaryResetAfterSeconds: &pReset, + SecondaryUsedPercent: &sUsed, + SecondaryResetAfterSeconds: &sReset, + // No window_minutes + } + + normalized := snapshot.Normalize() + if normalized == nil { + t.Fatal("expected non-nil normalized") + } + + // Legacy assumption: primary=7d, secondary=5h + if normalized.Used7dPercent == nil || *normalized.Used7dPercent != 100.0 { + t.Errorf("expected Used7dPercent=100, got %v", normalized.Used7dPercent) + } + if normalized.Reset7dSeconds == nil || *normalized.Reset7dSeconds != 400000 { + t.Errorf("expected Reset7dSeconds=400000, got %v", normalized.Reset7dSeconds) + } + if normalized.Used5hPercent == nil || *normalized.Used5hPercent != 50.0 { + t.Errorf("expected Used5hPercent=50, got %v", normalized.Used5hPercent) + } + if normalized.Reset5hSeconds == nil || *normalized.Reset5hSeconds != 10000 { + t.Errorf("expected Reset5hSeconds=10000, got %v", normalized.Reset5hSeconds) + } +} + +func TestHandle429_AnthropicPlatformUnaffected(t *testing.T) { + // Verify that Anthropic platform accounts still use the original logic + // This test ensures we don't break existing Claude account rate limiting + + svc := &RateLimitService{} + + // Simulate Anthropic 429 headers + headers := http.Header{} + headers.Set("anthropic-ratelimit-unified-reset", "1737820800") // A future Unix timestamp + + // For Anthropic platform, calculateOpenAI429ResetTime should return nil + // because it only handles OpenAI platform + resetAt := svc.calculateOpenAI429ResetTime(headers) + + // Should return nil since there are no x-codex-* headers + if resetAt != nil { + t.Errorf("expected nil for Anthropic headers, got %v", resetAt) + } +} + +func TestCalculateOpenAI429ResetTime_UserProvidedScenario(t *testing.T) { + // This is the exact scenario from the user: + // codex_7d_used_percent: 100 + // codex_7d_reset_after_seconds: 384607 (约4.5天后重置) + // codex_5h_used_percent: 3 + // codex_5h_reset_after_seconds: 17369 (约4.8小时后重置) + + svc := &RateLimitService{} + + // Simulate headers matching user's data + // Note: We need to map the canonical 5h/7d back to primary/secondary + // Based on typical OpenAI behavior: primary=7d (larger window), secondary=5h (smaller window) + headers := http.Header{} + headers.Set("x-codex-primary-used-percent", "100") + headers.Set("x-codex-primary-reset-after-seconds", "384607") + headers.Set("x-codex-primary-window-minutes", "10080") // 7 days = 10080 minutes + headers.Set("x-codex-secondary-used-percent", "3") + headers.Set("x-codex-secondary-reset-after-seconds", "17369") + headers.Set("x-codex-secondary-window-minutes", "300") // 5 hours = 300 minutes + + before := time.Now() + resetAt := svc.calculateOpenAI429ResetTime(headers) + after := time.Now() + + if resetAt == nil { + t.Fatal("expected non-nil resetAt for user scenario") + } + + // Should use the 7d reset time (384607 seconds) since 7d limit is exhausted (100%) + expectedDuration := 384607 * time.Second + minExpected := before.Add(expectedDuration) + maxExpected := after.Add(expectedDuration) + + if resetAt.Before(minExpected) || resetAt.After(maxExpected) { + t.Errorf("resetAt %v not in expected range [%v, %v]", resetAt, minExpected, maxExpected) + } + + // Verify it's approximately 4.45 days (384607 seconds) + duration := resetAt.Sub(before) + actualDays := duration.Hours() / 24.0 + + // 384607 / 86400 = ~4.45 days + if actualDays < 4.4 || actualDays > 4.5 { + t.Errorf("expected ~4.45 days, got %.2f days", actualDays) + } + + t.Logf("User scenario: reset_at=%v, duration=%.2f days", resetAt, actualDays) +} + +func TestCalculateOpenAI429ResetTime_5MinFallbackWhenNoReset(t *testing.T) { + // Test that we return nil when there's used_percent but no reset_after_seconds + // This should cause the caller to use the default 5-minute fallback + + svc := &RateLimitService{} + + headers := http.Header{} + headers.Set("x-codex-primary-used-percent", "100") + // No reset_after_seconds! + + resetAt := svc.calculateOpenAI429ResetTime(headers) + + // Should return nil since there's no reset time available + if resetAt != nil { + t.Errorf("expected nil when no reset_after_seconds, got %v", resetAt) + } +} From 9cdb0568ccf30c27a3af81da8194e8bd7a1161b9 Mon Sep 17 00:00:00 2001 From: Gemini Wen Date: Sun, 25 Jan 2026 18:12:15 +0800 Subject: [PATCH 115/147] =?UTF-8?q?fix(subscription):=20=E4=BF=AE=E5=A4=8D?= =?UTF-8?q?=E8=AE=A2=E9=98=85=E8=B0=83=E6=95=B4=E9=80=BB=E8=BE=91,?= =?UTF-8?q?=E5=B7=B2=E8=BF=87=E6=9C=9F=E8=AE=A2=E9=98=85=E4=BB=8E=E5=BD=93?= =?UTF-8?q?=E5=89=8D=E6=97=B6=E9=97=B4=E8=AE=A1=E7=AE=97?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 已过期订阅延长时,从当前时间开始增加天数 - 已过期订阅不允许负向调整(缩短) - 未过期订阅保持原逻辑,从原过期时间增减 Co-Authored-By: Claude Sonnet 4.5 --- .../internal/service/subscription_service.go | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/backend/internal/service/subscription_service.go b/backend/internal/service/subscription_service.go index 6b3ac8ab..3c42852e 100644 --- a/backend/internal/service/subscription_service.go +++ b/backend/internal/service/subscription_service.go @@ -324,14 +324,29 @@ func (s *SubscriptionService) ExtendSubscription(ctx context.Context, subscripti days = -MaxValidityDays } + now := time.Now() + isExpired := !sub.ExpiresAt.After(now) + + // 如果订阅已过期,不允许负向调整 + if isExpired && days < 0 { + return nil, infraerrors.BadRequest("CANNOT_SHORTEN_EXPIRED", "cannot shorten an expired subscription") + } + // 计算新的过期时间 - newExpiresAt := sub.ExpiresAt.AddDate(0, 0, days) + var newExpiresAt time.Time + if isExpired { + // 已过期:从当前时间开始增加天数 + newExpiresAt = now.AddDate(0, 0, days) + } else { + // 未过期:从原过期时间增加/减少天数 + newExpiresAt = sub.ExpiresAt.AddDate(0, 0, days) + } + if newExpiresAt.After(MaxExpiresAt) { newExpiresAt = MaxExpiresAt } // 检查新的过期时间必须大于当前时间 - now := time.Now() if !newExpiresAt.After(now) { return nil, ErrAdjustWouldExpire } From 8c1233393ffcd20b17c555786fc571b9558186f7 Mon Sep 17 00:00:00 2001 From: ianshaw Date: Mon, 26 Jan 2026 02:53:19 +0800 Subject: [PATCH 116/147] =?UTF-8?q?fix(antigravity):=20=E4=BF=AE=E5=A4=8D?= =?UTF-8?q?=20Gemini=20=E6=A8=A1=E5=9E=8B=20thoughtSignature=20=E8=A2=AB?= =?UTF-8?q?=E9=94=99=E8=AF=AF=E8=A6=86=E7=9B=96=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## 问题描述 在使用 Gemini 模型(gemini-3-flash-preview)时,出现 400 错误: "Unable to submit request because Thought signature is not valid" ## 根本原因 在 `request_transformer.go` 的 `buildParts()` 函数中: - 对于 `tool_use` 和 `thinking` 块,当 `allowDummyThought=true`(Gemini 模型)时 - 代码会无条件将客户端传入的真实 `thoughtSignature` 覆盖成 dummy 值 - 导致 Gemini API 验证签名失败(签名与上下文不匹配) ## 修复方案 修改 signature 处理逻辑: 1. **优先透传真实 signature**:如果客户端提供了有效的 signature,保留它 2. **缺失时才使用 dummy**:只有在 signature 缺失且是 Gemini 模型时,才使用 dummy signature 3. **Claude 模型特殊处理**:将 dummy signature 视为缺失,避免透传到需要真实签名的链路 ## 修改内容 ### request_transformer.go - `thinking` 块(第 367-386 行):优先透传真实 signature - `tool_use` 块(第 411-418 行):优先透传真实 signature ### request_transformer_test.go - 修改测试用例名称,反映新的行为 - 新增测试用例验证"缺失时才使用 dummy"的逻辑 ## 影响范围 - 修复 Gemini 模型在多轮对话中使用 tool_use 时的签名验证错误 - 不影响 Claude 模型的现有行为 - 提高跨账号切换时的稳定性 相关问题:#[issue_number] --- .../pkg/antigravity/request_transformer.go | 16 +++++++++------- .../antigravity/request_transformer_test.go | 19 ++++++++++++++++++- 2 files changed, 27 insertions(+), 8 deletions(-) diff --git a/backend/internal/pkg/antigravity/request_transformer.go b/backend/internal/pkg/antigravity/request_transformer.go index 1b21bd58..63f6ee7c 100644 --- a/backend/internal/pkg/antigravity/request_transformer.go +++ b/backend/internal/pkg/antigravity/request_transformer.go @@ -367,8 +367,10 @@ func buildParts(content json.RawMessage, toolIDToName map[string]string, allowDu Text: block.Thinking, Thought: true, } - // 保留原有 signature(Claude 模型需要有效的 signature) - if block.Signature != "" { + // signature 处理: + // - Claude 模型(allowDummyThought=false):必须是上游返回的真实 signature(dummy 视为缺失) + // - Gemini 模型(allowDummyThought=true):优先透传真实 signature,缺失时使用 dummy signature + if block.Signature != "" && (allowDummyThought || block.Signature != dummyThoughtSignature) { part.ThoughtSignature = block.Signature } else if !allowDummyThought { // Claude 模型需要有效 signature;在缺失时降级为普通文本,并在上层禁用 thinking mode。 @@ -407,12 +409,12 @@ func buildParts(content json.RawMessage, toolIDToName map[string]string, allowDu }, } // tool_use 的 signature 处理: - // - Gemini 模型:使用 dummy signature(跳过 thought_signature 校验) - // - Claude 模型:透传上游返回的真实 signature(Vertex/Google 需要完整签名链路) - if allowDummyThought { - part.ThoughtSignature = dummyThoughtSignature - } else if block.Signature != "" && block.Signature != dummyThoughtSignature { + // - Claude 模型(allowDummyThought=false):必须是上游返回的真实 signature(dummy 视为缺失) + // - Gemini 模型(allowDummyThought=true):优先透传真实 signature,缺失时使用 dummy signature + if block.Signature != "" && (allowDummyThought || block.Signature != dummyThoughtSignature) { part.ThoughtSignature = block.Signature + } else if allowDummyThought { + part.ThoughtSignature = dummyThoughtSignature } parts = append(parts, part) diff --git a/backend/internal/pkg/antigravity/request_transformer_test.go b/backend/internal/pkg/antigravity/request_transformer_test.go index 60ee6f63..9d62a4a1 100644 --- a/backend/internal/pkg/antigravity/request_transformer_test.go +++ b/backend/internal/pkg/antigravity/request_transformer_test.go @@ -100,7 +100,7 @@ func TestBuildParts_ToolUseSignatureHandling(t *testing.T) { {"type": "tool_use", "id": "t1", "name": "Bash", "input": {"command": "ls"}, "signature": "sig_tool_abc"} ]` - t.Run("Gemini uses dummy tool_use signature", func(t *testing.T) { + t.Run("Gemini preserves provided tool_use signature", func(t *testing.T) { toolIDToName := make(map[string]string) parts, _, err := buildParts(json.RawMessage(content), toolIDToName, true) if err != nil { @@ -109,6 +109,23 @@ func TestBuildParts_ToolUseSignatureHandling(t *testing.T) { if len(parts) != 1 || parts[0].FunctionCall == nil { t.Fatalf("expected 1 functionCall part, got %+v", parts) } + if parts[0].ThoughtSignature != "sig_tool_abc" { + t.Fatalf("expected preserved tool signature %q, got %q", "sig_tool_abc", parts[0].ThoughtSignature) + } + }) + + t.Run("Gemini falls back to dummy tool_use signature when missing", func(t *testing.T) { + contentNoSig := `[ + {"type": "tool_use", "id": "t1", "name": "Bash", "input": {"command": "ls"}} + ]` + toolIDToName := make(map[string]string) + parts, _, err := buildParts(json.RawMessage(contentNoSig), toolIDToName, true) + if err != nil { + t.Fatalf("buildParts() error = %v", err) + } + if len(parts) != 1 || parts[0].FunctionCall == nil { + t.Fatalf("expected 1 functionCall part, got %+v", parts) + } if parts[0].ThoughtSignature != dummyThoughtSignature { t.Fatalf("expected dummy tool signature %q, got %q", dummyThoughtSignature, parts[0].ThoughtSignature) } From 839975b0cfdb7833d9a9215c7ed2cda961425153 Mon Sep 17 00:00:00 2001 From: ianshaw Date: Mon, 26 Jan 2026 04:40:38 +0800 Subject: [PATCH 117/147] =?UTF-8?q?feat(gemini):=20=E6=94=AF=E6=8C=81=20Ge?= =?UTF-8?q?mini=20CLI=20=E7=B2=98=E6=80=A7=E4=BC=9A=E8=AF=9D=E4=B8=8E?= =?UTF-8?q?=E8=B7=A8=E8=B4=A6=E5=8F=B7=20thoughtSignature=20=E6=B8=85?= =?UTF-8?q?=E7=90=86?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## 问题背景 1. Gemini CLI 没有明确的会话标识(如 Claude Code 的 metadata.user_id) 2. thoughtSignature 与具体上游账号强绑定,跨账号使用会导致 400 错误 3. 粘性会话切换账号或 cache 丢失时,旧签名会导致请求失败 ## 解决方案 ### 1. Gemini CLI 会话标识提取 - 从 `x-gemini-api-privileged-user-id` header 和请求体中的 tmp 目录哈希生成会话标识 - 组合策略:SHA256(privileged-user-id + ":" + tmp_dir_hash) - 正则提取:`/\.gemini/tmp/([A-Fa-f0-9]{64})` ### 2. 跨账号 thoughtSignature 清理 实现三种场景的智能清理: 1. **Cache 命中 + 账号切换** - 粘性会话绑定的账号与当前选择的账号不同时清理 2. **同一请求内 failover 切换** - 通过 sessionBoundAccountID 跟踪,检测重试时的账号切换 3. **Gemini CLI + Cache 未命中 + 含签名** - 预防性清理,避免 cache 丢失后首次转发就 400 - 仅对 Gemini CLI 请求且请求体包含 thoughtSignature 时触发 ## 修改内容 ### backend/internal/handler/gemini_v1beta_handler.go - 添加 `extractGeminiCLISessionHash` 函数提取 Gemini CLI 会话标识 - 添加 `isGeminiCLIRequest` 函数识别 Gemini CLI 请求 - 实现账号切换检测与 thoughtSignature 清理逻辑 - 添加 `geminiCLITmpDirRegex` 正则表达式 ### backend/internal/service/gateway_service.go - 添加 `GetCachedSessionAccountID` 方法查询粘性会话绑定的账号 ID ### backend/internal/service/gemini_native_signature_cleaner.go (新增) - 实现 `CleanGeminiNativeThoughtSignatures` 函数 - 递归清理 JSON 中的所有 thoughtSignature 字段 - 支持任意 JSON 顶层类型(object/array) ### backend/internal/handler/gemini_cli_session_test.go (新增) - 测试 Gemini CLI 会话哈希提取逻辑 - 测试 tmp 目录正则匹配 - 覆盖有/无 privileged-user-id 的场景 ## 影响范围 - 修复 Gemini CLI 多轮对话时账号切换导致的 400 错误 - 提高粘性会话的稳定性和容错能力 - 不影响其他客户端(Claude Code 等)的会话标识生成 ## 测试 - 单元测试:go test -tags=unit ./internal/handler -run TestExtractGeminiCLISessionHash - 单元测试:go test -tags=unit ./internal/handler -run TestGeminiCLITmpDirRegex - 编译验证:go build ./cmd/server --- .../handler/gemini_cli_session_test.go | 122 ++++++++++++++++++ .../internal/handler/gemini_v1beta_handler.go | 86 +++++++++++- backend/internal/service/gateway_service.go | 13 ++ .../gemini_native_signature_cleaner.go | 72 +++++++++++ 4 files changed, 291 insertions(+), 2 deletions(-) create mode 100644 backend/internal/handler/gemini_cli_session_test.go create mode 100644 backend/internal/service/gemini_native_signature_cleaner.go diff --git a/backend/internal/handler/gemini_cli_session_test.go b/backend/internal/handler/gemini_cli_session_test.go new file mode 100644 index 00000000..0b37f5f2 --- /dev/null +++ b/backend/internal/handler/gemini_cli_session_test.go @@ -0,0 +1,122 @@ +//go:build unit + +package handler + +import ( + "crypto/sha256" + "encoding/hex" + "net/http/httptest" + "testing" + + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/require" +) + +func TestExtractGeminiCLISessionHash(t *testing.T) { + tests := []struct { + name string + body string + privilegedUserID string + wantEmpty bool + wantHash string + }{ + { + name: "with privileged-user-id and tmp dir", + body: `{"contents":[{"parts":[{"text":"The project's temporary directory is: /Users/ianshaw/.gemini/tmp/f7851b009ed314d1baee62e83115f486160283f4a55a582d89fdac8b9fe3b740"}]}]}`, + privilegedUserID: "90785f52-8bbe-4b17-b111-a1ddea1636c3", + wantEmpty: false, + wantHash: func() string { + combined := "90785f52-8bbe-4b17-b111-a1ddea1636c3:f7851b009ed314d1baee62e83115f486160283f4a55a582d89fdac8b9fe3b740" + hash := sha256.Sum256([]byte(combined)) + return hex.EncodeToString(hash[:]) + }(), + }, + { + name: "without privileged-user-id but with tmp dir", + body: `{"contents":[{"parts":[{"text":"The project's temporary directory is: /Users/ianshaw/.gemini/tmp/f7851b009ed314d1baee62e83115f486160283f4a55a582d89fdac8b9fe3b740"}]}]}`, + privilegedUserID: "", + wantEmpty: false, + wantHash: "f7851b009ed314d1baee62e83115f486160283f4a55a582d89fdac8b9fe3b740", + }, + { + name: "without tmp dir", + body: `{"contents":[{"parts":[{"text":"Hello world"}]}]}`, + privilegedUserID: "90785f52-8bbe-4b17-b111-a1ddea1636c3", + wantEmpty: true, + }, + { + name: "empty body", + body: "", + privilegedUserID: "90785f52-8bbe-4b17-b111-a1ddea1636c3", + wantEmpty: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // 创建测试上下文 + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Request = httptest.NewRequest("POST", "/test", nil) + if tt.privilegedUserID != "" { + c.Request.Header.Set("x-gemini-api-privileged-user-id", tt.privilegedUserID) + } + + // 调用函数 + result := extractGeminiCLISessionHash(c, []byte(tt.body)) + + // 验证结果 + if tt.wantEmpty { + require.Empty(t, result, "expected empty session hash") + } else { + require.NotEmpty(t, result, "expected non-empty session hash") + require.Equal(t, tt.wantHash, result, "session hash mismatch") + } + }) + } +} + +func TestGeminiCLITmpDirRegex(t *testing.T) { + tests := []struct { + name string + input string + wantMatch bool + wantHash string + }{ + { + name: "valid tmp dir path", + input: "/Users/ianshaw/.gemini/tmp/f7851b009ed314d1baee62e83115f486160283f4a55a582d89fdac8b9fe3b740", + wantMatch: true, + wantHash: "f7851b009ed314d1baee62e83115f486160283f4a55a582d89fdac8b9fe3b740", + }, + { + name: "valid tmp dir path in text", + input: "The project's temporary directory is: /Users/ianshaw/.gemini/tmp/f7851b009ed314d1baee62e83115f486160283f4a55a582d89fdac8b9fe3b740\nOther text", + wantMatch: true, + wantHash: "f7851b009ed314d1baee62e83115f486160283f4a55a582d89fdac8b9fe3b740", + }, + { + name: "invalid hash length", + input: "/Users/ianshaw/.gemini/tmp/abc123", + wantMatch: false, + }, + { + name: "no tmp dir", + input: "Hello world", + wantMatch: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + match := geminiCLITmpDirRegex.FindStringSubmatch(tt.input) + if tt.wantMatch { + require.NotNil(t, match, "expected regex to match") + require.Len(t, match, 2, "expected 2 capture groups") + require.Equal(t, tt.wantHash, match[1], "hash mismatch") + } else { + require.Nil(t, match, "expected regex not to match") + } + }) + } +} diff --git a/backend/internal/handler/gemini_v1beta_handler.go b/backend/internal/handler/gemini_v1beta_handler.go index c7646b38..32f83013 100644 --- a/backend/internal/handler/gemini_v1beta_handler.go +++ b/backend/internal/handler/gemini_v1beta_handler.go @@ -1,11 +1,15 @@ package handler import ( + "bytes" "context" + "crypto/sha256" + "encoding/hex" "errors" "io" "log" "net/http" + "regexp" "strings" "time" @@ -19,6 +23,17 @@ import ( "github.com/gin-gonic/gin" ) +// geminiCLITmpDirRegex 用于从 Gemini CLI 请求体中提取 tmp 目录的哈希值 +// 匹配格式: /Users/xxx/.gemini/tmp/[64位十六进制哈希] +var geminiCLITmpDirRegex = regexp.MustCompile(`/\.gemini/tmp/([A-Fa-f0-9]{64})`) + +func isGeminiCLIRequest(c *gin.Context, body []byte) bool { + if strings.TrimSpace(c.GetHeader("x-gemini-api-privileged-user-id")) != "" { + return true + } + return geminiCLITmpDirRegex.Match(body) +} + // GeminiV1BetaListModels proxies: // GET /v1beta/models func (h *GatewayHandler) GeminiV1BetaListModels(c *gin.Context) { @@ -214,12 +229,26 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { } // 3) select account (sticky session based on request body) - parsedReq, _ := service.ParseGatewayRequest(body) - sessionHash := h.gatewayService.GenerateSessionHash(parsedReq) + // 优先使用 Gemini CLI 的会话标识(privileged-user-id + tmp 目录哈希) + sessionHash := extractGeminiCLISessionHash(c, body) + if sessionHash == "" { + // Fallback: 使用通用的会话哈希生成逻辑(适用于其他客户端) + parsedReq, _ := service.ParseGatewayRequest(body) + sessionHash = h.gatewayService.GenerateSessionHash(parsedReq) + } sessionKey := sessionHash if sessionHash != "" { sessionKey = "gemini:" + sessionHash } + + // 查询粘性会话绑定的账号 ID(用于检测账号切换) + var sessionBoundAccountID int64 + if sessionKey != "" { + sessionBoundAccountID, _ = h.gatewayService.GetCachedSessionAccountID(c.Request.Context(), apiKey.GroupID, sessionKey) + } + isCLI := isGeminiCLIRequest(c, body) + cleanedForUnknownBinding := false + maxAccountSwitches := h.maxAccountSwitchesGemini switchCount := 0 failedAccountIDs := make(map[int64]struct{}) @@ -238,6 +267,24 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { account := selection.Account setOpsSelectedAccount(c, account.ID) + // 检测账号切换:如果粘性会话绑定的账号与当前选择的账号不同,清除 thoughtSignature + // 注意:Gemini 原生 API 的 thoughtSignature 与具体上游账号强相关;跨账号透传会导致 400。 + if sessionBoundAccountID > 0 && sessionBoundAccountID != account.ID { + log.Printf("[Gemini] Sticky session account switched: %d -> %d, cleaning thoughtSignature", sessionBoundAccountID, account.ID) + body = service.CleanGeminiNativeThoughtSignatures(body) + sessionBoundAccountID = account.ID + } else if sessionKey != "" && sessionBoundAccountID == 0 && isCLI && !cleanedForUnknownBinding && bytes.Contains(body, []byte(`"thoughtSignature"`)) { + // 无缓存绑定但请求里已有 thoughtSignature:常见于缓存丢失/TTL 过期后,CLI 继续携带旧签名。 + // 为避免第一次转发就 400,这里做一次确定性清理,让新账号重新生成签名链路。 + log.Printf("[Gemini] Sticky session binding missing for CLI request, cleaning thoughtSignature proactively") + body = service.CleanGeminiNativeThoughtSignatures(body) + cleanedForUnknownBinding = true + sessionBoundAccountID = account.ID + } else if sessionBoundAccountID == 0 { + // 记录本次请求中首次选择到的账号,便于同一请求内 failover 时检测切换。 + sessionBoundAccountID = account.ID + } + // 4) account concurrency slot accountReleaseFunc := selection.ReleaseFunc if !selection.Acquired { @@ -433,3 +480,38 @@ func shouldFallbackGeminiModels(res *service.UpstreamHTTPResult) bool { } return false } + +// extractGeminiCLISessionHash 从 Gemini CLI 请求中提取会话标识。 +// 组合 x-gemini-api-privileged-user-id header 和请求体中的 tmp 目录哈希。 +// +// 会话标识生成策略: +// 1. 从请求体中提取 tmp 目录哈希(64位十六进制) +// 2. 从 header 中提取 privileged-user-id(UUID) +// 3. 组合两者生成 SHA256 哈希作为最终的会话标识 +// +// 如果找不到 tmp 目录哈希,返回空字符串(不使用粘性会话)。 +// +// extractGeminiCLISessionHash extracts session identifier from Gemini CLI requests. +// Combines x-gemini-api-privileged-user-id header with tmp directory hash from request body. +func extractGeminiCLISessionHash(c *gin.Context, body []byte) string { + // 1. 从请求体中提取 tmp 目录哈希 + match := geminiCLITmpDirRegex.FindSubmatch(body) + if len(match) < 2 { + return "" // 没有找到 tmp 目录,不使用粘性会话 + } + tmpDirHash := string(match[1]) + + // 2. 提取 privileged-user-id + privilegedUserID := strings.TrimSpace(c.GetHeader("x-gemini-api-privileged-user-id")) + + // 3. 组合生成最终的 session hash + if privilegedUserID != "" { + // 组合两个标识符:privileged-user-id + tmp 目录哈希 + combined := privilegedUserID + ":" + tmpDirHash + hash := sha256.Sum256([]byte(combined)) + return hex.EncodeToString(hash[:]) + } + + // 如果没有 privileged-user-id,直接使用 tmp 目录哈希 + return tmpDirHash +} diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 9565da29..a8f5baeb 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -305,6 +305,19 @@ func (s *GatewayService) BindStickySession(ctx context.Context, groupID *int64, return s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), sessionHash, accountID, stickySessionTTL) } +// GetCachedSessionAccountID retrieves the account ID bound to a sticky session. +// Returns 0 if no binding exists or on error. +func (s *GatewayService) GetCachedSessionAccountID(ctx context.Context, groupID *int64, sessionHash string) (int64, error) { + if sessionHash == "" || s.cache == nil { + return 0, nil + } + accountID, err := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), sessionHash) + if err != nil { + return 0, err + } + return accountID, nil +} + func (s *GatewayService) extractCacheableContent(parsed *ParsedRequest) string { if parsed == nil { return "" diff --git a/backend/internal/service/gemini_native_signature_cleaner.go b/backend/internal/service/gemini_native_signature_cleaner.go new file mode 100644 index 00000000..b3352fb0 --- /dev/null +++ b/backend/internal/service/gemini_native_signature_cleaner.go @@ -0,0 +1,72 @@ +package service + +import ( + "encoding/json" +) + +// CleanGeminiNativeThoughtSignatures 从 Gemini 原生 API 请求中移除 thoughtSignature 字段, +// 以避免跨账号签名验证错误。 +// +// 当粘性会话切换账号时(例如原账号异常、不可调度等),旧账号返回的 thoughtSignature +// 会导致新账号的签名验证失败。通过移除这些签名,让新账号重新生成有效的签名。 +// +// CleanGeminiNativeThoughtSignatures removes thoughtSignature fields from Gemini native API requests +// to avoid cross-account signature validation errors. +// +// When sticky session switches accounts (e.g., original account becomes unavailable), +// thoughtSignatures from the old account will cause validation failures on the new account. +// By removing these signatures, we allow the new account to generate valid signatures. +func CleanGeminiNativeThoughtSignatures(body []byte) []byte { + if len(body) == 0 { + return body + } + + // 解析 JSON + var data any + if err := json.Unmarshal(body, &data); err != nil { + // 如果解析失败,返回原始 body(可能不是 JSON 或格式不正确) + return body + } + + // 递归清理 thoughtSignature + cleaned := cleanThoughtSignaturesRecursive(data) + + // 重新序列化 + result, err := json.Marshal(cleaned) + if err != nil { + // 如果序列化失败,返回原始 body + return body + } + + return result +} + +// cleanThoughtSignaturesRecursive 递归遍历数据结构,移除所有 thoughtSignature 字段 +func cleanThoughtSignaturesRecursive(data any) any { + switch v := data.(type) { + case map[string]any: + // 创建新的 map,移除 thoughtSignature + result := make(map[string]any, len(v)) + for key, value := range v { + // 跳过 thoughtSignature 字段 + if key == "thoughtSignature" { + continue + } + // 递归处理嵌套结构 + result[key] = cleanThoughtSignaturesRecursive(value) + } + return result + + case []any: + // 递归处理数组中的每个元素 + result := make([]any, len(v)) + for i, item := range v { + result[i] = cleanThoughtSignaturesRecursive(item) + } + return result + + default: + // 基本类型(string, number, bool, null)直接返回 + return v + } +} From 1245f07a2d4a0fe79c89364a440bcfc1d2af2c27 Mon Sep 17 00:00:00 2001 From: shaw Date: Mon, 26 Jan 2026 08:45:43 +0800 Subject: [PATCH 118/147] =?UTF-8?q?feat(auth):=20=E5=AE=9E=E7=8E=B0=20TOTP?= =?UTF-8?q?=20=E5=8F=8C=E5=9B=A0=E7=B4=A0=E8=AE=A4=E8=AF=81=E5=8A=9F?= =?UTF-8?q?=E8=83=BD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 新增功能: - 支持 Google Authenticator 等应用进行 TOTP 二次验证 - 用户可在个人设置中启用/禁用 2FA - 登录时支持 TOTP 验证流程 - 管理后台可全局开关 TOTP 功能 安全增强: - TOTP 密钥使用 AES-256-GCM 加密存储 - 添加 TOTP_ENCRYPTION_KEY 配置项,必须手动配置才能启用功能 - 防止服务重启导致加密密钥变更使用户无法登录 - 验证失败次数限制,防止暴力破解 配置说明: - Docker 部署:在 .env 中设置 TOTP_ENCRYPTION_KEY - 非 Docker 部署:在 config.yaml 中设置 totp.encryption_key - 生成密钥命令:openssl rand -hex 32 --- backend/cmd/server/wire_gen.go | 11 +- backend/ent/migrate/schema.go | 3 + backend/ent/mutation.go | 202 ++++++- backend/ent/runtime/runtime.go | 4 + backend/ent/schema/user.go | 11 + backend/ent/user.go | 45 +- backend/ent/user/user.go | 26 + backend/ent/user/where.go | 150 ++++++ backend/ent/user_create.go | 221 ++++++++ backend/ent/user_update.go | 138 +++++ backend/go.mod | 2 + backend/go.sum | 4 + backend/internal/config/config.go | 28 + .../internal/handler/admin/setting_handler.go | 19 + backend/internal/handler/auth_handler.go | 100 +++- backend/internal/handler/dto/settings.go | 11 +- backend/internal/handler/handler.go | 1 + backend/internal/handler/totp_handler.go | 181 +++++++ backend/internal/handler/wire.go | 3 + .../tlsfingerprint/dialer_integration_test.go | 278 ++++++++++ .../pkg/tlsfingerprint/dialer_test.go | 293 +--------- backend/internal/repository/aes_encryptor.go | 95 ++++ backend/internal/repository/api_key_repo.go | 25 +- backend/internal/repository/totp_cache.go | 149 ++++++ backend/internal/repository/user_repo.go | 44 ++ backend/internal/repository/wire.go | 4 + backend/internal/server/api_contract_test.go | 16 +- backend/internal/server/routes/auth.go | 1 + backend/internal/server/routes/user.go | 11 + .../service/admin_service_delete_test.go | 12 + backend/internal/service/domain_constants.go | 3 + backend/internal/service/email_service.go | 4 +- backend/internal/service/setting_service.go | 21 + backend/internal/service/settings_view.go | 2 + backend/internal/service/totp_service.go | 506 ++++++++++++++++++ backend/internal/service/user.go | 5 + backend/internal/service/user_service.go | 5 + backend/internal/service/wire.go | 1 + backend/migrations/044_add_user_totp.sql | 12 + deploy/.env.example | 12 + deploy/config.example.yaml | 15 + deploy/docker-compose.yml | 10 + frontend/package-lock.json | 272 +++++++++- frontend/package.json | 2 + frontend/pnpm-lock.yaml | 175 ++++++ frontend/src/api/admin/settings.ts | 3 + frontend/src/api/auth.ts | 41 +- frontend/src/api/index.ts | 3 +- frontend/src/api/totp.ts | 83 +++ .../src/components/auth/TotpLoginModal.vue | 176 ++++++ .../user/profile/ProfileTotpCard.vue | 154 ++++++ .../user/profile/TotpDisableDialog.vue | 179 +++++++ .../user/profile/TotpSetupModal.vue | 400 ++++++++++++++ frontend/src/i18n/locales/en.ts | 50 +- frontend/src/i18n/locales/zh.ts | 50 +- frontend/src/stores/auth.ts | 73 ++- frontend/src/types/index.ts | 49 ++ frontend/src/views/admin/SettingsView.vue | 28 + frontend/src/views/auth/LoginView.vue | 66 ++- frontend/src/views/user/ProfileView.vue | 2 + 60 files changed, 4140 insertions(+), 350 deletions(-) create mode 100644 backend/internal/handler/totp_handler.go create mode 100644 backend/internal/pkg/tlsfingerprint/dialer_integration_test.go create mode 100644 backend/internal/repository/aes_encryptor.go create mode 100644 backend/internal/repository/totp_cache.go create mode 100644 backend/internal/service/totp_service.go create mode 100644 backend/migrations/044_add_user_totp.sql create mode 100644 frontend/src/api/totp.ts create mode 100644 frontend/src/components/auth/TotpLoginModal.vue create mode 100644 frontend/src/components/user/profile/ProfileTotpCard.vue create mode 100644 frontend/src/components/user/profile/TotpDisableDialog.vue create mode 100644 frontend/src/components/user/profile/TotpSetupModal.vue diff --git a/backend/cmd/server/wire_gen.go b/backend/cmd/server/wire_gen.go index 6a1c79f0..71624091 100644 --- a/backend/cmd/server/wire_gen.go +++ b/backend/cmd/server/wire_gen.go @@ -63,7 +63,13 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { promoService := service.NewPromoService(promoCodeRepository, userRepository, billingCacheService, client, apiKeyAuthCacheInvalidator) authService := service.NewAuthService(userRepository, configConfig, settingService, emailService, turnstileService, emailQueueService, promoService) userService := service.NewUserService(userRepository, apiKeyAuthCacheInvalidator) - authHandler := handler.NewAuthHandler(configConfig, authService, userService, settingService, promoService) + secretEncryptor, err := repository.NewAESEncryptor(configConfig) + if err != nil { + return nil, err + } + totpCache := repository.NewTotpCache(redisClient) + totpService := service.NewTotpService(userRepository, secretEncryptor, totpCache, settingService, emailService, emailQueueService) + authHandler := handler.NewAuthHandler(configConfig, authService, userService, settingService, promoService, totpService) userHandler := handler.NewUserHandler(userService) apiKeyHandler := handler.NewAPIKeyHandler(apiKeyService) usageLogRepository := repository.NewUsageLogRepository(client, db) @@ -165,7 +171,8 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { gatewayHandler := handler.NewGatewayHandler(gatewayService, geminiMessagesCompatService, antigravityGatewayService, userService, concurrencyService, billingCacheService, configConfig) openAIGatewayHandler := handler.NewOpenAIGatewayHandler(openAIGatewayService, concurrencyService, billingCacheService, configConfig) handlerSettingHandler := handler.ProvideSettingHandler(settingService, buildInfo) - handlers := handler.ProvideHandlers(authHandler, userHandler, apiKeyHandler, usageHandler, redeemHandler, subscriptionHandler, adminHandlers, gatewayHandler, openAIGatewayHandler, handlerSettingHandler) + totpHandler := handler.NewTotpHandler(totpService) + handlers := handler.ProvideHandlers(authHandler, userHandler, apiKeyHandler, usageHandler, redeemHandler, subscriptionHandler, adminHandlers, gatewayHandler, openAIGatewayHandler, handlerSettingHandler, totpHandler) jwtAuthMiddleware := middleware.NewJWTAuthMiddleware(authService, userService) adminAuthMiddleware := middleware.NewAdminAuthMiddleware(authService, userService, settingService) apiKeyAuthMiddleware := middleware.NewAPIKeyAuthMiddleware(apiKeyService, subscriptionService, configConfig) diff --git a/backend/ent/migrate/schema.go b/backend/ent/migrate/schema.go index d1f05186..d2a39331 100644 --- a/backend/ent/migrate/schema.go +++ b/backend/ent/migrate/schema.go @@ -610,6 +610,9 @@ var ( {Name: "status", Type: field.TypeString, Size: 20, Default: "active"}, {Name: "username", Type: field.TypeString, Size: 100, Default: ""}, {Name: "notes", Type: field.TypeString, Default: "", SchemaType: map[string]string{"postgres": "text"}}, + {Name: "totp_secret_encrypted", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "totp_enabled", Type: field.TypeBool, Default: false}, + {Name: "totp_enabled_at", Type: field.TypeTime, Nullable: true}, } // UsersTable holds the schema information for the "users" table. UsersTable = &schema.Table{ diff --git a/backend/ent/mutation.go b/backend/ent/mutation.go index 9b330616..7f3071c2 100644 --- a/backend/ent/mutation.go +++ b/backend/ent/mutation.go @@ -14360,6 +14360,9 @@ type UserMutation struct { status *string username *string notes *string + totp_secret_encrypted *string + totp_enabled *bool + totp_enabled_at *time.Time clearedFields map[string]struct{} api_keys map[int64]struct{} removedapi_keys map[int64]struct{} @@ -14937,6 +14940,140 @@ func (m *UserMutation) ResetNotes() { m.notes = nil } +// SetTotpSecretEncrypted sets the "totp_secret_encrypted" field. +func (m *UserMutation) SetTotpSecretEncrypted(s string) { + m.totp_secret_encrypted = &s +} + +// TotpSecretEncrypted returns the value of the "totp_secret_encrypted" field in the mutation. +func (m *UserMutation) TotpSecretEncrypted() (r string, exists bool) { + v := m.totp_secret_encrypted + if v == nil { + return + } + return *v, true +} + +// OldTotpSecretEncrypted returns the old "totp_secret_encrypted" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldTotpSecretEncrypted(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldTotpSecretEncrypted is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldTotpSecretEncrypted requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldTotpSecretEncrypted: %w", err) + } + return oldValue.TotpSecretEncrypted, nil +} + +// ClearTotpSecretEncrypted clears the value of the "totp_secret_encrypted" field. +func (m *UserMutation) ClearTotpSecretEncrypted() { + m.totp_secret_encrypted = nil + m.clearedFields[user.FieldTotpSecretEncrypted] = struct{}{} +} + +// TotpSecretEncryptedCleared returns if the "totp_secret_encrypted" field was cleared in this mutation. +func (m *UserMutation) TotpSecretEncryptedCleared() bool { + _, ok := m.clearedFields[user.FieldTotpSecretEncrypted] + return ok +} + +// ResetTotpSecretEncrypted resets all changes to the "totp_secret_encrypted" field. +func (m *UserMutation) ResetTotpSecretEncrypted() { + m.totp_secret_encrypted = nil + delete(m.clearedFields, user.FieldTotpSecretEncrypted) +} + +// SetTotpEnabled sets the "totp_enabled" field. +func (m *UserMutation) SetTotpEnabled(b bool) { + m.totp_enabled = &b +} + +// TotpEnabled returns the value of the "totp_enabled" field in the mutation. +func (m *UserMutation) TotpEnabled() (r bool, exists bool) { + v := m.totp_enabled + if v == nil { + return + } + return *v, true +} + +// OldTotpEnabled returns the old "totp_enabled" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldTotpEnabled(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldTotpEnabled is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldTotpEnabled requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldTotpEnabled: %w", err) + } + return oldValue.TotpEnabled, nil +} + +// ResetTotpEnabled resets all changes to the "totp_enabled" field. +func (m *UserMutation) ResetTotpEnabled() { + m.totp_enabled = nil +} + +// SetTotpEnabledAt sets the "totp_enabled_at" field. +func (m *UserMutation) SetTotpEnabledAt(t time.Time) { + m.totp_enabled_at = &t +} + +// TotpEnabledAt returns the value of the "totp_enabled_at" field in the mutation. +func (m *UserMutation) TotpEnabledAt() (r time.Time, exists bool) { + v := m.totp_enabled_at + if v == nil { + return + } + return *v, true +} + +// OldTotpEnabledAt returns the old "totp_enabled_at" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldTotpEnabledAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldTotpEnabledAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldTotpEnabledAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldTotpEnabledAt: %w", err) + } + return oldValue.TotpEnabledAt, nil +} + +// ClearTotpEnabledAt clears the value of the "totp_enabled_at" field. +func (m *UserMutation) ClearTotpEnabledAt() { + m.totp_enabled_at = nil + m.clearedFields[user.FieldTotpEnabledAt] = struct{}{} +} + +// TotpEnabledAtCleared returns if the "totp_enabled_at" field was cleared in this mutation. +func (m *UserMutation) TotpEnabledAtCleared() bool { + _, ok := m.clearedFields[user.FieldTotpEnabledAt] + return ok +} + +// ResetTotpEnabledAt resets all changes to the "totp_enabled_at" field. +func (m *UserMutation) ResetTotpEnabledAt() { + m.totp_enabled_at = nil + delete(m.clearedFields, user.FieldTotpEnabledAt) +} + // AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by ids. func (m *UserMutation) AddAPIKeyIDs(ids ...int64) { if m.api_keys == nil { @@ -15403,7 +15540,7 @@ func (m *UserMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). func (m *UserMutation) Fields() []string { - fields := make([]string, 0, 11) + fields := make([]string, 0, 14) if m.created_at != nil { fields = append(fields, user.FieldCreatedAt) } @@ -15437,6 +15574,15 @@ func (m *UserMutation) Fields() []string { if m.notes != nil { fields = append(fields, user.FieldNotes) } + if m.totp_secret_encrypted != nil { + fields = append(fields, user.FieldTotpSecretEncrypted) + } + if m.totp_enabled != nil { + fields = append(fields, user.FieldTotpEnabled) + } + if m.totp_enabled_at != nil { + fields = append(fields, user.FieldTotpEnabledAt) + } return fields } @@ -15467,6 +15613,12 @@ func (m *UserMutation) Field(name string) (ent.Value, bool) { return m.Username() case user.FieldNotes: return m.Notes() + case user.FieldTotpSecretEncrypted: + return m.TotpSecretEncrypted() + case user.FieldTotpEnabled: + return m.TotpEnabled() + case user.FieldTotpEnabledAt: + return m.TotpEnabledAt() } return nil, false } @@ -15498,6 +15650,12 @@ func (m *UserMutation) OldField(ctx context.Context, name string) (ent.Value, er return m.OldUsername(ctx) case user.FieldNotes: return m.OldNotes(ctx) + case user.FieldTotpSecretEncrypted: + return m.OldTotpSecretEncrypted(ctx) + case user.FieldTotpEnabled: + return m.OldTotpEnabled(ctx) + case user.FieldTotpEnabledAt: + return m.OldTotpEnabledAt(ctx) } return nil, fmt.Errorf("unknown User field %s", name) } @@ -15584,6 +15742,27 @@ func (m *UserMutation) SetField(name string, value ent.Value) error { } m.SetNotes(v) return nil + case user.FieldTotpSecretEncrypted: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetTotpSecretEncrypted(v) + return nil + case user.FieldTotpEnabled: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetTotpEnabled(v) + return nil + case user.FieldTotpEnabledAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetTotpEnabledAt(v) + return nil } return fmt.Errorf("unknown User field %s", name) } @@ -15644,6 +15823,12 @@ func (m *UserMutation) ClearedFields() []string { if m.FieldCleared(user.FieldDeletedAt) { fields = append(fields, user.FieldDeletedAt) } + if m.FieldCleared(user.FieldTotpSecretEncrypted) { + fields = append(fields, user.FieldTotpSecretEncrypted) + } + if m.FieldCleared(user.FieldTotpEnabledAt) { + fields = append(fields, user.FieldTotpEnabledAt) + } return fields } @@ -15661,6 +15846,12 @@ func (m *UserMutation) ClearField(name string) error { case user.FieldDeletedAt: m.ClearDeletedAt() return nil + case user.FieldTotpSecretEncrypted: + m.ClearTotpSecretEncrypted() + return nil + case user.FieldTotpEnabledAt: + m.ClearTotpEnabledAt() + return nil } return fmt.Errorf("unknown User nullable field %s", name) } @@ -15702,6 +15893,15 @@ func (m *UserMutation) ResetField(name string) error { case user.FieldNotes: m.ResetNotes() return nil + case user.FieldTotpSecretEncrypted: + m.ResetTotpSecretEncrypted() + return nil + case user.FieldTotpEnabled: + m.ResetTotpEnabled() + return nil + case user.FieldTotpEnabledAt: + m.ResetTotpEnabledAt() + return nil } return fmt.Errorf("unknown User field %s", name) } diff --git a/backend/ent/runtime/runtime.go b/backend/ent/runtime/runtime.go index 1e3f4cbe..14323f8c 100644 --- a/backend/ent/runtime/runtime.go +++ b/backend/ent/runtime/runtime.go @@ -736,6 +736,10 @@ func init() { userDescNotes := userFields[7].Descriptor() // user.DefaultNotes holds the default value on creation for the notes field. user.DefaultNotes = userDescNotes.Default.(string) + // userDescTotpEnabled is the schema descriptor for totp_enabled field. + userDescTotpEnabled := userFields[9].Descriptor() + // user.DefaultTotpEnabled holds the default value on creation for the totp_enabled field. + user.DefaultTotpEnabled = userDescTotpEnabled.Default.(bool) userallowedgroupFields := schema.UserAllowedGroup{}.Fields() _ = userallowedgroupFields // userallowedgroupDescCreatedAt is the schema descriptor for created_at field. diff --git a/backend/ent/schema/user.go b/backend/ent/schema/user.go index 79dc2286..335c1cc8 100644 --- a/backend/ent/schema/user.go +++ b/backend/ent/schema/user.go @@ -61,6 +61,17 @@ func (User) Fields() []ent.Field { field.String("notes"). SchemaType(map[string]string{dialect.Postgres: "text"}). Default(""), + + // TOTP 双因素认证字段 + field.String("totp_secret_encrypted"). + SchemaType(map[string]string{dialect.Postgres: "text"}). + Optional(). + Nillable(), + field.Bool("totp_enabled"). + Default(false), + field.Time("totp_enabled_at"). + Optional(). + Nillable(), } } diff --git a/backend/ent/user.go b/backend/ent/user.go index 0b9a48cc..82830a95 100644 --- a/backend/ent/user.go +++ b/backend/ent/user.go @@ -39,6 +39,12 @@ type User struct { Username string `json:"username,omitempty"` // Notes holds the value of the "notes" field. Notes string `json:"notes,omitempty"` + // TotpSecretEncrypted holds the value of the "totp_secret_encrypted" field. + TotpSecretEncrypted *string `json:"totp_secret_encrypted,omitempty"` + // TotpEnabled holds the value of the "totp_enabled" field. + TotpEnabled bool `json:"totp_enabled,omitempty"` + // TotpEnabledAt holds the value of the "totp_enabled_at" field. + TotpEnabledAt *time.Time `json:"totp_enabled_at,omitempty"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the UserQuery when eager-loading is set. Edges UserEdges `json:"edges"` @@ -156,13 +162,15 @@ func (*User) scanValues(columns []string) ([]any, error) { values := make([]any, len(columns)) for i := range columns { switch columns[i] { + case user.FieldTotpEnabled: + values[i] = new(sql.NullBool) case user.FieldBalance: values[i] = new(sql.NullFloat64) case user.FieldID, user.FieldConcurrency: values[i] = new(sql.NullInt64) - case user.FieldEmail, user.FieldPasswordHash, user.FieldRole, user.FieldStatus, user.FieldUsername, user.FieldNotes: + case user.FieldEmail, user.FieldPasswordHash, user.FieldRole, user.FieldStatus, user.FieldUsername, user.FieldNotes, user.FieldTotpSecretEncrypted: values[i] = new(sql.NullString) - case user.FieldCreatedAt, user.FieldUpdatedAt, user.FieldDeletedAt: + case user.FieldCreatedAt, user.FieldUpdatedAt, user.FieldDeletedAt, user.FieldTotpEnabledAt: values[i] = new(sql.NullTime) default: values[i] = new(sql.UnknownType) @@ -252,6 +260,26 @@ func (_m *User) assignValues(columns []string, values []any) error { } else if value.Valid { _m.Notes = value.String } + case user.FieldTotpSecretEncrypted: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field totp_secret_encrypted", values[i]) + } else if value.Valid { + _m.TotpSecretEncrypted = new(string) + *_m.TotpSecretEncrypted = value.String + } + case user.FieldTotpEnabled: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field totp_enabled", values[i]) + } else if value.Valid { + _m.TotpEnabled = value.Bool + } + case user.FieldTotpEnabledAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field totp_enabled_at", values[i]) + } else if value.Valid { + _m.TotpEnabledAt = new(time.Time) + *_m.TotpEnabledAt = value.Time + } default: _m.selectValues.Set(columns[i], values[i]) } @@ -367,6 +395,19 @@ func (_m *User) String() string { builder.WriteString(", ") builder.WriteString("notes=") builder.WriteString(_m.Notes) + builder.WriteString(", ") + if v := _m.TotpSecretEncrypted; v != nil { + builder.WriteString("totp_secret_encrypted=") + builder.WriteString(*v) + } + builder.WriteString(", ") + builder.WriteString("totp_enabled=") + builder.WriteString(fmt.Sprintf("%v", _m.TotpEnabled)) + builder.WriteString(", ") + if v := _m.TotpEnabledAt; v != nil { + builder.WriteString("totp_enabled_at=") + builder.WriteString(v.Format(time.ANSIC)) + } builder.WriteByte(')') return builder.String() } diff --git a/backend/ent/user/user.go b/backend/ent/user/user.go index 1be1d871..0685ed72 100644 --- a/backend/ent/user/user.go +++ b/backend/ent/user/user.go @@ -37,6 +37,12 @@ const ( FieldUsername = "username" // FieldNotes holds the string denoting the notes field in the database. FieldNotes = "notes" + // FieldTotpSecretEncrypted holds the string denoting the totp_secret_encrypted field in the database. + FieldTotpSecretEncrypted = "totp_secret_encrypted" + // FieldTotpEnabled holds the string denoting the totp_enabled field in the database. + FieldTotpEnabled = "totp_enabled" + // FieldTotpEnabledAt holds the string denoting the totp_enabled_at field in the database. + FieldTotpEnabledAt = "totp_enabled_at" // EdgeAPIKeys holds the string denoting the api_keys edge name in mutations. EdgeAPIKeys = "api_keys" // EdgeRedeemCodes holds the string denoting the redeem_codes edge name in mutations. @@ -134,6 +140,9 @@ var Columns = []string{ FieldStatus, FieldUsername, FieldNotes, + FieldTotpSecretEncrypted, + FieldTotpEnabled, + FieldTotpEnabledAt, } var ( @@ -188,6 +197,8 @@ var ( UsernameValidator func(string) error // DefaultNotes holds the default value on creation for the "notes" field. DefaultNotes string + // DefaultTotpEnabled holds the default value on creation for the "totp_enabled" field. + DefaultTotpEnabled bool ) // OrderOption defines the ordering options for the User queries. @@ -253,6 +264,21 @@ func ByNotes(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldNotes, opts...).ToFunc() } +// ByTotpSecretEncrypted orders the results by the totp_secret_encrypted field. +func ByTotpSecretEncrypted(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldTotpSecretEncrypted, opts...).ToFunc() +} + +// ByTotpEnabled orders the results by the totp_enabled field. +func ByTotpEnabled(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldTotpEnabled, opts...).ToFunc() +} + +// ByTotpEnabledAt orders the results by the totp_enabled_at field. +func ByTotpEnabledAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldTotpEnabledAt, opts...).ToFunc() +} + // ByAPIKeysCount orders the results by api_keys count. func ByAPIKeysCount(opts ...sql.OrderTermOption) OrderOption { return func(s *sql.Selector) { diff --git a/backend/ent/user/where.go b/backend/ent/user/where.go index 6a460f10..3dc4fec7 100644 --- a/backend/ent/user/where.go +++ b/backend/ent/user/where.go @@ -110,6 +110,21 @@ func Notes(v string) predicate.User { return predicate.User(sql.FieldEQ(FieldNotes, v)) } +// TotpSecretEncrypted applies equality check predicate on the "totp_secret_encrypted" field. It's identical to TotpSecretEncryptedEQ. +func TotpSecretEncrypted(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldTotpSecretEncrypted, v)) +} + +// TotpEnabled applies equality check predicate on the "totp_enabled" field. It's identical to TotpEnabledEQ. +func TotpEnabled(v bool) predicate.User { + return predicate.User(sql.FieldEQ(FieldTotpEnabled, v)) +} + +// TotpEnabledAt applies equality check predicate on the "totp_enabled_at" field. It's identical to TotpEnabledAtEQ. +func TotpEnabledAt(v time.Time) predicate.User { + return predicate.User(sql.FieldEQ(FieldTotpEnabledAt, v)) +} + // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.User { return predicate.User(sql.FieldEQ(FieldCreatedAt, v)) @@ -710,6 +725,141 @@ func NotesContainsFold(v string) predicate.User { return predicate.User(sql.FieldContainsFold(FieldNotes, v)) } +// TotpSecretEncryptedEQ applies the EQ predicate on the "totp_secret_encrypted" field. +func TotpSecretEncryptedEQ(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldTotpSecretEncrypted, v)) +} + +// TotpSecretEncryptedNEQ applies the NEQ predicate on the "totp_secret_encrypted" field. +func TotpSecretEncryptedNEQ(v string) predicate.User { + return predicate.User(sql.FieldNEQ(FieldTotpSecretEncrypted, v)) +} + +// TotpSecretEncryptedIn applies the In predicate on the "totp_secret_encrypted" field. +func TotpSecretEncryptedIn(vs ...string) predicate.User { + return predicate.User(sql.FieldIn(FieldTotpSecretEncrypted, vs...)) +} + +// TotpSecretEncryptedNotIn applies the NotIn predicate on the "totp_secret_encrypted" field. +func TotpSecretEncryptedNotIn(vs ...string) predicate.User { + return predicate.User(sql.FieldNotIn(FieldTotpSecretEncrypted, vs...)) +} + +// TotpSecretEncryptedGT applies the GT predicate on the "totp_secret_encrypted" field. +func TotpSecretEncryptedGT(v string) predicate.User { + return predicate.User(sql.FieldGT(FieldTotpSecretEncrypted, v)) +} + +// TotpSecretEncryptedGTE applies the GTE predicate on the "totp_secret_encrypted" field. +func TotpSecretEncryptedGTE(v string) predicate.User { + return predicate.User(sql.FieldGTE(FieldTotpSecretEncrypted, v)) +} + +// TotpSecretEncryptedLT applies the LT predicate on the "totp_secret_encrypted" field. +func TotpSecretEncryptedLT(v string) predicate.User { + return predicate.User(sql.FieldLT(FieldTotpSecretEncrypted, v)) +} + +// TotpSecretEncryptedLTE applies the LTE predicate on the "totp_secret_encrypted" field. +func TotpSecretEncryptedLTE(v string) predicate.User { + return predicate.User(sql.FieldLTE(FieldTotpSecretEncrypted, v)) +} + +// TotpSecretEncryptedContains applies the Contains predicate on the "totp_secret_encrypted" field. +func TotpSecretEncryptedContains(v string) predicate.User { + return predicate.User(sql.FieldContains(FieldTotpSecretEncrypted, v)) +} + +// TotpSecretEncryptedHasPrefix applies the HasPrefix predicate on the "totp_secret_encrypted" field. +func TotpSecretEncryptedHasPrefix(v string) predicate.User { + return predicate.User(sql.FieldHasPrefix(FieldTotpSecretEncrypted, v)) +} + +// TotpSecretEncryptedHasSuffix applies the HasSuffix predicate on the "totp_secret_encrypted" field. +func TotpSecretEncryptedHasSuffix(v string) predicate.User { + return predicate.User(sql.FieldHasSuffix(FieldTotpSecretEncrypted, v)) +} + +// TotpSecretEncryptedIsNil applies the IsNil predicate on the "totp_secret_encrypted" field. +func TotpSecretEncryptedIsNil() predicate.User { + return predicate.User(sql.FieldIsNull(FieldTotpSecretEncrypted)) +} + +// TotpSecretEncryptedNotNil applies the NotNil predicate on the "totp_secret_encrypted" field. +func TotpSecretEncryptedNotNil() predicate.User { + return predicate.User(sql.FieldNotNull(FieldTotpSecretEncrypted)) +} + +// TotpSecretEncryptedEqualFold applies the EqualFold predicate on the "totp_secret_encrypted" field. +func TotpSecretEncryptedEqualFold(v string) predicate.User { + return predicate.User(sql.FieldEqualFold(FieldTotpSecretEncrypted, v)) +} + +// TotpSecretEncryptedContainsFold applies the ContainsFold predicate on the "totp_secret_encrypted" field. +func TotpSecretEncryptedContainsFold(v string) predicate.User { + return predicate.User(sql.FieldContainsFold(FieldTotpSecretEncrypted, v)) +} + +// TotpEnabledEQ applies the EQ predicate on the "totp_enabled" field. +func TotpEnabledEQ(v bool) predicate.User { + return predicate.User(sql.FieldEQ(FieldTotpEnabled, v)) +} + +// TotpEnabledNEQ applies the NEQ predicate on the "totp_enabled" field. +func TotpEnabledNEQ(v bool) predicate.User { + return predicate.User(sql.FieldNEQ(FieldTotpEnabled, v)) +} + +// TotpEnabledAtEQ applies the EQ predicate on the "totp_enabled_at" field. +func TotpEnabledAtEQ(v time.Time) predicate.User { + return predicate.User(sql.FieldEQ(FieldTotpEnabledAt, v)) +} + +// TotpEnabledAtNEQ applies the NEQ predicate on the "totp_enabled_at" field. +func TotpEnabledAtNEQ(v time.Time) predicate.User { + return predicate.User(sql.FieldNEQ(FieldTotpEnabledAt, v)) +} + +// TotpEnabledAtIn applies the In predicate on the "totp_enabled_at" field. +func TotpEnabledAtIn(vs ...time.Time) predicate.User { + return predicate.User(sql.FieldIn(FieldTotpEnabledAt, vs...)) +} + +// TotpEnabledAtNotIn applies the NotIn predicate on the "totp_enabled_at" field. +func TotpEnabledAtNotIn(vs ...time.Time) predicate.User { + return predicate.User(sql.FieldNotIn(FieldTotpEnabledAt, vs...)) +} + +// TotpEnabledAtGT applies the GT predicate on the "totp_enabled_at" field. +func TotpEnabledAtGT(v time.Time) predicate.User { + return predicate.User(sql.FieldGT(FieldTotpEnabledAt, v)) +} + +// TotpEnabledAtGTE applies the GTE predicate on the "totp_enabled_at" field. +func TotpEnabledAtGTE(v time.Time) predicate.User { + return predicate.User(sql.FieldGTE(FieldTotpEnabledAt, v)) +} + +// TotpEnabledAtLT applies the LT predicate on the "totp_enabled_at" field. +func TotpEnabledAtLT(v time.Time) predicate.User { + return predicate.User(sql.FieldLT(FieldTotpEnabledAt, v)) +} + +// TotpEnabledAtLTE applies the LTE predicate on the "totp_enabled_at" field. +func TotpEnabledAtLTE(v time.Time) predicate.User { + return predicate.User(sql.FieldLTE(FieldTotpEnabledAt, v)) +} + +// TotpEnabledAtIsNil applies the IsNil predicate on the "totp_enabled_at" field. +func TotpEnabledAtIsNil() predicate.User { + return predicate.User(sql.FieldIsNull(FieldTotpEnabledAt)) +} + +// TotpEnabledAtNotNil applies the NotNil predicate on the "totp_enabled_at" field. +func TotpEnabledAtNotNil() predicate.User { + return predicate.User(sql.FieldNotNull(FieldTotpEnabledAt)) +} + // HasAPIKeys applies the HasEdge predicate on the "api_keys" edge. func HasAPIKeys() predicate.User { return predicate.User(func(s *sql.Selector) { diff --git a/backend/ent/user_create.go b/backend/ent/user_create.go index e12e476c..6b4ebc59 100644 --- a/backend/ent/user_create.go +++ b/backend/ent/user_create.go @@ -167,6 +167,48 @@ func (_c *UserCreate) SetNillableNotes(v *string) *UserCreate { return _c } +// SetTotpSecretEncrypted sets the "totp_secret_encrypted" field. +func (_c *UserCreate) SetTotpSecretEncrypted(v string) *UserCreate { + _c.mutation.SetTotpSecretEncrypted(v) + return _c +} + +// SetNillableTotpSecretEncrypted sets the "totp_secret_encrypted" field if the given value is not nil. +func (_c *UserCreate) SetNillableTotpSecretEncrypted(v *string) *UserCreate { + if v != nil { + _c.SetTotpSecretEncrypted(*v) + } + return _c +} + +// SetTotpEnabled sets the "totp_enabled" field. +func (_c *UserCreate) SetTotpEnabled(v bool) *UserCreate { + _c.mutation.SetTotpEnabled(v) + return _c +} + +// SetNillableTotpEnabled sets the "totp_enabled" field if the given value is not nil. +func (_c *UserCreate) SetNillableTotpEnabled(v *bool) *UserCreate { + if v != nil { + _c.SetTotpEnabled(*v) + } + return _c +} + +// SetTotpEnabledAt sets the "totp_enabled_at" field. +func (_c *UserCreate) SetTotpEnabledAt(v time.Time) *UserCreate { + _c.mutation.SetTotpEnabledAt(v) + return _c +} + +// SetNillableTotpEnabledAt sets the "totp_enabled_at" field if the given value is not nil. +func (_c *UserCreate) SetNillableTotpEnabledAt(v *time.Time) *UserCreate { + if v != nil { + _c.SetTotpEnabledAt(*v) + } + return _c +} + // AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs. func (_c *UserCreate) AddAPIKeyIDs(ids ...int64) *UserCreate { _c.mutation.AddAPIKeyIDs(ids...) @@ -362,6 +404,10 @@ func (_c *UserCreate) defaults() error { v := user.DefaultNotes _c.mutation.SetNotes(v) } + if _, ok := _c.mutation.TotpEnabled(); !ok { + v := user.DefaultTotpEnabled + _c.mutation.SetTotpEnabled(v) + } return nil } @@ -422,6 +468,9 @@ func (_c *UserCreate) check() error { if _, ok := _c.mutation.Notes(); !ok { return &ValidationError{Name: "notes", err: errors.New(`ent: missing required field "User.notes"`)} } + if _, ok := _c.mutation.TotpEnabled(); !ok { + return &ValidationError{Name: "totp_enabled", err: errors.New(`ent: missing required field "User.totp_enabled"`)} + } return nil } @@ -493,6 +542,18 @@ func (_c *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) { _spec.SetField(user.FieldNotes, field.TypeString, value) _node.Notes = value } + if value, ok := _c.mutation.TotpSecretEncrypted(); ok { + _spec.SetField(user.FieldTotpSecretEncrypted, field.TypeString, value) + _node.TotpSecretEncrypted = &value + } + if value, ok := _c.mutation.TotpEnabled(); ok { + _spec.SetField(user.FieldTotpEnabled, field.TypeBool, value) + _node.TotpEnabled = value + } + if value, ok := _c.mutation.TotpEnabledAt(); ok { + _spec.SetField(user.FieldTotpEnabledAt, field.TypeTime, value) + _node.TotpEnabledAt = &value + } if nodes := _c.mutation.APIKeysIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, @@ -815,6 +876,54 @@ func (u *UserUpsert) UpdateNotes() *UserUpsert { return u } +// SetTotpSecretEncrypted sets the "totp_secret_encrypted" field. +func (u *UserUpsert) SetTotpSecretEncrypted(v string) *UserUpsert { + u.Set(user.FieldTotpSecretEncrypted, v) + return u +} + +// UpdateTotpSecretEncrypted sets the "totp_secret_encrypted" field to the value that was provided on create. +func (u *UserUpsert) UpdateTotpSecretEncrypted() *UserUpsert { + u.SetExcluded(user.FieldTotpSecretEncrypted) + return u +} + +// ClearTotpSecretEncrypted clears the value of the "totp_secret_encrypted" field. +func (u *UserUpsert) ClearTotpSecretEncrypted() *UserUpsert { + u.SetNull(user.FieldTotpSecretEncrypted) + return u +} + +// SetTotpEnabled sets the "totp_enabled" field. +func (u *UserUpsert) SetTotpEnabled(v bool) *UserUpsert { + u.Set(user.FieldTotpEnabled, v) + return u +} + +// UpdateTotpEnabled sets the "totp_enabled" field to the value that was provided on create. +func (u *UserUpsert) UpdateTotpEnabled() *UserUpsert { + u.SetExcluded(user.FieldTotpEnabled) + return u +} + +// SetTotpEnabledAt sets the "totp_enabled_at" field. +func (u *UserUpsert) SetTotpEnabledAt(v time.Time) *UserUpsert { + u.Set(user.FieldTotpEnabledAt, v) + return u +} + +// UpdateTotpEnabledAt sets the "totp_enabled_at" field to the value that was provided on create. +func (u *UserUpsert) UpdateTotpEnabledAt() *UserUpsert { + u.SetExcluded(user.FieldTotpEnabledAt) + return u +} + +// ClearTotpEnabledAt clears the value of the "totp_enabled_at" field. +func (u *UserUpsert) ClearTotpEnabledAt() *UserUpsert { + u.SetNull(user.FieldTotpEnabledAt) + return u +} + // UpdateNewValues updates the mutable fields using the new values that were set on create. // Using this option is equivalent to using: // @@ -1021,6 +1130,62 @@ func (u *UserUpsertOne) UpdateNotes() *UserUpsertOne { }) } +// SetTotpSecretEncrypted sets the "totp_secret_encrypted" field. +func (u *UserUpsertOne) SetTotpSecretEncrypted(v string) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetTotpSecretEncrypted(v) + }) +} + +// UpdateTotpSecretEncrypted sets the "totp_secret_encrypted" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateTotpSecretEncrypted() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateTotpSecretEncrypted() + }) +} + +// ClearTotpSecretEncrypted clears the value of the "totp_secret_encrypted" field. +func (u *UserUpsertOne) ClearTotpSecretEncrypted() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.ClearTotpSecretEncrypted() + }) +} + +// SetTotpEnabled sets the "totp_enabled" field. +func (u *UserUpsertOne) SetTotpEnabled(v bool) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetTotpEnabled(v) + }) +} + +// UpdateTotpEnabled sets the "totp_enabled" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateTotpEnabled() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateTotpEnabled() + }) +} + +// SetTotpEnabledAt sets the "totp_enabled_at" field. +func (u *UserUpsertOne) SetTotpEnabledAt(v time.Time) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetTotpEnabledAt(v) + }) +} + +// UpdateTotpEnabledAt sets the "totp_enabled_at" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateTotpEnabledAt() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateTotpEnabledAt() + }) +} + +// ClearTotpEnabledAt clears the value of the "totp_enabled_at" field. +func (u *UserUpsertOne) ClearTotpEnabledAt() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.ClearTotpEnabledAt() + }) +} + // Exec executes the query. func (u *UserUpsertOne) Exec(ctx context.Context) error { if len(u.create.conflict) == 0 { @@ -1393,6 +1558,62 @@ func (u *UserUpsertBulk) UpdateNotes() *UserUpsertBulk { }) } +// SetTotpSecretEncrypted sets the "totp_secret_encrypted" field. +func (u *UserUpsertBulk) SetTotpSecretEncrypted(v string) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetTotpSecretEncrypted(v) + }) +} + +// UpdateTotpSecretEncrypted sets the "totp_secret_encrypted" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateTotpSecretEncrypted() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateTotpSecretEncrypted() + }) +} + +// ClearTotpSecretEncrypted clears the value of the "totp_secret_encrypted" field. +func (u *UserUpsertBulk) ClearTotpSecretEncrypted() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.ClearTotpSecretEncrypted() + }) +} + +// SetTotpEnabled sets the "totp_enabled" field. +func (u *UserUpsertBulk) SetTotpEnabled(v bool) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetTotpEnabled(v) + }) +} + +// UpdateTotpEnabled sets the "totp_enabled" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateTotpEnabled() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateTotpEnabled() + }) +} + +// SetTotpEnabledAt sets the "totp_enabled_at" field. +func (u *UserUpsertBulk) SetTotpEnabledAt(v time.Time) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetTotpEnabledAt(v) + }) +} + +// UpdateTotpEnabledAt sets the "totp_enabled_at" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateTotpEnabledAt() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateTotpEnabledAt() + }) +} + +// ClearTotpEnabledAt clears the value of the "totp_enabled_at" field. +func (u *UserUpsertBulk) ClearTotpEnabledAt() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.ClearTotpEnabledAt() + }) +} + // Exec executes the query. func (u *UserUpsertBulk) Exec(ctx context.Context) error { if u.create.err != nil { diff --git a/backend/ent/user_update.go b/backend/ent/user_update.go index cf189fea..b98a41c6 100644 --- a/backend/ent/user_update.go +++ b/backend/ent/user_update.go @@ -187,6 +187,60 @@ func (_u *UserUpdate) SetNillableNotes(v *string) *UserUpdate { return _u } +// SetTotpSecretEncrypted sets the "totp_secret_encrypted" field. +func (_u *UserUpdate) SetTotpSecretEncrypted(v string) *UserUpdate { + _u.mutation.SetTotpSecretEncrypted(v) + return _u +} + +// SetNillableTotpSecretEncrypted sets the "totp_secret_encrypted" field if the given value is not nil. +func (_u *UserUpdate) SetNillableTotpSecretEncrypted(v *string) *UserUpdate { + if v != nil { + _u.SetTotpSecretEncrypted(*v) + } + return _u +} + +// ClearTotpSecretEncrypted clears the value of the "totp_secret_encrypted" field. +func (_u *UserUpdate) ClearTotpSecretEncrypted() *UserUpdate { + _u.mutation.ClearTotpSecretEncrypted() + return _u +} + +// SetTotpEnabled sets the "totp_enabled" field. +func (_u *UserUpdate) SetTotpEnabled(v bool) *UserUpdate { + _u.mutation.SetTotpEnabled(v) + return _u +} + +// SetNillableTotpEnabled sets the "totp_enabled" field if the given value is not nil. +func (_u *UserUpdate) SetNillableTotpEnabled(v *bool) *UserUpdate { + if v != nil { + _u.SetTotpEnabled(*v) + } + return _u +} + +// SetTotpEnabledAt sets the "totp_enabled_at" field. +func (_u *UserUpdate) SetTotpEnabledAt(v time.Time) *UserUpdate { + _u.mutation.SetTotpEnabledAt(v) + return _u +} + +// SetNillableTotpEnabledAt sets the "totp_enabled_at" field if the given value is not nil. +func (_u *UserUpdate) SetNillableTotpEnabledAt(v *time.Time) *UserUpdate { + if v != nil { + _u.SetTotpEnabledAt(*v) + } + return _u +} + +// ClearTotpEnabledAt clears the value of the "totp_enabled_at" field. +func (_u *UserUpdate) ClearTotpEnabledAt() *UserUpdate { + _u.mutation.ClearTotpEnabledAt() + return _u +} + // AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs. func (_u *UserUpdate) AddAPIKeyIDs(ids ...int64) *UserUpdate { _u.mutation.AddAPIKeyIDs(ids...) @@ -603,6 +657,21 @@ func (_u *UserUpdate) sqlSave(ctx context.Context) (_node int, err error) { if value, ok := _u.mutation.Notes(); ok { _spec.SetField(user.FieldNotes, field.TypeString, value) } + if value, ok := _u.mutation.TotpSecretEncrypted(); ok { + _spec.SetField(user.FieldTotpSecretEncrypted, field.TypeString, value) + } + if _u.mutation.TotpSecretEncryptedCleared() { + _spec.ClearField(user.FieldTotpSecretEncrypted, field.TypeString) + } + if value, ok := _u.mutation.TotpEnabled(); ok { + _spec.SetField(user.FieldTotpEnabled, field.TypeBool, value) + } + if value, ok := _u.mutation.TotpEnabledAt(); ok { + _spec.SetField(user.FieldTotpEnabledAt, field.TypeTime, value) + } + if _u.mutation.TotpEnabledAtCleared() { + _spec.ClearField(user.FieldTotpEnabledAt, field.TypeTime) + } if _u.mutation.APIKeysCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, @@ -1147,6 +1216,60 @@ func (_u *UserUpdateOne) SetNillableNotes(v *string) *UserUpdateOne { return _u } +// SetTotpSecretEncrypted sets the "totp_secret_encrypted" field. +func (_u *UserUpdateOne) SetTotpSecretEncrypted(v string) *UserUpdateOne { + _u.mutation.SetTotpSecretEncrypted(v) + return _u +} + +// SetNillableTotpSecretEncrypted sets the "totp_secret_encrypted" field if the given value is not nil. +func (_u *UserUpdateOne) SetNillableTotpSecretEncrypted(v *string) *UserUpdateOne { + if v != nil { + _u.SetTotpSecretEncrypted(*v) + } + return _u +} + +// ClearTotpSecretEncrypted clears the value of the "totp_secret_encrypted" field. +func (_u *UserUpdateOne) ClearTotpSecretEncrypted() *UserUpdateOne { + _u.mutation.ClearTotpSecretEncrypted() + return _u +} + +// SetTotpEnabled sets the "totp_enabled" field. +func (_u *UserUpdateOne) SetTotpEnabled(v bool) *UserUpdateOne { + _u.mutation.SetTotpEnabled(v) + return _u +} + +// SetNillableTotpEnabled sets the "totp_enabled" field if the given value is not nil. +func (_u *UserUpdateOne) SetNillableTotpEnabled(v *bool) *UserUpdateOne { + if v != nil { + _u.SetTotpEnabled(*v) + } + return _u +} + +// SetTotpEnabledAt sets the "totp_enabled_at" field. +func (_u *UserUpdateOne) SetTotpEnabledAt(v time.Time) *UserUpdateOne { + _u.mutation.SetTotpEnabledAt(v) + return _u +} + +// SetNillableTotpEnabledAt sets the "totp_enabled_at" field if the given value is not nil. +func (_u *UserUpdateOne) SetNillableTotpEnabledAt(v *time.Time) *UserUpdateOne { + if v != nil { + _u.SetTotpEnabledAt(*v) + } + return _u +} + +// ClearTotpEnabledAt clears the value of the "totp_enabled_at" field. +func (_u *UserUpdateOne) ClearTotpEnabledAt() *UserUpdateOne { + _u.mutation.ClearTotpEnabledAt() + return _u +} + // AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs. func (_u *UserUpdateOne) AddAPIKeyIDs(ids ...int64) *UserUpdateOne { _u.mutation.AddAPIKeyIDs(ids...) @@ -1593,6 +1716,21 @@ func (_u *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) { if value, ok := _u.mutation.Notes(); ok { _spec.SetField(user.FieldNotes, field.TypeString, value) } + if value, ok := _u.mutation.TotpSecretEncrypted(); ok { + _spec.SetField(user.FieldTotpSecretEncrypted, field.TypeString, value) + } + if _u.mutation.TotpSecretEncryptedCleared() { + _spec.ClearField(user.FieldTotpSecretEncrypted, field.TypeString) + } + if value, ok := _u.mutation.TotpEnabled(); ok { + _spec.SetField(user.FieldTotpEnabled, field.TypeBool, value) + } + if value, ok := _u.mutation.TotpEnabledAt(); ok { + _spec.SetField(user.FieldTotpEnabledAt, field.TypeTime, value) + } + if _u.mutation.TotpEnabledAtCleared() { + _spec.ClearField(user.FieldTotpEnabledAt, field.TypeTime) + } if _u.mutation.APIKeysCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, diff --git a/backend/go.mod b/backend/go.mod index fd429b07..ad7d76b6 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -37,6 +37,7 @@ require ( github.com/andybalholm/brotli v1.2.0 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/bmatcuk/doublestar v1.3.4 // indirect + github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect github.com/bytedance/sonic v1.9.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect @@ -106,6 +107,7 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/pquerna/otp v1.5.0 // indirect github.com/quic-go/qpack v0.6.0 // indirect github.com/quic-go/quic-go v0.57.1 // indirect github.com/refraction-networking/utls v1.8.1 // indirect diff --git a/backend/go.sum b/backend/go.sum index aa10718c..0addb5bb 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -20,6 +20,8 @@ github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= github.com/bmatcuk/doublestar v1.3.4 h1:gPypJ5xD31uhX6Tf54sDPUOBXTqKH4c9aPY66CyQrS0= github.com/bmatcuk/doublestar v1.3.4/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE= +github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI= +github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= @@ -217,6 +219,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/pquerna/otp v1.5.0 h1:NMMR+WrmaqXU4EzdGJEE1aUUI0AMRzsp96fFFWNPwxs= +github.com/pquerna/otp v1.5.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8= diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index 00a78480..477cb59d 100644 --- a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -47,6 +47,7 @@ type Config struct { Redis RedisConfig `mapstructure:"redis"` Ops OpsConfig `mapstructure:"ops"` JWT JWTConfig `mapstructure:"jwt"` + Totp TotpConfig `mapstructure:"totp"` LinuxDo LinuxDoConnectConfig `mapstructure:"linuxdo_connect"` Default DefaultConfig `mapstructure:"default"` RateLimit RateLimitConfig `mapstructure:"rate_limit"` @@ -466,6 +467,16 @@ type JWTConfig struct { ExpireHour int `mapstructure:"expire_hour"` } +// TotpConfig TOTP 双因素认证配置 +type TotpConfig struct { + // EncryptionKey 用于加密 TOTP 密钥的 AES-256 密钥(32 字节 hex 编码) + // 如果为空,将自动生成一个随机密钥(仅适用于开发环境) + EncryptionKey string `mapstructure:"encryption_key"` + // EncryptionKeyConfigured 标记加密密钥是否为手动配置(非自动生成) + // 只有手动配置了密钥才允许在管理后台启用 TOTP 功能 + EncryptionKeyConfigured bool `mapstructure:"-"` +} + type TurnstileConfig struct { Required bool `mapstructure:"required"` } @@ -626,6 +637,20 @@ func Load() (*Config, error) { log.Println("Warning: JWT secret auto-generated. Consider setting a fixed secret for production.") } + // Auto-generate TOTP encryption key if not set (32 bytes = 64 hex chars for AES-256) + cfg.Totp.EncryptionKey = strings.TrimSpace(cfg.Totp.EncryptionKey) + if cfg.Totp.EncryptionKey == "" { + key, err := generateJWTSecret(32) // Reuse the same random generation function + if err != nil { + return nil, fmt.Errorf("generate totp encryption key error: %w", err) + } + cfg.Totp.EncryptionKey = key + cfg.Totp.EncryptionKeyConfigured = false + log.Println("Warning: TOTP encryption key auto-generated. Consider setting a fixed key for production.") + } else { + cfg.Totp.EncryptionKeyConfigured = true + } + if err := cfg.Validate(); err != nil { return nil, fmt.Errorf("validate config error: %w", err) } @@ -756,6 +781,9 @@ func setDefaults() { viper.SetDefault("jwt.secret", "") viper.SetDefault("jwt.expire_hour", 24) + // TOTP + viper.SetDefault("totp.encryption_key", "") + // Default // Admin credentials are created via the setup flow (web wizard / CLI / AUTO_SETUP). // Do not ship fixed defaults here to avoid insecure "known credentials" in production. diff --git a/backend/internal/handler/admin/setting_handler.go b/backend/internal/handler/admin/setting_handler.go index 53c94b6a..4a798fa1 100644 --- a/backend/internal/handler/admin/setting_handler.go +++ b/backend/internal/handler/admin/setting_handler.go @@ -49,6 +49,8 @@ func (h *SettingHandler) GetSettings(c *gin.Context) { EmailVerifyEnabled: settings.EmailVerifyEnabled, PromoCodeEnabled: settings.PromoCodeEnabled, PasswordResetEnabled: settings.PasswordResetEnabled, + TotpEnabled: settings.TotpEnabled, + TotpEncryptionKeyConfigured: h.settingService.IsTotpEncryptionKeyConfigured(), SMTPHost: settings.SMTPHost, SMTPPort: settings.SMTPPort, SMTPUsername: settings.SMTPUsername, @@ -94,6 +96,7 @@ type UpdateSettingsRequest struct { EmailVerifyEnabled bool `json:"email_verify_enabled"` PromoCodeEnabled bool `json:"promo_code_enabled"` PasswordResetEnabled bool `json:"password_reset_enabled"` + TotpEnabled bool `json:"totp_enabled"` // TOTP 双因素认证 // 邮件服务设置 SMTPHost string `json:"smtp_host"` @@ -200,6 +203,16 @@ func (h *SettingHandler) UpdateSettings(c *gin.Context) { } } + // TOTP 双因素认证参数验证 + // 只有手动配置了加密密钥才允许启用 TOTP 功能 + if req.TotpEnabled && !previousSettings.TotpEnabled { + // 尝试启用 TOTP,检查加密密钥是否已手动配置 + if !h.settingService.IsTotpEncryptionKeyConfigured() { + response.BadRequest(c, "Cannot enable TOTP: TOTP_ENCRYPTION_KEY environment variable must be configured first. Generate a key with 'openssl rand -hex 32' and set it in your environment.") + return + } + } + // LinuxDo Connect 参数验证 if req.LinuxDoConnectEnabled { req.LinuxDoConnectClientID = strings.TrimSpace(req.LinuxDoConnectClientID) @@ -246,6 +259,7 @@ func (h *SettingHandler) UpdateSettings(c *gin.Context) { EmailVerifyEnabled: req.EmailVerifyEnabled, PromoCodeEnabled: req.PromoCodeEnabled, PasswordResetEnabled: req.PasswordResetEnabled, + TotpEnabled: req.TotpEnabled, SMTPHost: req.SMTPHost, SMTPPort: req.SMTPPort, SMTPUsername: req.SMTPUsername, @@ -322,6 +336,8 @@ func (h *SettingHandler) UpdateSettings(c *gin.Context) { EmailVerifyEnabled: updatedSettings.EmailVerifyEnabled, PromoCodeEnabled: updatedSettings.PromoCodeEnabled, PasswordResetEnabled: updatedSettings.PasswordResetEnabled, + TotpEnabled: updatedSettings.TotpEnabled, + TotpEncryptionKeyConfigured: h.settingService.IsTotpEncryptionKeyConfigured(), SMTPHost: updatedSettings.SMTPHost, SMTPPort: updatedSettings.SMTPPort, SMTPUsername: updatedSettings.SMTPUsername, @@ -391,6 +407,9 @@ func diffSettings(before *service.SystemSettings, after *service.SystemSettings, if before.PasswordResetEnabled != after.PasswordResetEnabled { changed = append(changed, "password_reset_enabled") } + if before.TotpEnabled != after.TotpEnabled { + changed = append(changed, "totp_enabled") + } if before.SMTPHost != after.SMTPHost { changed = append(changed, "smtp_host") } diff --git a/backend/internal/handler/auth_handler.go b/backend/internal/handler/auth_handler.go index e73593bd..3522407d 100644 --- a/backend/internal/handler/auth_handler.go +++ b/backend/internal/handler/auth_handler.go @@ -1,6 +1,8 @@ package handler import ( + "log/slog" + "github.com/Wei-Shaw/sub2api/internal/config" "github.com/Wei-Shaw/sub2api/internal/handler/dto" "github.com/Wei-Shaw/sub2api/internal/pkg/ip" @@ -18,16 +20,18 @@ type AuthHandler struct { userService *service.UserService settingSvc *service.SettingService promoService *service.PromoService + totpService *service.TotpService } // NewAuthHandler creates a new AuthHandler -func NewAuthHandler(cfg *config.Config, authService *service.AuthService, userService *service.UserService, settingService *service.SettingService, promoService *service.PromoService) *AuthHandler { +func NewAuthHandler(cfg *config.Config, authService *service.AuthService, userService *service.UserService, settingService *service.SettingService, promoService *service.PromoService, totpService *service.TotpService) *AuthHandler { return &AuthHandler{ cfg: cfg, authService: authService, userService: userService, settingSvc: settingService, promoService: promoService, + totpService: totpService, } } @@ -144,6 +148,100 @@ func (h *AuthHandler) Login(c *gin.Context) { return } + // Check if TOTP 2FA is enabled for this user + if h.totpService != nil && h.settingSvc.IsTotpEnabled(c.Request.Context()) && user.TotpEnabled { + // Create a temporary login session for 2FA + tempToken, err := h.totpService.CreateLoginSession(c.Request.Context(), user.ID, user.Email) + if err != nil { + response.InternalError(c, "Failed to create 2FA session") + return + } + + response.Success(c, TotpLoginResponse{ + Requires2FA: true, + TempToken: tempToken, + UserEmailMasked: service.MaskEmail(user.Email), + }) + return + } + + response.Success(c, AuthResponse{ + AccessToken: token, + TokenType: "Bearer", + User: dto.UserFromService(user), + }) +} + +// TotpLoginResponse represents the response when 2FA is required +type TotpLoginResponse struct { + Requires2FA bool `json:"requires_2fa"` + TempToken string `json:"temp_token,omitempty"` + UserEmailMasked string `json:"user_email_masked,omitempty"` +} + +// Login2FARequest represents the 2FA login request +type Login2FARequest struct { + TempToken string `json:"temp_token" binding:"required"` + TotpCode string `json:"totp_code" binding:"required,len=6"` +} + +// Login2FA completes the login with 2FA verification +// POST /api/v1/auth/login/2fa +func (h *AuthHandler) Login2FA(c *gin.Context) { + var req Login2FARequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + slog.Debug("login_2fa_request", + "temp_token_len", len(req.TempToken), + "totp_code_len", len(req.TotpCode)) + + // Get the login session + session, err := h.totpService.GetLoginSession(c.Request.Context(), req.TempToken) + if err != nil || session == nil { + tokenPrefix := "" + if len(req.TempToken) >= 8 { + tokenPrefix = req.TempToken[:8] + } + slog.Debug("login_2fa_session_invalid", + "temp_token_prefix", tokenPrefix, + "error", err) + response.BadRequest(c, "Invalid or expired 2FA session") + return + } + + slog.Debug("login_2fa_session_found", + "user_id", session.UserID, + "email", session.Email) + + // Verify the TOTP code + if err := h.totpService.VerifyCode(c.Request.Context(), session.UserID, req.TotpCode); err != nil { + slog.Debug("login_2fa_verify_failed", + "user_id", session.UserID, + "error", err) + response.ErrorFrom(c, err) + return + } + + // Delete the login session + _ = h.totpService.DeleteLoginSession(c.Request.Context(), req.TempToken) + + // Get the user + user, err := h.userService.GetByID(c.Request.Context(), session.UserID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + // Generate the JWT token + token, err := h.authService.GenerateToken(user) + if err != nil { + response.InternalError(c, "Failed to generate token") + return + } + response.Success(c, AuthResponse{ AccessToken: token, TokenType: "Bearer", diff --git a/backend/internal/handler/dto/settings.go b/backend/internal/handler/dto/settings.go index f98c7750..fc7b1349 100644 --- a/backend/internal/handler/dto/settings.go +++ b/backend/internal/handler/dto/settings.go @@ -2,10 +2,12 @@ package dto // SystemSettings represents the admin settings API response payload. type SystemSettings struct { - RegistrationEnabled bool `json:"registration_enabled"` - EmailVerifyEnabled bool `json:"email_verify_enabled"` - PromoCodeEnabled bool `json:"promo_code_enabled"` - PasswordResetEnabled bool `json:"password_reset_enabled"` + RegistrationEnabled bool `json:"registration_enabled"` + EmailVerifyEnabled bool `json:"email_verify_enabled"` + PromoCodeEnabled bool `json:"promo_code_enabled"` + PasswordResetEnabled bool `json:"password_reset_enabled"` + TotpEnabled bool `json:"totp_enabled"` // TOTP 双因素认证 + TotpEncryptionKeyConfigured bool `json:"totp_encryption_key_configured"` // TOTP 加密密钥是否已配置 SMTPHost string `json:"smtp_host"` SMTPPort int `json:"smtp_port"` @@ -59,6 +61,7 @@ type PublicSettings struct { EmailVerifyEnabled bool `json:"email_verify_enabled"` PromoCodeEnabled bool `json:"promo_code_enabled"` PasswordResetEnabled bool `json:"password_reset_enabled"` + TotpEnabled bool `json:"totp_enabled"` // TOTP 双因素认证 TurnstileEnabled bool `json:"turnstile_enabled"` TurnstileSiteKey string `json:"turnstile_site_key"` SiteName string `json:"site_name"` diff --git a/backend/internal/handler/handler.go b/backend/internal/handler/handler.go index 5b1b317d..907c314d 100644 --- a/backend/internal/handler/handler.go +++ b/backend/internal/handler/handler.go @@ -37,6 +37,7 @@ type Handlers struct { Gateway *GatewayHandler OpenAIGateway *OpenAIGatewayHandler Setting *SettingHandler + Totp *TotpHandler } // BuildInfo contains build-time information diff --git a/backend/internal/handler/totp_handler.go b/backend/internal/handler/totp_handler.go new file mode 100644 index 00000000..5c5eb567 --- /dev/null +++ b/backend/internal/handler/totp_handler.go @@ -0,0 +1,181 @@ +package handler + +import ( + "github.com/gin-gonic/gin" + + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware" + "github.com/Wei-Shaw/sub2api/internal/service" +) + +// TotpHandler handles TOTP-related requests +type TotpHandler struct { + totpService *service.TotpService +} + +// NewTotpHandler creates a new TotpHandler +func NewTotpHandler(totpService *service.TotpService) *TotpHandler { + return &TotpHandler{ + totpService: totpService, + } +} + +// TotpStatusResponse represents the TOTP status response +type TotpStatusResponse struct { + Enabled bool `json:"enabled"` + EnabledAt *int64 `json:"enabled_at,omitempty"` // Unix timestamp + FeatureEnabled bool `json:"feature_enabled"` +} + +// GetStatus returns the TOTP status for the current user +// GET /api/v1/user/totp/status +func (h *TotpHandler) GetStatus(c *gin.Context) { + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok { + response.Unauthorized(c, "User not authenticated") + return + } + + status, err := h.totpService.GetStatus(c.Request.Context(), subject.UserID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + resp := TotpStatusResponse{ + Enabled: status.Enabled, + FeatureEnabled: status.FeatureEnabled, + } + + if status.EnabledAt != nil { + ts := status.EnabledAt.Unix() + resp.EnabledAt = &ts + } + + response.Success(c, resp) +} + +// TotpSetupRequest represents the request to initiate TOTP setup +type TotpSetupRequest struct { + EmailCode string `json:"email_code"` + Password string `json:"password"` +} + +// TotpSetupResponse represents the TOTP setup response +type TotpSetupResponse struct { + Secret string `json:"secret"` + QRCodeURL string `json:"qr_code_url"` + SetupToken string `json:"setup_token"` + Countdown int `json:"countdown"` +} + +// InitiateSetup starts the TOTP setup process +// POST /api/v1/user/totp/setup +func (h *TotpHandler) InitiateSetup(c *gin.Context) { + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok { + response.Unauthorized(c, "User not authenticated") + return + } + + var req TotpSetupRequest + if err := c.ShouldBindJSON(&req); err != nil { + // Allow empty body (optional params) + req = TotpSetupRequest{} + } + + result, err := h.totpService.InitiateSetup(c.Request.Context(), subject.UserID, req.EmailCode, req.Password) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, TotpSetupResponse{ + Secret: result.Secret, + QRCodeURL: result.QRCodeURL, + SetupToken: result.SetupToken, + Countdown: result.Countdown, + }) +} + +// TotpEnableRequest represents the request to enable TOTP +type TotpEnableRequest struct { + TotpCode string `json:"totp_code" binding:"required,len=6"` + SetupToken string `json:"setup_token" binding:"required"` +} + +// Enable completes the TOTP setup +// POST /api/v1/user/totp/enable +func (h *TotpHandler) Enable(c *gin.Context) { + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok { + response.Unauthorized(c, "User not authenticated") + return + } + + var req TotpEnableRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + if err := h.totpService.CompleteSetup(c.Request.Context(), subject.UserID, req.TotpCode, req.SetupToken); err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, gin.H{"success": true}) +} + +// TotpDisableRequest represents the request to disable TOTP +type TotpDisableRequest struct { + EmailCode string `json:"email_code"` + Password string `json:"password"` +} + +// Disable disables TOTP for the current user +// POST /api/v1/user/totp/disable +func (h *TotpHandler) Disable(c *gin.Context) { + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok { + response.Unauthorized(c, "User not authenticated") + return + } + + var req TotpDisableRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + if err := h.totpService.Disable(c.Request.Context(), subject.UserID, req.EmailCode, req.Password); err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, gin.H{"success": true}) +} + +// GetVerificationMethod returns the verification method for TOTP operations +// GET /api/v1/user/totp/verification-method +func (h *TotpHandler) GetVerificationMethod(c *gin.Context) { + method := h.totpService.GetVerificationMethod(c.Request.Context()) + response.Success(c, method) +} + +// SendVerifyCode sends an email verification code for TOTP operations +// POST /api/v1/user/totp/send-code +func (h *TotpHandler) SendVerifyCode(c *gin.Context) { + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok { + response.Unauthorized(c, "User not authenticated") + return + } + + if err := h.totpService.SendVerifyCode(c.Request.Context(), subject.UserID); err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, gin.H{"success": true}) +} diff --git a/backend/internal/handler/wire.go b/backend/internal/handler/wire.go index 2af7905e..92e8edeb 100644 --- a/backend/internal/handler/wire.go +++ b/backend/internal/handler/wire.go @@ -70,6 +70,7 @@ func ProvideHandlers( gatewayHandler *GatewayHandler, openaiGatewayHandler *OpenAIGatewayHandler, settingHandler *SettingHandler, + totpHandler *TotpHandler, ) *Handlers { return &Handlers{ Auth: authHandler, @@ -82,6 +83,7 @@ func ProvideHandlers( Gateway: gatewayHandler, OpenAIGateway: openaiGatewayHandler, Setting: settingHandler, + Totp: totpHandler, } } @@ -96,6 +98,7 @@ var ProviderSet = wire.NewSet( NewSubscriptionHandler, NewGatewayHandler, NewOpenAIGatewayHandler, + NewTotpHandler, ProvideSettingHandler, // Admin handlers diff --git a/backend/internal/pkg/tlsfingerprint/dialer_integration_test.go b/backend/internal/pkg/tlsfingerprint/dialer_integration_test.go new file mode 100644 index 00000000..eea74fcc --- /dev/null +++ b/backend/internal/pkg/tlsfingerprint/dialer_integration_test.go @@ -0,0 +1,278 @@ +//go:build integration + +// Package tlsfingerprint provides TLS fingerprint simulation for HTTP clients. +// +// Integration tests for verifying TLS fingerprint correctness. +// These tests make actual network requests to external services and should be run manually. +// +// Run with: go test -v -tags=integration ./internal/pkg/tlsfingerprint/... +package tlsfingerprint + +import ( + "context" + "encoding/json" + "io" + "net/http" + "strings" + "testing" + "time" +) + +// skipIfExternalServiceUnavailable checks if the external service is available. +// If not, it skips the test instead of failing. +func skipIfExternalServiceUnavailable(t *testing.T, err error) { + t.Helper() + if err != nil { + // Check for common network/TLS errors that indicate external service issues + errStr := err.Error() + if strings.Contains(errStr, "certificate has expired") || + strings.Contains(errStr, "certificate is not yet valid") || + strings.Contains(errStr, "connection refused") || + strings.Contains(errStr, "no such host") || + strings.Contains(errStr, "network is unreachable") || + strings.Contains(errStr, "timeout") { + t.Skipf("skipping test: external service unavailable: %v", err) + } + t.Fatalf("failed to get fingerprint: %v", err) + } +} + +// TestJA3Fingerprint verifies the JA3/JA4 fingerprint matches expected value. +// This test uses tls.peet.ws to verify the fingerprint. +// Expected JA3 hash: 1a28e69016765d92e3b381168d68922c (Claude CLI / Node.js 20.x) +// Expected JA4: t13d5911h1_a33745022dd6_1f22a2ca17c4 (d=domain) or t13i5911h1_... (i=IP) +func TestJA3Fingerprint(t *testing.T) { + // Skip if network is unavailable or if running in short mode + if testing.Short() { + t.Skip("skipping integration test in short mode") + } + + profile := &Profile{ + Name: "Claude CLI Test", + EnableGREASE: false, + } + dialer := NewDialer(profile, nil) + + client := &http.Client{ + Transport: &http.Transport{ + DialTLSContext: dialer.DialTLSContext, + }, + Timeout: 30 * time.Second, + } + + // Use tls.peet.ws fingerprint detection API + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, "GET", "https://tls.peet.ws/api/all", nil) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + req.Header.Set("User-Agent", "Claude Code/2.0.0 Node.js/20.0.0") + + resp, err := client.Do(req) + skipIfExternalServiceUnavailable(t, err) + defer func() { _ = resp.Body.Close() }() + + body, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("failed to read response: %v", err) + } + + var fpResp FingerprintResponse + if err := json.Unmarshal(body, &fpResp); err != nil { + t.Logf("Response body: %s", string(body)) + t.Fatalf("failed to parse fingerprint response: %v", err) + } + + // Log all fingerprint information + t.Logf("JA3: %s", fpResp.TLS.JA3) + t.Logf("JA3 Hash: %s", fpResp.TLS.JA3Hash) + t.Logf("JA4: %s", fpResp.TLS.JA4) + t.Logf("PeetPrint: %s", fpResp.TLS.PeetPrint) + t.Logf("PeetPrint Hash: %s", fpResp.TLS.PeetPrintHash) + + // Verify JA3 hash matches expected value + expectedJA3Hash := "1a28e69016765d92e3b381168d68922c" + if fpResp.TLS.JA3Hash == expectedJA3Hash { + t.Logf("✓ JA3 hash matches expected value: %s", expectedJA3Hash) + } else { + t.Errorf("✗ JA3 hash mismatch: got %s, expected %s", fpResp.TLS.JA3Hash, expectedJA3Hash) + } + + // Verify JA4 fingerprint + // JA4 format: t[version][sni][cipher_count][ext_count][alpn]_[cipher_hash]_[ext_hash] + // Expected: t13d5910h1 (d=domain) or t13i5910h1 (i=IP) + // The suffix _a33745022dd6_1f22a2ca17c4 should match + expectedJA4Suffix := "_a33745022dd6_1f22a2ca17c4" + if strings.HasSuffix(fpResp.TLS.JA4, expectedJA4Suffix) { + t.Logf("✓ JA4 suffix matches expected value: %s", expectedJA4Suffix) + } else { + t.Errorf("✗ JA4 suffix mismatch: got %s, expected suffix %s", fpResp.TLS.JA4, expectedJA4Suffix) + } + + // Verify JA4 prefix (t13d5911h1 or t13i5911h1) + // d = domain (SNI present), i = IP (no SNI) + // Since we connect to tls.peet.ws (domain), we expect 'd' + expectedJA4Prefix := "t13d5911h1" + if strings.HasPrefix(fpResp.TLS.JA4, expectedJA4Prefix) { + t.Logf("✓ JA4 prefix matches: %s (t13=TLS1.3, d=domain, 59=ciphers, 11=extensions, h1=HTTP/1.1)", expectedJA4Prefix) + } else { + // Also accept 'i' variant for IP connections + altPrefix := "t13i5911h1" + if strings.HasPrefix(fpResp.TLS.JA4, altPrefix) { + t.Logf("✓ JA4 prefix matches (IP variant): %s", altPrefix) + } else { + t.Errorf("✗ JA4 prefix mismatch: got %s, expected %s or %s", fpResp.TLS.JA4, expectedJA4Prefix, altPrefix) + } + } + + // Verify JA3 contains expected cipher suites (TLS 1.3 ciphers at the beginning) + if strings.Contains(fpResp.TLS.JA3, "4866-4867-4865") { + t.Logf("✓ JA3 contains expected TLS 1.3 cipher suites") + } else { + t.Logf("Warning: JA3 does not contain expected TLS 1.3 cipher suites") + } + + // Verify extension list (should be 11 extensions including SNI) + // Expected: 0-11-10-35-16-22-23-13-43-45-51 + expectedExtensions := "0-11-10-35-16-22-23-13-43-45-51" + if strings.Contains(fpResp.TLS.JA3, expectedExtensions) { + t.Logf("✓ JA3 contains expected extension list: %s", expectedExtensions) + } else { + t.Logf("Warning: JA3 extension list may differ") + } +} + +// TestProfileExpectation defines expected fingerprint values for a profile. +type TestProfileExpectation struct { + Profile *Profile + ExpectedJA3 string // Expected JA3 hash (empty = don't check) + ExpectedJA4 string // Expected full JA4 (empty = don't check) + JA4CipherHash string // Expected JA4 cipher hash - the stable middle part (empty = don't check) +} + +// TestAllProfiles tests multiple TLS fingerprint profiles against tls.peet.ws. +// Run with: go test -v -tags=integration -run TestAllProfiles ./internal/pkg/tlsfingerprint/... +func TestAllProfiles(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode") + } + + // Define all profiles to test with their expected fingerprints + // These profiles are from config.yaml gateway.tls_fingerprint.profiles + profiles := []TestProfileExpectation{ + { + // Linux x64 Node.js v22.17.1 + // Expected JA3 Hash: 1a28e69016765d92e3b381168d68922c + // Expected JA4: t13d5911h1_a33745022dd6_1f22a2ca17c4 + Profile: &Profile{ + Name: "linux_x64_node_v22171", + EnableGREASE: false, + CipherSuites: []uint16{4866, 4867, 4865, 49199, 49195, 49200, 49196, 158, 49191, 103, 49192, 107, 163, 159, 52393, 52392, 52394, 49327, 49325, 49315, 49311, 49245, 49249, 49239, 49235, 162, 49326, 49324, 49314, 49310, 49244, 49248, 49238, 49234, 49188, 106, 49187, 64, 49162, 49172, 57, 56, 49161, 49171, 51, 50, 157, 49313, 49309, 49233, 156, 49312, 49308, 49232, 61, 60, 53, 47, 255}, + Curves: []uint16{29, 23, 30, 25, 24, 256, 257, 258, 259, 260}, + PointFormats: []uint8{0, 1, 2}, + }, + JA4CipherHash: "a33745022dd6", // stable part + }, + { + // MacOS arm64 Node.js v22.18.0 + // Expected JA3 Hash: 70cb5ca646080902703ffda87036a5ea + // Expected JA4: t13d5912h1_a33745022dd6_dbd39dd1d406 + Profile: &Profile{ + Name: "macos_arm64_node_v22180", + EnableGREASE: false, + CipherSuites: []uint16{4866, 4867, 4865, 49199, 49195, 49200, 49196, 158, 49191, 103, 49192, 107, 163, 159, 52393, 52392, 52394, 49327, 49325, 49315, 49311, 49245, 49249, 49239, 49235, 162, 49326, 49324, 49314, 49310, 49244, 49248, 49238, 49234, 49188, 106, 49187, 64, 49162, 49172, 57, 56, 49161, 49171, 51, 50, 157, 49313, 49309, 49233, 156, 49312, 49308, 49232, 61, 60, 53, 47, 255}, + Curves: []uint16{29, 23, 30, 25, 24, 256, 257, 258, 259, 260}, + PointFormats: []uint8{0, 1, 2}, + }, + JA4CipherHash: "a33745022dd6", // stable part (same cipher suites) + }, + } + + for _, tc := range profiles { + tc := tc // capture range variable + t.Run(tc.Profile.Name, func(t *testing.T) { + fp := fetchFingerprint(t, tc.Profile) + if fp == nil { + return // fetchFingerprint already called t.Fatal + } + + t.Logf("Profile: %s", tc.Profile.Name) + t.Logf(" JA3: %s", fp.JA3) + t.Logf(" JA3 Hash: %s", fp.JA3Hash) + t.Logf(" JA4: %s", fp.JA4) + t.Logf(" PeetPrint: %s", fp.PeetPrint) + t.Logf(" PeetPrintHash: %s", fp.PeetPrintHash) + + // Verify expectations + if tc.ExpectedJA3 != "" { + if fp.JA3Hash == tc.ExpectedJA3 { + t.Logf(" ✓ JA3 hash matches: %s", tc.ExpectedJA3) + } else { + t.Errorf(" ✗ JA3 hash mismatch: got %s, expected %s", fp.JA3Hash, tc.ExpectedJA3) + } + } + + if tc.ExpectedJA4 != "" { + if fp.JA4 == tc.ExpectedJA4 { + t.Logf(" ✓ JA4 matches: %s", tc.ExpectedJA4) + } else { + t.Errorf(" ✗ JA4 mismatch: got %s, expected %s", fp.JA4, tc.ExpectedJA4) + } + } + + // Check JA4 cipher hash (stable middle part) + // JA4 format: prefix_cipherHash_extHash + if tc.JA4CipherHash != "" { + if strings.Contains(fp.JA4, "_"+tc.JA4CipherHash+"_") { + t.Logf(" ✓ JA4 cipher hash matches: %s", tc.JA4CipherHash) + } else { + t.Errorf(" ✗ JA4 cipher hash mismatch: got %s, expected cipher hash %s", fp.JA4, tc.JA4CipherHash) + } + } + }) + } +} + +// fetchFingerprint makes a request to tls.peet.ws and returns the TLS fingerprint info. +func fetchFingerprint(t *testing.T, profile *Profile) *TLSInfo { + t.Helper() + + dialer := NewDialer(profile, nil) + client := &http.Client{ + Transport: &http.Transport{ + DialTLSContext: dialer.DialTLSContext, + }, + Timeout: 30 * time.Second, + } + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, "GET", "https://tls.peet.ws/api/all", nil) + if err != nil { + t.Fatalf("failed to create request: %v", err) + return nil + } + req.Header.Set("User-Agent", "Claude Code/2.0.0 Node.js/20.0.0") + + resp, err := client.Do(req) + skipIfExternalServiceUnavailable(t, err) + defer func() { _ = resp.Body.Close() }() + + body, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("failed to read response: %v", err) + return nil + } + + var fpResp FingerprintResponse + if err := json.Unmarshal(body, &fpResp); err != nil { + t.Logf("Response body: %s", string(body)) + t.Fatalf("failed to parse fingerprint response: %v", err) + return nil + } + + return &fpResp.TLS +} diff --git a/backend/internal/pkg/tlsfingerprint/dialer_test.go b/backend/internal/pkg/tlsfingerprint/dialer_test.go index 845d51e5..dff7570f 100644 --- a/backend/internal/pkg/tlsfingerprint/dialer_test.go +++ b/backend/internal/pkg/tlsfingerprint/dialer_test.go @@ -1,21 +1,16 @@ // Package tlsfingerprint provides TLS fingerprint simulation for HTTP clients. // -// Integration tests for verifying TLS fingerprint correctness. -// These tests make actual network requests and should be run manually. +// Unit tests for TLS fingerprint dialer. +// Integration tests that require external network are in dialer_integration_test.go +// and require the 'integration' build tag. // -// Run with: go test -v ./internal/pkg/tlsfingerprint/... -// Run integration tests: go test -v -run TestJA3 ./internal/pkg/tlsfingerprint/... +// Run unit tests: go test -v ./internal/pkg/tlsfingerprint/... +// Run integration tests: go test -v -tags=integration ./internal/pkg/tlsfingerprint/... package tlsfingerprint import ( - "context" - "encoding/json" - "io" - "net/http" "net/url" - "strings" "testing" - "time" ) // FingerprintResponse represents the response from tls.peet.ws/api/all. @@ -36,148 +31,6 @@ type TLSInfo struct { SessionID string `json:"session_id"` } -// TestDialerBasicConnection tests that the dialer can establish TLS connections. -func TestDialerBasicConnection(t *testing.T) { - if testing.Short() { - t.Skip("skipping network test in short mode") - } - - // Create a dialer with default profile - profile := &Profile{ - Name: "Test Profile", - EnableGREASE: false, - } - dialer := NewDialer(profile, nil) - - // Create HTTP client with custom TLS dialer - client := &http.Client{ - Transport: &http.Transport{ - DialTLSContext: dialer.DialTLSContext, - }, - Timeout: 30 * time.Second, - } - - // Make a request to a known HTTPS endpoint - resp, err := client.Get("https://www.google.com") - if err != nil { - t.Fatalf("failed to connect: %v", err) - } - defer func() { _ = resp.Body.Close() }() - - if resp.StatusCode != http.StatusOK { - t.Errorf("expected status 200, got %d", resp.StatusCode) - } -} - -// TestJA3Fingerprint verifies the JA3/JA4 fingerprint matches expected value. -// This test uses tls.peet.ws to verify the fingerprint. -// Expected JA3 hash: 1a28e69016765d92e3b381168d68922c (Claude CLI / Node.js 20.x) -// Expected JA4: t13d5911h1_a33745022dd6_1f22a2ca17c4 (d=domain) or t13i5911h1_... (i=IP) -func TestJA3Fingerprint(t *testing.T) { - // Skip if network is unavailable or if running in short mode - if testing.Short() { - t.Skip("skipping integration test in short mode") - } - - profile := &Profile{ - Name: "Claude CLI Test", - EnableGREASE: false, - } - dialer := NewDialer(profile, nil) - - client := &http.Client{ - Transport: &http.Transport{ - DialTLSContext: dialer.DialTLSContext, - }, - Timeout: 30 * time.Second, - } - - // Use tls.peet.ws fingerprint detection API - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - req, err := http.NewRequestWithContext(ctx, "GET", "https://tls.peet.ws/api/all", nil) - if err != nil { - t.Fatalf("failed to create request: %v", err) - } - req.Header.Set("User-Agent", "Claude Code/2.0.0 Node.js/20.0.0") - - resp, err := client.Do(req) - if err != nil { - t.Fatalf("failed to get fingerprint: %v", err) - } - defer func() { _ = resp.Body.Close() }() - - body, err := io.ReadAll(resp.Body) - if err != nil { - t.Fatalf("failed to read response: %v", err) - } - - var fpResp FingerprintResponse - if err := json.Unmarshal(body, &fpResp); err != nil { - t.Logf("Response body: %s", string(body)) - t.Fatalf("failed to parse fingerprint response: %v", err) - } - - // Log all fingerprint information - t.Logf("JA3: %s", fpResp.TLS.JA3) - t.Logf("JA3 Hash: %s", fpResp.TLS.JA3Hash) - t.Logf("JA4: %s", fpResp.TLS.JA4) - t.Logf("PeetPrint: %s", fpResp.TLS.PeetPrint) - t.Logf("PeetPrint Hash: %s", fpResp.TLS.PeetPrintHash) - - // Verify JA3 hash matches expected value - expectedJA3Hash := "1a28e69016765d92e3b381168d68922c" - if fpResp.TLS.JA3Hash == expectedJA3Hash { - t.Logf("✓ JA3 hash matches expected value: %s", expectedJA3Hash) - } else { - t.Errorf("✗ JA3 hash mismatch: got %s, expected %s", fpResp.TLS.JA3Hash, expectedJA3Hash) - } - - // Verify JA4 fingerprint - // JA4 format: t[version][sni][cipher_count][ext_count][alpn]_[cipher_hash]_[ext_hash] - // Expected: t13d5910h1 (d=domain) or t13i5910h1 (i=IP) - // The suffix _a33745022dd6_1f22a2ca17c4 should match - expectedJA4Suffix := "_a33745022dd6_1f22a2ca17c4" - if strings.HasSuffix(fpResp.TLS.JA4, expectedJA4Suffix) { - t.Logf("✓ JA4 suffix matches expected value: %s", expectedJA4Suffix) - } else { - t.Errorf("✗ JA4 suffix mismatch: got %s, expected suffix %s", fpResp.TLS.JA4, expectedJA4Suffix) - } - - // Verify JA4 prefix (t13d5911h1 or t13i5911h1) - // d = domain (SNI present), i = IP (no SNI) - // Since we connect to tls.peet.ws (domain), we expect 'd' - expectedJA4Prefix := "t13d5911h1" - if strings.HasPrefix(fpResp.TLS.JA4, expectedJA4Prefix) { - t.Logf("✓ JA4 prefix matches: %s (t13=TLS1.3, d=domain, 59=ciphers, 11=extensions, h1=HTTP/1.1)", expectedJA4Prefix) - } else { - // Also accept 'i' variant for IP connections - altPrefix := "t13i5911h1" - if strings.HasPrefix(fpResp.TLS.JA4, altPrefix) { - t.Logf("✓ JA4 prefix matches (IP variant): %s", altPrefix) - } else { - t.Errorf("✗ JA4 prefix mismatch: got %s, expected %s or %s", fpResp.TLS.JA4, expectedJA4Prefix, altPrefix) - } - } - - // Verify JA3 contains expected cipher suites (TLS 1.3 ciphers at the beginning) - if strings.Contains(fpResp.TLS.JA3, "4866-4867-4865") { - t.Logf("✓ JA3 contains expected TLS 1.3 cipher suites") - } else { - t.Logf("Warning: JA3 does not contain expected TLS 1.3 cipher suites") - } - - // Verify extension list (should be 11 extensions including SNI) - // Expected: 0-11-10-35-16-22-23-13-43-45-51 - expectedExtensions := "0-11-10-35-16-22-23-13-43-45-51" - if strings.Contains(fpResp.TLS.JA3, expectedExtensions) { - t.Logf("✓ JA3 contains expected extension list: %s", expectedExtensions) - } else { - t.Logf("Warning: JA3 extension list may differ") - } -} - // TestDialerWithProfile tests that different profiles produce different fingerprints. func TestDialerWithProfile(t *testing.T) { // Create two dialers with different profiles @@ -305,139 +158,3 @@ func mustParseURL(rawURL string) *url.URL { } return u } - -// TestProfileExpectation defines expected fingerprint values for a profile. -type TestProfileExpectation struct { - Profile *Profile - ExpectedJA3 string // Expected JA3 hash (empty = don't check) - ExpectedJA4 string // Expected full JA4 (empty = don't check) - JA4CipherHash string // Expected JA4 cipher hash - the stable middle part (empty = don't check) -} - -// TestAllProfiles tests multiple TLS fingerprint profiles against tls.peet.ws. -// Run with: go test -v -run TestAllProfiles ./internal/pkg/tlsfingerprint/... -func TestAllProfiles(t *testing.T) { - if testing.Short() { - t.Skip("skipping integration test in short mode") - } - - // Define all profiles to test with their expected fingerprints - // These profiles are from config.yaml gateway.tls_fingerprint.profiles - profiles := []TestProfileExpectation{ - { - // Linux x64 Node.js v22.17.1 - // Expected JA3 Hash: 1a28e69016765d92e3b381168d68922c - // Expected JA4: t13d5911h1_a33745022dd6_1f22a2ca17c4 - Profile: &Profile{ - Name: "linux_x64_node_v22171", - EnableGREASE: false, - CipherSuites: []uint16{4866, 4867, 4865, 49199, 49195, 49200, 49196, 158, 49191, 103, 49192, 107, 163, 159, 52393, 52392, 52394, 49327, 49325, 49315, 49311, 49245, 49249, 49239, 49235, 162, 49326, 49324, 49314, 49310, 49244, 49248, 49238, 49234, 49188, 106, 49187, 64, 49162, 49172, 57, 56, 49161, 49171, 51, 50, 157, 49313, 49309, 49233, 156, 49312, 49308, 49232, 61, 60, 53, 47, 255}, - Curves: []uint16{29, 23, 30, 25, 24, 256, 257, 258, 259, 260}, - PointFormats: []uint8{0, 1, 2}, - }, - JA4CipherHash: "a33745022dd6", // stable part - }, - { - // MacOS arm64 Node.js v22.18.0 - // Expected JA3 Hash: 70cb5ca646080902703ffda87036a5ea - // Expected JA4: t13d5912h1_a33745022dd6_dbd39dd1d406 - Profile: &Profile{ - Name: "macos_arm64_node_v22180", - EnableGREASE: false, - CipherSuites: []uint16{4866, 4867, 4865, 49199, 49195, 49200, 49196, 158, 49191, 103, 49192, 107, 163, 159, 52393, 52392, 52394, 49327, 49325, 49315, 49311, 49245, 49249, 49239, 49235, 162, 49326, 49324, 49314, 49310, 49244, 49248, 49238, 49234, 49188, 106, 49187, 64, 49162, 49172, 57, 56, 49161, 49171, 51, 50, 157, 49313, 49309, 49233, 156, 49312, 49308, 49232, 61, 60, 53, 47, 255}, - Curves: []uint16{29, 23, 30, 25, 24, 256, 257, 258, 259, 260}, - PointFormats: []uint8{0, 1, 2}, - }, - JA4CipherHash: "a33745022dd6", // stable part (same cipher suites) - }, - } - - for _, tc := range profiles { - tc := tc // capture range variable - t.Run(tc.Profile.Name, func(t *testing.T) { - fp := fetchFingerprint(t, tc.Profile) - if fp == nil { - return // fetchFingerprint already called t.Fatal - } - - t.Logf("Profile: %s", tc.Profile.Name) - t.Logf(" JA3: %s", fp.JA3) - t.Logf(" JA3 Hash: %s", fp.JA3Hash) - t.Logf(" JA4: %s", fp.JA4) - t.Logf(" PeetPrint: %s", fp.PeetPrint) - t.Logf(" PeetPrintHash: %s", fp.PeetPrintHash) - - // Verify expectations - if tc.ExpectedJA3 != "" { - if fp.JA3Hash == tc.ExpectedJA3 { - t.Logf(" ✓ JA3 hash matches: %s", tc.ExpectedJA3) - } else { - t.Errorf(" ✗ JA3 hash mismatch: got %s, expected %s", fp.JA3Hash, tc.ExpectedJA3) - } - } - - if tc.ExpectedJA4 != "" { - if fp.JA4 == tc.ExpectedJA4 { - t.Logf(" ✓ JA4 matches: %s", tc.ExpectedJA4) - } else { - t.Errorf(" ✗ JA4 mismatch: got %s, expected %s", fp.JA4, tc.ExpectedJA4) - } - } - - // Check JA4 cipher hash (stable middle part) - // JA4 format: prefix_cipherHash_extHash - if tc.JA4CipherHash != "" { - if strings.Contains(fp.JA4, "_"+tc.JA4CipherHash+"_") { - t.Logf(" ✓ JA4 cipher hash matches: %s", tc.JA4CipherHash) - } else { - t.Errorf(" ✗ JA4 cipher hash mismatch: got %s, expected cipher hash %s", fp.JA4, tc.JA4CipherHash) - } - } - }) - } -} - -// fetchFingerprint makes a request to tls.peet.ws and returns the TLS fingerprint info. -func fetchFingerprint(t *testing.T, profile *Profile) *TLSInfo { - t.Helper() - - dialer := NewDialer(profile, nil) - client := &http.Client{ - Transport: &http.Transport{ - DialTLSContext: dialer.DialTLSContext, - }, - Timeout: 30 * time.Second, - } - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - req, err := http.NewRequestWithContext(ctx, "GET", "https://tls.peet.ws/api/all", nil) - if err != nil { - t.Fatalf("failed to create request: %v", err) - return nil - } - req.Header.Set("User-Agent", "Claude Code/2.0.0 Node.js/20.0.0") - - resp, err := client.Do(req) - if err != nil { - t.Fatalf("failed to get fingerprint: %v", err) - return nil - } - defer func() { _ = resp.Body.Close() }() - - body, err := io.ReadAll(resp.Body) - if err != nil { - t.Fatalf("failed to read response: %v", err) - return nil - } - - var fpResp FingerprintResponse - if err := json.Unmarshal(body, &fpResp); err != nil { - t.Logf("Response body: %s", string(body)) - t.Fatalf("failed to parse fingerprint response: %v", err) - return nil - } - - return &fpResp.TLS -} diff --git a/backend/internal/repository/aes_encryptor.go b/backend/internal/repository/aes_encryptor.go new file mode 100644 index 00000000..924e3698 --- /dev/null +++ b/backend/internal/repository/aes_encryptor.go @@ -0,0 +1,95 @@ +package repository + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "encoding/base64" + "encoding/hex" + "fmt" + "io" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/service" +) + +// AESEncryptor implements SecretEncryptor using AES-256-GCM +type AESEncryptor struct { + key []byte +} + +// NewAESEncryptor creates a new AES encryptor +func NewAESEncryptor(cfg *config.Config) (service.SecretEncryptor, error) { + key, err := hex.DecodeString(cfg.Totp.EncryptionKey) + if err != nil { + return nil, fmt.Errorf("invalid totp encryption key: %w", err) + } + + if len(key) != 32 { + return nil, fmt.Errorf("totp encryption key must be 32 bytes (64 hex chars), got %d bytes", len(key)) + } + + return &AESEncryptor{key: key}, nil +} + +// Encrypt encrypts plaintext using AES-256-GCM +// Output format: base64(nonce + ciphertext + tag) +func (e *AESEncryptor) Encrypt(plaintext string) (string, error) { + block, err := aes.NewCipher(e.key) + if err != nil { + return "", fmt.Errorf("create cipher: %w", err) + } + + gcm, err := cipher.NewGCM(block) + if err != nil { + return "", fmt.Errorf("create gcm: %w", err) + } + + // Generate a random nonce + nonce := make([]byte, gcm.NonceSize()) + if _, err := io.ReadFull(rand.Reader, nonce); err != nil { + return "", fmt.Errorf("generate nonce: %w", err) + } + + // Encrypt the plaintext + // Seal appends the ciphertext and tag to the nonce + ciphertext := gcm.Seal(nonce, nonce, []byte(plaintext), nil) + + // Encode as base64 + return base64.StdEncoding.EncodeToString(ciphertext), nil +} + +// Decrypt decrypts ciphertext using AES-256-GCM +func (e *AESEncryptor) Decrypt(ciphertext string) (string, error) { + // Decode from base64 + data, err := base64.StdEncoding.DecodeString(ciphertext) + if err != nil { + return "", fmt.Errorf("decode base64: %w", err) + } + + block, err := aes.NewCipher(e.key) + if err != nil { + return "", fmt.Errorf("create cipher: %w", err) + } + + gcm, err := cipher.NewGCM(block) + if err != nil { + return "", fmt.Errorf("create gcm: %w", err) + } + + nonceSize := gcm.NonceSize() + if len(data) < nonceSize { + return "", fmt.Errorf("ciphertext too short") + } + + // Extract nonce and ciphertext + nonce, ciphertextData := data[:nonceSize], data[nonceSize:] + + // Decrypt + plaintext, err := gcm.Open(nil, nonce, ciphertextData, nil) + if err != nil { + return "", fmt.Errorf("decrypt: %w", err) + } + + return string(plaintext), nil +} diff --git a/backend/internal/repository/api_key_repo.go b/backend/internal/repository/api_key_repo.go index ab890844..1e5a62df 100644 --- a/backend/internal/repository/api_key_repo.go +++ b/backend/internal/repository/api_key_repo.go @@ -387,17 +387,20 @@ func userEntityToService(u *dbent.User) *service.User { return nil } return &service.User{ - ID: u.ID, - Email: u.Email, - Username: u.Username, - Notes: u.Notes, - PasswordHash: u.PasswordHash, - Role: u.Role, - Balance: u.Balance, - Concurrency: u.Concurrency, - Status: u.Status, - CreatedAt: u.CreatedAt, - UpdatedAt: u.UpdatedAt, + ID: u.ID, + Email: u.Email, + Username: u.Username, + Notes: u.Notes, + PasswordHash: u.PasswordHash, + Role: u.Role, + Balance: u.Balance, + Concurrency: u.Concurrency, + Status: u.Status, + TotpSecretEncrypted: u.TotpSecretEncrypted, + TotpEnabled: u.TotpEnabled, + TotpEnabledAt: u.TotpEnabledAt, + CreatedAt: u.CreatedAt, + UpdatedAt: u.UpdatedAt, } } diff --git a/backend/internal/repository/totp_cache.go b/backend/internal/repository/totp_cache.go new file mode 100644 index 00000000..2f4a8ab2 --- /dev/null +++ b/backend/internal/repository/totp_cache.go @@ -0,0 +1,149 @@ +package repository + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/redis/go-redis/v9" + + "github.com/Wei-Shaw/sub2api/internal/service" +) + +const ( + totpSetupKeyPrefix = "totp:setup:" + totpLoginKeyPrefix = "totp:login:" + totpAttemptsKeyPrefix = "totp:attempts:" + totpAttemptsTTL = 15 * time.Minute +) + +// TotpCache implements service.TotpCache using Redis +type TotpCache struct { + rdb *redis.Client +} + +// NewTotpCache creates a new TOTP cache +func NewTotpCache(rdb *redis.Client) service.TotpCache { + return &TotpCache{rdb: rdb} +} + +// GetSetupSession retrieves a TOTP setup session +func (c *TotpCache) GetSetupSession(ctx context.Context, userID int64) (*service.TotpSetupSession, error) { + key := fmt.Sprintf("%s%d", totpSetupKeyPrefix, userID) + data, err := c.rdb.Get(ctx, key).Bytes() + if err != nil { + if err == redis.Nil { + return nil, nil + } + return nil, fmt.Errorf("get setup session: %w", err) + } + + var session service.TotpSetupSession + if err := json.Unmarshal(data, &session); err != nil { + return nil, fmt.Errorf("unmarshal setup session: %w", err) + } + + return &session, nil +} + +// SetSetupSession stores a TOTP setup session +func (c *TotpCache) SetSetupSession(ctx context.Context, userID int64, session *service.TotpSetupSession, ttl time.Duration) error { + key := fmt.Sprintf("%s%d", totpSetupKeyPrefix, userID) + data, err := json.Marshal(session) + if err != nil { + return fmt.Errorf("marshal setup session: %w", err) + } + + if err := c.rdb.Set(ctx, key, data, ttl).Err(); err != nil { + return fmt.Errorf("set setup session: %w", err) + } + + return nil +} + +// DeleteSetupSession deletes a TOTP setup session +func (c *TotpCache) DeleteSetupSession(ctx context.Context, userID int64) error { + key := fmt.Sprintf("%s%d", totpSetupKeyPrefix, userID) + return c.rdb.Del(ctx, key).Err() +} + +// GetLoginSession retrieves a TOTP login session +func (c *TotpCache) GetLoginSession(ctx context.Context, tempToken string) (*service.TotpLoginSession, error) { + key := totpLoginKeyPrefix + tempToken + data, err := c.rdb.Get(ctx, key).Bytes() + if err != nil { + if err == redis.Nil { + return nil, nil + } + return nil, fmt.Errorf("get login session: %w", err) + } + + var session service.TotpLoginSession + if err := json.Unmarshal(data, &session); err != nil { + return nil, fmt.Errorf("unmarshal login session: %w", err) + } + + return &session, nil +} + +// SetLoginSession stores a TOTP login session +func (c *TotpCache) SetLoginSession(ctx context.Context, tempToken string, session *service.TotpLoginSession, ttl time.Duration) error { + key := totpLoginKeyPrefix + tempToken + data, err := json.Marshal(session) + if err != nil { + return fmt.Errorf("marshal login session: %w", err) + } + + if err := c.rdb.Set(ctx, key, data, ttl).Err(); err != nil { + return fmt.Errorf("set login session: %w", err) + } + + return nil +} + +// DeleteLoginSession deletes a TOTP login session +func (c *TotpCache) DeleteLoginSession(ctx context.Context, tempToken string) error { + key := totpLoginKeyPrefix + tempToken + return c.rdb.Del(ctx, key).Err() +} + +// IncrementVerifyAttempts increments the verify attempt counter +func (c *TotpCache) IncrementVerifyAttempts(ctx context.Context, userID int64) (int, error) { + key := fmt.Sprintf("%s%d", totpAttemptsKeyPrefix, userID) + + // Use pipeline for atomic increment and set TTL + pipe := c.rdb.Pipeline() + incrCmd := pipe.Incr(ctx, key) + pipe.Expire(ctx, key, totpAttemptsTTL) + + if _, err := pipe.Exec(ctx); err != nil { + return 0, fmt.Errorf("increment verify attempts: %w", err) + } + + count, err := incrCmd.Result() + if err != nil { + return 0, fmt.Errorf("get increment result: %w", err) + } + + return int(count), nil +} + +// GetVerifyAttempts gets the current verify attempt count +func (c *TotpCache) GetVerifyAttempts(ctx context.Context, userID int64) (int, error) { + key := fmt.Sprintf("%s%d", totpAttemptsKeyPrefix, userID) + count, err := c.rdb.Get(ctx, key).Int() + if err != nil { + if err == redis.Nil { + return 0, nil + } + return 0, fmt.Errorf("get verify attempts: %w", err) + } + return count, nil +} + +// ClearVerifyAttempts clears the verify attempt counter +func (c *TotpCache) ClearVerifyAttempts(ctx context.Context, userID int64) error { + key := fmt.Sprintf("%s%d", totpAttemptsKeyPrefix, userID) + return c.rdb.Del(ctx, key).Err() +} diff --git a/backend/internal/repository/user_repo.go b/backend/internal/repository/user_repo.go index 006a5464..fe5b645c 100644 --- a/backend/internal/repository/user_repo.go +++ b/backend/internal/repository/user_repo.go @@ -7,6 +7,7 @@ import ( "fmt" "sort" "strings" + "time" dbent "github.com/Wei-Shaw/sub2api/ent" dbuser "github.com/Wei-Shaw/sub2api/ent/user" @@ -466,3 +467,46 @@ func applyUserEntityToService(dst *service.User, src *dbent.User) { dst.CreatedAt = src.CreatedAt dst.UpdatedAt = src.UpdatedAt } + +// UpdateTotpSecret 更新用户的 TOTP 加密密钥 +func (r *userRepository) UpdateTotpSecret(ctx context.Context, userID int64, encryptedSecret *string) error { + client := clientFromContext(ctx, r.client) + update := client.User.UpdateOneID(userID) + if encryptedSecret == nil { + update = update.ClearTotpSecretEncrypted() + } else { + update = update.SetTotpSecretEncrypted(*encryptedSecret) + } + _, err := update.Save(ctx) + if err != nil { + return translatePersistenceError(err, service.ErrUserNotFound, nil) + } + return nil +} + +// EnableTotp 启用用户的 TOTP 双因素认证 +func (r *userRepository) EnableTotp(ctx context.Context, userID int64) error { + client := clientFromContext(ctx, r.client) + _, err := client.User.UpdateOneID(userID). + SetTotpEnabled(true). + SetTotpEnabledAt(time.Now()). + Save(ctx) + if err != nil { + return translatePersistenceError(err, service.ErrUserNotFound, nil) + } + return nil +} + +// DisableTotp 禁用用户的 TOTP 双因素认证 +func (r *userRepository) DisableTotp(ctx context.Context, userID int64) error { + client := clientFromContext(ctx, r.client) + _, err := client.User.UpdateOneID(userID). + SetTotpEnabled(false). + ClearTotpEnabledAt(). + ClearTotpSecretEncrypted(). + Save(ctx) + if err != nil { + return translatePersistenceError(err, service.ErrUserNotFound, nil) + } + return nil +} diff --git a/backend/internal/repository/wire.go b/backend/internal/repository/wire.go index 7a8d85f4..3e1c05fc 100644 --- a/backend/internal/repository/wire.go +++ b/backend/internal/repository/wire.go @@ -82,6 +82,10 @@ var ProviderSet = wire.NewSet( NewSchedulerCache, NewSchedulerOutboxRepository, NewProxyLatencyCache, + NewTotpCache, + + // Encryptors + NewAESEncryptor, // HTTP service ports (DI Strategy A: return interface directly) NewTurnstileVerifier, diff --git a/backend/internal/server/api_contract_test.go b/backend/internal/server/api_contract_test.go index 8a6eb195..1deab421 100644 --- a/backend/internal/server/api_contract_test.go +++ b/backend/internal/server/api_contract_test.go @@ -453,6 +453,8 @@ func TestAPIContracts(t *testing.T) { "email_verify_enabled": false, "promo_code_enabled": true, "password_reset_enabled": false, + "totp_enabled": false, + "totp_encryption_key_configured": false, "smtp_host": "smtp.example.com", "smtp_port": 587, "smtp_username": "user", @@ -596,7 +598,7 @@ func newContractDeps(t *testing.T) *contractDeps { settingService := service.NewSettingService(settingRepo, cfg) adminService := service.NewAdminService(userRepo, groupRepo, &accountRepo, proxyRepo, apiKeyRepo, redeemRepo, nil, nil, nil, nil) - authHandler := handler.NewAuthHandler(cfg, nil, userService, settingService, nil) + authHandler := handler.NewAuthHandler(cfg, nil, userService, settingService, nil, nil) apiKeyHandler := handler.NewAPIKeyHandler(apiKeyService) usageHandler := handler.NewUsageHandler(usageService, apiKeyService) adminSettingHandler := adminhandler.NewSettingHandler(settingService, nil, nil, nil) @@ -755,6 +757,18 @@ func (r *stubUserRepo) RemoveGroupFromAllowedGroups(ctx context.Context, groupID return 0, errors.New("not implemented") } +func (r *stubUserRepo) UpdateTotpSecret(ctx context.Context, userID int64, encryptedSecret *string) error { + return errors.New("not implemented") +} + +func (r *stubUserRepo) EnableTotp(ctx context.Context, userID int64) error { + return errors.New("not implemented") +} + +func (r *stubUserRepo) DisableTotp(ctx context.Context, userID int64) error { + return errors.New("not implemented") +} + type stubApiKeyCache struct{} func (stubApiKeyCache) GetCreateAttemptCount(ctx context.Context, userID int64) (int, error) { diff --git a/backend/internal/server/routes/auth.go b/backend/internal/server/routes/auth.go index 24690f5c..33a88e82 100644 --- a/backend/internal/server/routes/auth.go +++ b/backend/internal/server/routes/auth.go @@ -26,6 +26,7 @@ func RegisterAuthRoutes( { auth.POST("/register", h.Auth.Register) auth.POST("/login", h.Auth.Login) + auth.POST("/login/2fa", h.Auth.Login2FA) auth.POST("/send-verify-code", h.Auth.SendVerifyCode) // 优惠码验证接口添加速率限制:每分钟最多 10 次(Redis 故障时 fail-close) auth.POST("/validate-promo-code", rateLimiter.LimitWithOptions("validate-promo", 10, time.Minute, middleware.RateLimitOptions{ diff --git a/backend/internal/server/routes/user.go b/backend/internal/server/routes/user.go index ad2166fe..83cf31c4 100644 --- a/backend/internal/server/routes/user.go +++ b/backend/internal/server/routes/user.go @@ -22,6 +22,17 @@ func RegisterUserRoutes( user.GET("/profile", h.User.GetProfile) user.PUT("/password", h.User.ChangePassword) user.PUT("", h.User.UpdateProfile) + + // TOTP 双因素认证 + totp := user.Group("/totp") + { + totp.GET("/status", h.Totp.GetStatus) + totp.GET("/verification-method", h.Totp.GetVerificationMethod) + totp.POST("/send-code", h.Totp.SendVerifyCode) + totp.POST("/setup", h.Totp.InitiateSetup) + totp.POST("/enable", h.Totp.Enable) + totp.POST("/disable", h.Totp.Disable) + } } // API Key管理 diff --git a/backend/internal/service/admin_service_delete_test.go b/backend/internal/service/admin_service_delete_test.go index afa433af..6472ccbb 100644 --- a/backend/internal/service/admin_service_delete_test.go +++ b/backend/internal/service/admin_service_delete_test.go @@ -93,6 +93,18 @@ func (s *userRepoStub) RemoveGroupFromAllowedGroups(ctx context.Context, groupID panic("unexpected RemoveGroupFromAllowedGroups call") } +func (s *userRepoStub) UpdateTotpSecret(ctx context.Context, userID int64, encryptedSecret *string) error { + panic("unexpected UpdateTotpSecret call") +} + +func (s *userRepoStub) EnableTotp(ctx context.Context, userID int64) error { + panic("unexpected EnableTotp call") +} + +func (s *userRepoStub) DisableTotp(ctx context.Context, userID int64) error { + panic("unexpected DisableTotp call") +} + type groupRepoStub struct { affectedUserIDs []int64 deleteErr error diff --git a/backend/internal/service/domain_constants.go b/backend/internal/service/domain_constants.go index 0eaa23ca..31a34e00 100644 --- a/backend/internal/service/domain_constants.go +++ b/backend/internal/service/domain_constants.go @@ -88,6 +88,9 @@ const ( SettingKeyTurnstileSiteKey = "turnstile_site_key" // Turnstile Site Key SettingKeyTurnstileSecretKey = "turnstile_secret_key" // Turnstile Secret Key + // TOTP 双因素认证设置 + SettingKeyTotpEnabled = "totp_enabled" // 是否启用 TOTP 2FA 功能 + // LinuxDo Connect OAuth 登录设置 SettingKeyLinuxDoConnectEnabled = "linuxdo_connect_enabled" SettingKeyLinuxDoConnectClientID = "linuxdo_connect_client_id" diff --git a/backend/internal/service/email_service.go b/backend/internal/service/email_service.go index 1abc6bfd..44edf7f7 100644 --- a/backend/internal/service/email_service.go +++ b/backend/internal/service/email_service.go @@ -282,8 +282,8 @@ func (s *EmailService) VerifyCode(ctx context.Context, email, code string) error return ErrVerifyCodeMaxAttempts } - // 验证码不匹配 - if data.Code != code { + // 验证码不匹配 (constant-time comparison to prevent timing attacks) + if subtle.ConstantTimeCompare([]byte(data.Code), []byte(code)) != 1 { data.Attempts++ if err := s.cache.SetVerificationCode(ctx, email, data, verifyCodeTTL); err != nil { log.Printf("[Email] Failed to update verification attempt count: %v", err) diff --git a/backend/internal/service/setting_service.go b/backend/internal/service/setting_service.go index c2ef9a5d..2a1e7d33 100644 --- a/backend/internal/service/setting_service.go +++ b/backend/internal/service/setting_service.go @@ -62,6 +62,7 @@ func (s *SettingService) GetPublicSettings(ctx context.Context) (*PublicSettings SettingKeyEmailVerifyEnabled, SettingKeyPromoCodeEnabled, SettingKeyPasswordResetEnabled, + SettingKeyTotpEnabled, SettingKeyTurnstileEnabled, SettingKeyTurnstileSiteKey, SettingKeySiteName, @@ -96,6 +97,7 @@ func (s *SettingService) GetPublicSettings(ctx context.Context) (*PublicSettings EmailVerifyEnabled: emailVerifyEnabled, PromoCodeEnabled: settings[SettingKeyPromoCodeEnabled] != "false", // 默认启用 PasswordResetEnabled: passwordResetEnabled, + TotpEnabled: settings[SettingKeyTotpEnabled] == "true", TurnstileEnabled: settings[SettingKeyTurnstileEnabled] == "true", TurnstileSiteKey: settings[SettingKeyTurnstileSiteKey], SiteName: s.getStringOrDefault(settings, SettingKeySiteName, "Sub2API"), @@ -135,6 +137,7 @@ func (s *SettingService) GetPublicSettingsForInjection(ctx context.Context) (any EmailVerifyEnabled bool `json:"email_verify_enabled"` PromoCodeEnabled bool `json:"promo_code_enabled"` PasswordResetEnabled bool `json:"password_reset_enabled"` + TotpEnabled bool `json:"totp_enabled"` TurnstileEnabled bool `json:"turnstile_enabled"` TurnstileSiteKey string `json:"turnstile_site_key,omitempty"` SiteName string `json:"site_name"` @@ -152,6 +155,7 @@ func (s *SettingService) GetPublicSettingsForInjection(ctx context.Context) (any EmailVerifyEnabled: settings.EmailVerifyEnabled, PromoCodeEnabled: settings.PromoCodeEnabled, PasswordResetEnabled: settings.PasswordResetEnabled, + TotpEnabled: settings.TotpEnabled, TurnstileEnabled: settings.TurnstileEnabled, TurnstileSiteKey: settings.TurnstileSiteKey, SiteName: settings.SiteName, @@ -176,6 +180,7 @@ func (s *SettingService) UpdateSettings(ctx context.Context, settings *SystemSet updates[SettingKeyEmailVerifyEnabled] = strconv.FormatBool(settings.EmailVerifyEnabled) updates[SettingKeyPromoCodeEnabled] = strconv.FormatBool(settings.PromoCodeEnabled) updates[SettingKeyPasswordResetEnabled] = strconv.FormatBool(settings.PasswordResetEnabled) + updates[SettingKeyTotpEnabled] = strconv.FormatBool(settings.TotpEnabled) // 邮件服务设置(只有非空才更新密码) updates[SettingKeySMTPHost] = settings.SMTPHost @@ -285,6 +290,21 @@ func (s *SettingService) IsPasswordResetEnabled(ctx context.Context) bool { return value == "true" } +// IsTotpEnabled 检查是否启用 TOTP 双因素认证功能 +func (s *SettingService) IsTotpEnabled(ctx context.Context) bool { + value, err := s.settingRepo.GetValue(ctx, SettingKeyTotpEnabled) + if err != nil { + return false // 默认关闭 + } + return value == "true" +} + +// IsTotpEncryptionKeyConfigured 检查 TOTP 加密密钥是否已手动配置 +// 只有手动配置了密钥才允许在管理后台启用 TOTP 功能 +func (s *SettingService) IsTotpEncryptionKeyConfigured() bool { + return s.cfg.Totp.EncryptionKeyConfigured +} + // GetSiteName 获取网站名称 func (s *SettingService) GetSiteName(ctx context.Context) string { value, err := s.settingRepo.GetValue(ctx, SettingKeySiteName) @@ -369,6 +389,7 @@ func (s *SettingService) parseSettings(settings map[string]string) *SystemSettin EmailVerifyEnabled: emailVerifyEnabled, PromoCodeEnabled: settings[SettingKeyPromoCodeEnabled] != "false", // 默认启用 PasswordResetEnabled: emailVerifyEnabled && settings[SettingKeyPasswordResetEnabled] == "true", + TotpEnabled: settings[SettingKeyTotpEnabled] == "true", SMTPHost: settings[SettingKeySMTPHost], SMTPUsername: settings[SettingKeySMTPUsername], SMTPFrom: settings[SettingKeySMTPFrom], diff --git a/backend/internal/service/settings_view.go b/backend/internal/service/settings_view.go index 427e64d2..f10254e5 100644 --- a/backend/internal/service/settings_view.go +++ b/backend/internal/service/settings_view.go @@ -5,6 +5,7 @@ type SystemSettings struct { EmailVerifyEnabled bool PromoCodeEnabled bool PasswordResetEnabled bool + TotpEnabled bool // TOTP 双因素认证 SMTPHost string SMTPPort int @@ -62,6 +63,7 @@ type PublicSettings struct { EmailVerifyEnabled bool PromoCodeEnabled bool PasswordResetEnabled bool + TotpEnabled bool // TOTP 双因素认证 TurnstileEnabled bool TurnstileSiteKey string SiteName string diff --git a/backend/internal/service/totp_service.go b/backend/internal/service/totp_service.go new file mode 100644 index 00000000..5192fe3d --- /dev/null +++ b/backend/internal/service/totp_service.go @@ -0,0 +1,506 @@ +package service + +import ( + "context" + "crypto/rand" + "crypto/subtle" + "encoding/hex" + "fmt" + "log/slog" + "time" + + "github.com/pquerna/otp/totp" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" +) + +var ( + ErrTotpNotEnabled = infraerrors.BadRequest("TOTP_NOT_ENABLED", "totp feature is not enabled") + ErrTotpAlreadyEnabled = infraerrors.BadRequest("TOTP_ALREADY_ENABLED", "totp is already enabled for this account") + ErrTotpNotSetup = infraerrors.BadRequest("TOTP_NOT_SETUP", "totp is not set up for this account") + ErrTotpInvalidCode = infraerrors.BadRequest("TOTP_INVALID_CODE", "invalid totp code") + ErrTotpSetupExpired = infraerrors.BadRequest("TOTP_SETUP_EXPIRED", "totp setup session expired") + ErrTotpTooManyAttempts = infraerrors.TooManyRequests("TOTP_TOO_MANY_ATTEMPTS", "too many verification attempts, please try again later") + ErrVerifyCodeRequired = infraerrors.BadRequest("VERIFY_CODE_REQUIRED", "email verification code is required") + ErrPasswordRequired = infraerrors.BadRequest("PASSWORD_REQUIRED", "password is required") +) + +// TotpCache defines cache operations for TOTP service +type TotpCache interface { + // Setup session methods + GetSetupSession(ctx context.Context, userID int64) (*TotpSetupSession, error) + SetSetupSession(ctx context.Context, userID int64, session *TotpSetupSession, ttl time.Duration) error + DeleteSetupSession(ctx context.Context, userID int64) error + + // Login session methods (for 2FA login flow) + GetLoginSession(ctx context.Context, tempToken string) (*TotpLoginSession, error) + SetLoginSession(ctx context.Context, tempToken string, session *TotpLoginSession, ttl time.Duration) error + DeleteLoginSession(ctx context.Context, tempToken string) error + + // Rate limiting + IncrementVerifyAttempts(ctx context.Context, userID int64) (int, error) + GetVerifyAttempts(ctx context.Context, userID int64) (int, error) + ClearVerifyAttempts(ctx context.Context, userID int64) error +} + +// SecretEncryptor defines encryption operations for TOTP secrets +type SecretEncryptor interface { + Encrypt(plaintext string) (string, error) + Decrypt(ciphertext string) (string, error) +} + +// TotpSetupSession represents a TOTP setup session +type TotpSetupSession struct { + Secret string // Plain text TOTP secret (not encrypted yet) + SetupToken string // Random token to verify setup request + CreatedAt time.Time +} + +// TotpLoginSession represents a pending 2FA login session +type TotpLoginSession struct { + UserID int64 + Email string + TokenExpiry time.Time +} + +// TotpStatus represents the TOTP status for a user +type TotpStatus struct { + Enabled bool `json:"enabled"` + EnabledAt *time.Time `json:"enabled_at,omitempty"` + FeatureEnabled bool `json:"feature_enabled"` +} + +// TotpSetupResponse represents the response for initiating TOTP setup +type TotpSetupResponse struct { + Secret string `json:"secret"` + QRCodeURL string `json:"qr_code_url"` + SetupToken string `json:"setup_token"` + Countdown int `json:"countdown"` // seconds until setup expires +} + +const ( + totpSetupTTL = 5 * time.Minute + totpLoginTTL = 5 * time.Minute + totpAttemptsTTL = 15 * time.Minute + maxTotpAttempts = 5 + totpIssuer = "Sub2API" +) + +// TotpService handles TOTP operations +type TotpService struct { + userRepo UserRepository + encryptor SecretEncryptor + cache TotpCache + settingService *SettingService + emailService *EmailService + emailQueueService *EmailQueueService +} + +// NewTotpService creates a new TOTP service +func NewTotpService( + userRepo UserRepository, + encryptor SecretEncryptor, + cache TotpCache, + settingService *SettingService, + emailService *EmailService, + emailQueueService *EmailQueueService, +) *TotpService { + return &TotpService{ + userRepo: userRepo, + encryptor: encryptor, + cache: cache, + settingService: settingService, + emailService: emailService, + emailQueueService: emailQueueService, + } +} + +// GetStatus returns the TOTP status for a user +func (s *TotpService) GetStatus(ctx context.Context, userID int64) (*TotpStatus, error) { + featureEnabled := s.settingService.IsTotpEnabled(ctx) + + user, err := s.userRepo.GetByID(ctx, userID) + if err != nil { + return nil, fmt.Errorf("get user: %w", err) + } + + return &TotpStatus{ + Enabled: user.TotpEnabled, + EnabledAt: user.TotpEnabledAt, + FeatureEnabled: featureEnabled, + }, nil +} + +// InitiateSetup starts the TOTP setup process +// If email verification is enabled, emailCode is required; otherwise password is required +func (s *TotpService) InitiateSetup(ctx context.Context, userID int64, emailCode, password string) (*TotpSetupResponse, error) { + // Check if TOTP feature is enabled globally + if !s.settingService.IsTotpEnabled(ctx) { + return nil, ErrTotpNotEnabled + } + + // Get user and check if TOTP is already enabled + user, err := s.userRepo.GetByID(ctx, userID) + if err != nil { + return nil, fmt.Errorf("get user: %w", err) + } + + if user.TotpEnabled { + return nil, ErrTotpAlreadyEnabled + } + + // Verify identity based on email verification setting + if s.settingService.IsEmailVerifyEnabled(ctx) { + // Email verification enabled - verify email code + if emailCode == "" { + return nil, ErrVerifyCodeRequired + } + if err := s.emailService.VerifyCode(ctx, user.Email, emailCode); err != nil { + return nil, err + } + } else { + // Email verification disabled - verify password + if password == "" { + return nil, ErrPasswordRequired + } + if !user.CheckPassword(password) { + return nil, ErrPasswordIncorrect + } + } + + // Generate a new TOTP key + key, err := totp.Generate(totp.GenerateOpts{ + Issuer: totpIssuer, + AccountName: user.Email, + }) + if err != nil { + return nil, fmt.Errorf("generate totp key: %w", err) + } + + // Generate a random setup token + setupToken, err := generateRandomToken(32) + if err != nil { + return nil, fmt.Errorf("generate setup token: %w", err) + } + + // Store the setup session in cache + session := &TotpSetupSession{ + Secret: key.Secret(), + SetupToken: setupToken, + CreatedAt: time.Now(), + } + + if err := s.cache.SetSetupSession(ctx, userID, session, totpSetupTTL); err != nil { + return nil, fmt.Errorf("store setup session: %w", err) + } + + return &TotpSetupResponse{ + Secret: key.Secret(), + QRCodeURL: key.URL(), + SetupToken: setupToken, + Countdown: int(totpSetupTTL.Seconds()), + }, nil +} + +// CompleteSetup completes the TOTP setup by verifying the code +func (s *TotpService) CompleteSetup(ctx context.Context, userID int64, totpCode, setupToken string) error { + // Check if TOTP feature is enabled globally + if !s.settingService.IsTotpEnabled(ctx) { + return ErrTotpNotEnabled + } + + // Get the setup session + session, err := s.cache.GetSetupSession(ctx, userID) + if err != nil { + return ErrTotpSetupExpired + } + + if session == nil { + return ErrTotpSetupExpired + } + + // Verify the setup token (constant-time comparison) + if subtle.ConstantTimeCompare([]byte(session.SetupToken), []byte(setupToken)) != 1 { + return ErrTotpSetupExpired + } + + // Verify the TOTP code + if !totp.Validate(totpCode, session.Secret) { + return ErrTotpInvalidCode + } + + setupSecretPrefix := "N/A" + if len(session.Secret) >= 4 { + setupSecretPrefix = session.Secret[:4] + } + slog.Debug("totp_complete_setup_before_encrypt", + "user_id", userID, + "secret_len", len(session.Secret), + "secret_prefix", setupSecretPrefix) + + // Encrypt the secret + encryptedSecret, err := s.encryptor.Encrypt(session.Secret) + if err != nil { + return fmt.Errorf("encrypt totp secret: %w", err) + } + + slog.Debug("totp_complete_setup_encrypted", + "user_id", userID, + "encrypted_len", len(encryptedSecret)) + + // Verify encryption by decrypting + decrypted, decErr := s.encryptor.Decrypt(encryptedSecret) + if decErr != nil { + slog.Debug("totp_complete_setup_verify_failed", + "user_id", userID, + "error", decErr) + } else { + decryptedPrefix := "N/A" + if len(decrypted) >= 4 { + decryptedPrefix = decrypted[:4] + } + slog.Debug("totp_complete_setup_verified", + "user_id", userID, + "original_len", len(session.Secret), + "decrypted_len", len(decrypted), + "match", session.Secret == decrypted, + "decrypted_prefix", decryptedPrefix) + } + + // Update user with encrypted TOTP secret + if err := s.userRepo.UpdateTotpSecret(ctx, userID, &encryptedSecret); err != nil { + return fmt.Errorf("update totp secret: %w", err) + } + + // Enable TOTP for the user + if err := s.userRepo.EnableTotp(ctx, userID); err != nil { + return fmt.Errorf("enable totp: %w", err) + } + + // Clean up the setup session + _ = s.cache.DeleteSetupSession(ctx, userID) + + return nil +} + +// Disable disables TOTP for a user +// If email verification is enabled, emailCode is required; otherwise password is required +func (s *TotpService) Disable(ctx context.Context, userID int64, emailCode, password string) error { + // Get user + user, err := s.userRepo.GetByID(ctx, userID) + if err != nil { + return fmt.Errorf("get user: %w", err) + } + + if !user.TotpEnabled { + return ErrTotpNotSetup + } + + // Verify identity based on email verification setting + if s.settingService.IsEmailVerifyEnabled(ctx) { + // Email verification enabled - verify email code + if emailCode == "" { + return ErrVerifyCodeRequired + } + if err := s.emailService.VerifyCode(ctx, user.Email, emailCode); err != nil { + return err + } + } else { + // Email verification disabled - verify password + if password == "" { + return ErrPasswordRequired + } + if !user.CheckPassword(password) { + return ErrPasswordIncorrect + } + } + + // Disable TOTP + if err := s.userRepo.DisableTotp(ctx, userID); err != nil { + return fmt.Errorf("disable totp: %w", err) + } + + return nil +} + +// VerifyCode verifies a TOTP code for a user +func (s *TotpService) VerifyCode(ctx context.Context, userID int64, code string) error { + slog.Debug("totp_verify_code_called", + "user_id", userID, + "code_len", len(code)) + + // Check rate limiting + attempts, err := s.cache.GetVerifyAttempts(ctx, userID) + if err == nil && attempts >= maxTotpAttempts { + return ErrTotpTooManyAttempts + } + + // Get user + user, err := s.userRepo.GetByID(ctx, userID) + if err != nil { + slog.Debug("totp_verify_get_user_failed", + "user_id", userID, + "error", err) + return infraerrors.InternalServer("TOTP_VERIFY_ERROR", "failed to verify totp code") + } + + if !user.TotpEnabled || user.TotpSecretEncrypted == nil { + slog.Debug("totp_verify_not_setup", + "user_id", userID, + "enabled", user.TotpEnabled, + "has_secret", user.TotpSecretEncrypted != nil) + return ErrTotpNotSetup + } + + slog.Debug("totp_verify_encrypted_secret", + "user_id", userID, + "encrypted_len", len(*user.TotpSecretEncrypted)) + + // Decrypt the secret + secret, err := s.encryptor.Decrypt(*user.TotpSecretEncrypted) + if err != nil { + slog.Debug("totp_verify_decrypt_failed", + "user_id", userID, + "error", err) + return infraerrors.InternalServer("TOTP_VERIFY_ERROR", "failed to verify totp code") + } + + secretPrefix := "N/A" + if len(secret) >= 4 { + secretPrefix = secret[:4] + } + slog.Debug("totp_verify_decrypted", + "user_id", userID, + "secret_len", len(secret), + "secret_prefix", secretPrefix) + + // Verify the code + valid := totp.Validate(code, secret) + slog.Debug("totp_verify_result", + "user_id", userID, + "valid", valid, + "secret_len", len(secret), + "secret_prefix", secretPrefix, + "server_time", time.Now().UTC().Format(time.RFC3339)) + + if !valid { + // Increment failed attempts + _, _ = s.cache.IncrementVerifyAttempts(ctx, userID) + return ErrTotpInvalidCode + } + + // Clear attempt counter on success + _ = s.cache.ClearVerifyAttempts(ctx, userID) + + return nil +} + +// CreateLoginSession creates a temporary login session for 2FA +func (s *TotpService) CreateLoginSession(ctx context.Context, userID int64, email string) (string, error) { + // Generate a random temp token + tempToken, err := generateRandomToken(32) + if err != nil { + return "", fmt.Errorf("generate temp token: %w", err) + } + + session := &TotpLoginSession{ + UserID: userID, + Email: email, + TokenExpiry: time.Now().Add(totpLoginTTL), + } + + if err := s.cache.SetLoginSession(ctx, tempToken, session, totpLoginTTL); err != nil { + return "", fmt.Errorf("store login session: %w", err) + } + + return tempToken, nil +} + +// GetLoginSession retrieves a login session +func (s *TotpService) GetLoginSession(ctx context.Context, tempToken string) (*TotpLoginSession, error) { + return s.cache.GetLoginSession(ctx, tempToken) +} + +// DeleteLoginSession deletes a login session +func (s *TotpService) DeleteLoginSession(ctx context.Context, tempToken string) error { + return s.cache.DeleteLoginSession(ctx, tempToken) +} + +// IsTotpEnabledForUser checks if TOTP is enabled for a specific user +func (s *TotpService) IsTotpEnabledForUser(ctx context.Context, userID int64) (bool, error) { + user, err := s.userRepo.GetByID(ctx, userID) + if err != nil { + return false, fmt.Errorf("get user: %w", err) + } + return user.TotpEnabled, nil +} + +// MaskEmail masks an email address for display +func MaskEmail(email string) string { + if len(email) < 3 { + return "***" + } + + atIdx := -1 + for i, c := range email { + if c == '@' { + atIdx = i + break + } + } + + if atIdx == -1 || atIdx < 1 { + return email[:1] + "***" + } + + localPart := email[:atIdx] + domain := email[atIdx:] + + if len(localPart) <= 2 { + return localPart[:1] + "***" + domain + } + + return localPart[:1] + "***" + localPart[len(localPart)-1:] + domain +} + +// generateRandomToken generates a random hex-encoded token +func generateRandomToken(byteLength int) (string, error) { + b := make([]byte, byteLength) + if _, err := rand.Read(b); err != nil { + return "", err + } + return hex.EncodeToString(b), nil +} + +// VerificationMethod represents the method required for TOTP operations +type VerificationMethod struct { + Method string `json:"method"` // "email" or "password" +} + +// GetVerificationMethod returns the verification method for TOTP operations +func (s *TotpService) GetVerificationMethod(ctx context.Context) *VerificationMethod { + if s.settingService.IsEmailVerifyEnabled(ctx) { + return &VerificationMethod{Method: "email"} + } + return &VerificationMethod{Method: "password"} +} + +// SendVerifyCode sends an email verification code for TOTP operations +func (s *TotpService) SendVerifyCode(ctx context.Context, userID int64) error { + // Check if email verification is enabled + if !s.settingService.IsEmailVerifyEnabled(ctx) { + return infraerrors.BadRequest("EMAIL_VERIFY_NOT_ENABLED", "email verification is not enabled") + } + + // Get user email + user, err := s.userRepo.GetByID(ctx, userID) + if err != nil { + return fmt.Errorf("get user: %w", err) + } + + // Get site name for email + siteName := s.settingService.GetSiteName(ctx) + + // Send verification code via queue + return s.emailQueueService.EnqueueVerifyCode(user.Email, siteName) +} diff --git a/backend/internal/service/user.go b/backend/internal/service/user.go index c565607e..0f589eb3 100644 --- a/backend/internal/service/user.go +++ b/backend/internal/service/user.go @@ -21,6 +21,11 @@ type User struct { CreatedAt time.Time UpdatedAt time.Time + // TOTP 双因素认证字段 + TotpSecretEncrypted *string // AES-256-GCM 加密的 TOTP 密钥 + TotpEnabled bool // 是否启用 TOTP + TotpEnabledAt *time.Time // TOTP 启用时间 + APIKeys []APIKey Subscriptions []UserSubscription } diff --git a/backend/internal/service/user_service.go b/backend/internal/service/user_service.go index 1734914a..99bf7fd0 100644 --- a/backend/internal/service/user_service.go +++ b/backend/internal/service/user_service.go @@ -38,6 +38,11 @@ type UserRepository interface { UpdateConcurrency(ctx context.Context, id int64, amount int) error ExistsByEmail(ctx context.Context, email string) (bool, error) RemoveGroupFromAllowedGroups(ctx context.Context, groupID int64) (int64, error) + + // TOTP 相关方法 + UpdateTotpSecret(ctx context.Context, userID int64, encryptedSecret *string) error + EnableTotp(ctx context.Context, userID int64) error + DisableTotp(ctx context.Context, userID int64) error } // UpdateProfileRequest 更新用户资料请求 diff --git a/backend/internal/service/wire.go b/backend/internal/service/wire.go index 68a6e5c8..df86b2e7 100644 --- a/backend/internal/service/wire.go +++ b/backend/internal/service/wire.go @@ -271,4 +271,5 @@ var ProviderSet = wire.NewSet( NewAntigravityQuotaFetcher, NewUserAttributeService, NewUsageCache, + NewTotpService, ) diff --git a/backend/migrations/044_add_user_totp.sql b/backend/migrations/044_add_user_totp.sql new file mode 100644 index 00000000..6e157a68 --- /dev/null +++ b/backend/migrations/044_add_user_totp.sql @@ -0,0 +1,12 @@ +-- 为 users 表添加 TOTP 双因素认证字段 +ALTER TABLE users + ADD COLUMN IF NOT EXISTS totp_secret_encrypted TEXT DEFAULT NULL, + ADD COLUMN IF NOT EXISTS totp_enabled BOOLEAN NOT NULL DEFAULT FALSE, + ADD COLUMN IF NOT EXISTS totp_enabled_at TIMESTAMPTZ DEFAULT NULL; + +COMMENT ON COLUMN users.totp_secret_encrypted IS 'AES-256-GCM 加密的 TOTP 密钥'; +COMMENT ON COLUMN users.totp_enabled IS '是否启用 TOTP 双因素认证'; +COMMENT ON COLUMN users.totp_enabled_at IS 'TOTP 启用时间'; + +-- 创建索引以支持快速查询启用 2FA 的用户 +CREATE INDEX IF NOT EXISTS idx_users_totp_enabled ON users(totp_enabled) WHERE deleted_at IS NULL AND totp_enabled = true; diff --git a/deploy/.env.example b/deploy/.env.example index f21a3c62..1e9395a0 100644 --- a/deploy/.env.example +++ b/deploy/.env.example @@ -61,6 +61,18 @@ ADMIN_PASSWORD= JWT_SECRET= JWT_EXPIRE_HOUR=24 +# ----------------------------------------------------------------------------- +# TOTP (2FA) Configuration +# TOTP(双因素认证)配置 +# ----------------------------------------------------------------------------- +# IMPORTANT: Set a fixed encryption key for TOTP secrets. If left empty, a +# random key will be generated on each startup, causing all existing TOTP +# configurations to become invalid (users won't be able to login with 2FA). +# Generate a secure key: openssl rand -hex 32 +# 重要:设置固定的 TOTP 加密密钥。如果留空,每次启动将生成随机密钥, +# 导致现有的 TOTP 配置失效(用户无法使用双因素认证登录)。 +TOTP_ENCRYPTION_KEY= + # ----------------------------------------------------------------------------- # Configuration File (Optional) # ----------------------------------------------------------------------------- diff --git a/deploy/config.example.yaml b/deploy/config.example.yaml index 558b8ef0..98aba8f5 100644 --- a/deploy/config.example.yaml +++ b/deploy/config.example.yaml @@ -403,6 +403,21 @@ jwt: # 令牌过期时间(小时,最大 24) expire_hour: 24 +# ============================================================================= +# TOTP (2FA) Configuration +# TOTP 双因素认证配置 +# ============================================================================= +totp: + # IMPORTANT: Set a fixed encryption key for TOTP secrets. + # 重要:设置固定的 TOTP 加密密钥。 + # If left empty, a random key will be generated on each startup, causing all + # existing TOTP configurations to become invalid (users won't be able to + # login with 2FA). + # 如果留空,每次启动将生成随机密钥,导致现有的 TOTP 配置失效(用户无法使用 + # 双因素认证登录)。 + # Generate with / 生成命令: openssl rand -hex 32 + encryption_key: "" + # ============================================================================= # LinuxDo Connect OAuth Login (SSO) # LinuxDo Connect OAuth 登录(用于 Sub2API 用户登录) diff --git a/deploy/docker-compose.yml b/deploy/docker-compose.yml index 484df3a8..ac6008d2 100644 --- a/deploy/docker-compose.yml +++ b/deploy/docker-compose.yml @@ -79,6 +79,16 @@ services: - JWT_SECRET=${JWT_SECRET:-} - JWT_EXPIRE_HOUR=${JWT_EXPIRE_HOUR:-24} + # ======================================================================= + # TOTP (2FA) Configuration + # ======================================================================= + # IMPORTANT: Set a fixed encryption key for TOTP secrets. If left empty, + # a random key will be generated on each startup, causing all existing + # TOTP configurations to become invalid (users won't be able to login + # with 2FA). + # Generate a secure key: openssl rand -hex 32 + - TOTP_ENCRYPTION_KEY=${TOTP_ENCRYPTION_KEY:-} + # ======================================================================= # Timezone Configuration # This affects ALL time operations in the application: diff --git a/frontend/package-lock.json b/frontend/package-lock.json index e6c6144e..5c43a6a8 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -15,6 +15,7 @@ "driver.js": "^1.4.0", "file-saver": "^2.0.5", "pinia": "^2.1.7", + "qrcode": "^1.5.4", "vue": "^3.4.0", "vue-chartjs": "^5.3.0", "vue-i18n": "^9.14.5", @@ -25,6 +26,7 @@ "@types/file-saver": "^2.0.7", "@types/mdx": "^2.0.13", "@types/node": "^20.10.5", + "@types/qrcode": "^1.5.6", "@typescript-eslint/eslint-plugin": "^7.18.0", "@typescript-eslint/parser": "^7.18.0", "@vitejs/plugin-vue": "^5.2.3", @@ -1680,6 +1682,16 @@ "integrity": "sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw==", "license": "MIT" }, + "node_modules/@types/qrcode": { + "version": "1.5.6", + "resolved": "https://registry.npmmirror.com/@types/qrcode/-/qrcode-1.5.6.tgz", + "integrity": "sha512-te7NQcV2BOvdj2b1hCAHzAoMNuj65kNBMz0KBaxM6c3VGBOhU0dURQKOtH8CFNI/dsKkwlv32p26qYQTWoB5bw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, "node_modules/@types/web-bluetooth": { "version": "0.0.20", "resolved": "https://registry.npmjs.org/@types/web-bluetooth/-/web-bluetooth-0.0.20.tgz", @@ -2354,7 +2366,6 @@ "version": "5.0.1", "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "dev": true, "license": "MIT", "engines": { "node": ">=8" @@ -2364,7 +2375,6 @@ "version": "4.3.0", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, "license": "MIT", "dependencies": { "color-convert": "^2.0.1" @@ -2646,6 +2656,15 @@ "node": ">=6" } }, + "node_modules/camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmmirror.com/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, "node_modules/camelcase-css": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz", @@ -2784,6 +2803,51 @@ "node": ">= 6" } }, + "node_modules/cliui": { + "version": "6.0.0", + "resolved": "https://registry.npmmirror.com/cliui/-/cliui-6.0.0.tgz", + "integrity": "sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ==", + "license": "ISC", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^6.2.0" + } + }, + "node_modules/cliui/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmmirror.com/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "license": "MIT" + }, + "node_modules/cliui/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmmirror.com/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cliui/node_modules/wrap-ansi": { + "version": "6.2.0", + "resolved": "https://registry.npmmirror.com/wrap-ansi/-/wrap-ansi-6.2.0.tgz", + "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/clsx": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", @@ -2806,7 +2870,6 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, "license": "MIT", "dependencies": { "color-name": "~1.1.4" @@ -2819,7 +2882,6 @@ "version": "1.1.4", "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true, "license": "MIT" }, "node_modules/combined-stream": { @@ -2989,6 +3051,15 @@ } } }, + "node_modules/decamelize": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/decamelize/-/decamelize-1.2.0.tgz", + "integrity": "sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/decimal.js": { "version": "10.6.0", "resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.6.0.tgz", @@ -3029,6 +3100,12 @@ "dev": true, "license": "Apache-2.0" }, + "node_modules/dijkstrajs": { + "version": "1.0.3", + "resolved": "https://registry.npmmirror.com/dijkstrajs/-/dijkstrajs-1.0.3.tgz", + "integrity": "sha512-qiSlmBq9+BCdCA/L46dw8Uy93mloxsPSbwnm5yrKn2vMPiy8KyAskTF6zuV/j5BMsmOGZDPs7KjU+mjb670kfA==", + "license": "MIT" + }, "node_modules/dir-glob": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", @@ -3759,6 +3836,15 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmmirror.com/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "license": "ISC", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, "node_modules/get-intrinsic": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", @@ -4156,7 +4242,6 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "dev": true, "license": "MIT", "engines": { "node": ">=8" @@ -4883,6 +4968,15 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmmirror.com/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, "node_modules/package-json-from-dist": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", @@ -4957,7 +5051,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", - "dev": true, "license": "MIT", "engines": { "node": ">=8" @@ -5093,6 +5186,15 @@ "node": ">= 6" } }, + "node_modules/pngjs": { + "version": "5.0.0", + "resolved": "https://registry.npmmirror.com/pngjs/-/pngjs-5.0.0.tgz", + "integrity": "sha512-40QW5YalBNfQo5yRYmiw7Yz6TKKVr3h6970B2YE+3fQpsWcrbj1PzJgxeJ19DRQjhMbKPIuMY8rFaXc8moolVw==", + "license": "MIT", + "engines": { + "node": ">=10.13.0" + } + }, "node_modules/polished": { "version": "4.3.1", "resolved": "https://registry.npmjs.org/polished/-/polished-4.3.1.tgz", @@ -5313,6 +5415,23 @@ "node": ">=6" } }, + "node_modules/qrcode": { + "version": "1.5.4", + "resolved": "https://registry.npmmirror.com/qrcode/-/qrcode-1.5.4.tgz", + "integrity": "sha512-1ca71Zgiu6ORjHqFBDpnSMTR2ReToX4l1Au1VFLyVeBTFavzQnv5JxMFr3ukHVKpSrSA2MCk0lNJSykjUfz7Zg==", + "license": "MIT", + "dependencies": { + "dijkstrajs": "^1.0.1", + "pngjs": "^5.0.0", + "yargs": "^15.3.1" + }, + "bin": { + "qrcode": "bin/qrcode" + }, + "engines": { + "node": ">=10.13.0" + } + }, "node_modules/querystringify": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz", @@ -5370,6 +5489,21 @@ "node": ">=8.10.0" } }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmmirror.com/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/require-main-filename": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/require-main-filename/-/require-main-filename-2.0.0.tgz", + "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==", + "license": "ISC" + }, "node_modules/requires-port": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", @@ -5543,6 +5677,12 @@ "node": ">=10" } }, + "node_modules/set-blocking": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/set-blocking/-/set-blocking-2.0.0.tgz", + "integrity": "sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==", + "license": "ISC" + }, "node_modules/shebang-command": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", @@ -5714,7 +5854,6 @@ "version": "6.0.1", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, "license": "MIT", "dependencies": { "ansi-regex": "^5.0.1" @@ -6715,6 +6854,12 @@ "node": ">= 8" } }, + "node_modules/which-module": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/which-module/-/which-module-2.0.1.tgz", + "integrity": "sha512-iBdZ57RDvnOR9AGBhML2vFZf7h8vmBjhoaZqODJBFWHVtKkDmKuHai3cx5PgVMrX5YDNp27AofYbAwctSS+vhQ==", + "license": "ISC" + }, "node_modules/why-is-node-running": { "version": "2.3.0", "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", @@ -6928,6 +7073,12 @@ "dev": true, "license": "MIT" }, + "node_modules/y18n": { + "version": "4.0.3", + "resolved": "https://registry.npmmirror.com/y18n/-/y18n-4.0.3.tgz", + "integrity": "sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==", + "license": "ISC" + }, "node_modules/yaml": { "version": "1.10.2", "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", @@ -6937,6 +7088,113 @@ "node": ">= 6" } }, + "node_modules/yargs": { + "version": "15.4.1", + "resolved": "https://registry.npmmirror.com/yargs/-/yargs-15.4.1.tgz", + "integrity": "sha512-aePbxDmcYW++PaqBsJ+HYUFwCdv4LVvdnhBy78E57PIor8/OVvhMrADFFEDh8DHDFRv/O9i3lPhsENjO7QX0+A==", + "license": "MIT", + "dependencies": { + "cliui": "^6.0.0", + "decamelize": "^1.2.0", + "find-up": "^4.1.0", + "get-caller-file": "^2.0.1", + "require-directory": "^2.1.1", + "require-main-filename": "^2.0.0", + "set-blocking": "^2.0.0", + "string-width": "^4.2.0", + "which-module": "^2.0.0", + "y18n": "^4.0.0", + "yargs-parser": "^18.1.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/yargs-parser": { + "version": "18.1.3", + "resolved": "https://registry.npmmirror.com/yargs-parser/-/yargs-parser-18.1.3.tgz", + "integrity": "sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ==", + "license": "ISC", + "dependencies": { + "camelcase": "^5.0.0", + "decamelize": "^1.2.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/yargs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmmirror.com/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "license": "MIT" + }, + "node_modules/yargs/node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmmirror.com/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "license": "MIT", + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/yargs/node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmmirror.com/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "license": "MIT", + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/yargs/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmmirror.com/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "license": "MIT", + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/yargs/node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmmirror.com/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "license": "MIT", + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/yargs/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmmirror.com/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/yocto-queue": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", diff --git a/frontend/package.json b/frontend/package.json index c984cd96..8e1fdb4b 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -22,6 +22,7 @@ "driver.js": "^1.4.0", "file-saver": "^2.0.5", "pinia": "^2.1.7", + "qrcode": "^1.5.4", "vue": "^3.4.0", "vue-chartjs": "^5.3.0", "vue-i18n": "^9.14.5", @@ -32,6 +33,7 @@ "@types/file-saver": "^2.0.7", "@types/mdx": "^2.0.13", "@types/node": "^20.10.5", + "@types/qrcode": "^1.5.6", "@typescript-eslint/eslint-plugin": "^7.18.0", "@typescript-eslint/parser": "^7.18.0", "@vitejs/plugin-vue": "^5.2.3", diff --git a/frontend/pnpm-lock.yaml b/frontend/pnpm-lock.yaml index 1a808176..df82dcdb 100644 --- a/frontend/pnpm-lock.yaml +++ b/frontend/pnpm-lock.yaml @@ -29,6 +29,9 @@ importers: pinia: specifier: ^2.1.7 version: 2.3.1(typescript@5.6.3)(vue@3.5.26(typescript@5.6.3)) + qrcode: + specifier: ^1.5.4 + version: 1.5.4 vue: specifier: ^3.4.0 version: 3.5.26(typescript@5.6.3) @@ -54,6 +57,9 @@ importers: '@types/node': specifier: ^20.10.5 version: 20.19.27 + '@types/qrcode': + specifier: ^1.5.6 + version: 1.5.6 '@typescript-eslint/eslint-plugin': specifier: ^7.18.0 version: 7.18.0(@typescript-eslint/parser@7.18.0(eslint@8.57.1)(typescript@5.6.3))(eslint@8.57.1)(typescript@5.6.3) @@ -1239,56 +1245,67 @@ packages: resolution: {integrity: sha512-EHMUcDwhtdRGlXZsGSIuXSYwD5kOT9NVnx9sqzYiwAc91wfYOE1g1djOEDseZJKKqtHAHGwnGPQu3kytmfaXLQ==} cpu: [arm] os: [linux] + libc: [glibc] '@rollup/rollup-linux-arm-musleabihf@4.54.0': resolution: {integrity: sha512-+pBrqEjaakN2ySv5RVrj/qLytYhPKEUwk+e3SFU5jTLHIcAtqh2rLrd/OkbNuHJpsBgxsD8ccJt5ga/SeG0JmA==} cpu: [arm] os: [linux] + libc: [musl] '@rollup/rollup-linux-arm64-gnu@4.54.0': resolution: {integrity: sha512-NSqc7rE9wuUaRBsBp5ckQ5CVz5aIRKCwsoa6WMF7G01sX3/qHUw/z4pv+D+ahL1EIKy6Enpcnz1RY8pf7bjwng==} cpu: [arm64] os: [linux] + libc: [glibc] '@rollup/rollup-linux-arm64-musl@4.54.0': resolution: {integrity: sha512-gr5vDbg3Bakga5kbdpqx81m2n9IX8M6gIMlQQIXiLTNeQW6CucvuInJ91EuCJ/JYvc+rcLLsDFcfAD1K7fMofg==} cpu: [arm64] os: [linux] + libc: [musl] '@rollup/rollup-linux-loong64-gnu@4.54.0': resolution: {integrity: sha512-gsrtB1NA3ZYj2vq0Rzkylo9ylCtW/PhpLEivlgWe0bpgtX5+9j9EZa0wtZiCjgu6zmSeZWyI/e2YRX1URozpIw==} cpu: [loong64] os: [linux] + libc: [glibc] '@rollup/rollup-linux-ppc64-gnu@4.54.0': resolution: {integrity: sha512-y3qNOfTBStmFNq+t4s7Tmc9hW2ENtPg8FeUD/VShI7rKxNW7O4fFeaYbMsd3tpFlIg1Q8IapFgy7Q9i2BqeBvA==} cpu: [ppc64] os: [linux] + libc: [glibc] '@rollup/rollup-linux-riscv64-gnu@4.54.0': resolution: {integrity: sha512-89sepv7h2lIVPsFma8iwmccN7Yjjtgz0Rj/Ou6fEqg3HDhpCa+Et+YSufy27i6b0Wav69Qv4WBNl3Rs6pwhebQ==} cpu: [riscv64] os: [linux] + libc: [glibc] '@rollup/rollup-linux-riscv64-musl@4.54.0': resolution: {integrity: sha512-ZcU77ieh0M2Q8Ur7D5X7KvK+UxbXeDHwiOt/CPSBTI1fBmeDMivW0dPkdqkT4rOgDjrDDBUed9x4EgraIKoR2A==} cpu: [riscv64] os: [linux] + libc: [musl] '@rollup/rollup-linux-s390x-gnu@4.54.0': resolution: {integrity: sha512-2AdWy5RdDF5+4YfG/YesGDDtbyJlC9LHmL6rZw6FurBJ5n4vFGupsOBGfwMRjBYH7qRQowT8D/U4LoSvVwOhSQ==} cpu: [s390x] os: [linux] + libc: [glibc] '@rollup/rollup-linux-x64-gnu@4.54.0': resolution: {integrity: sha512-WGt5J8Ij/rvyqpFexxk3ffKqqbLf9AqrTBbWDk7ApGUzaIs6V+s2s84kAxklFwmMF/vBNGrVdYgbblCOFFezMQ==} cpu: [x64] os: [linux] + libc: [glibc] '@rollup/rollup-linux-x64-musl@4.54.0': resolution: {integrity: sha512-JzQmb38ATzHjxlPHuTH6tE7ojnMKM2kYNzt44LO/jJi8BpceEC8QuXYA908n8r3CNuG/B3BV8VR3Hi1rYtmPiw==} cpu: [x64] os: [linux] + libc: [musl] '@rollup/rollup-openharmony-arm64@4.54.0': resolution: {integrity: sha512-huT3fd0iC7jigGh7n3q/+lfPcXxBi+om/Rs3yiFxjvSxbSB6aohDFXbWvlspaqjeOh+hx7DDHS+5Es5qRkWkZg==} @@ -1479,6 +1496,9 @@ packages: '@types/parse-json@4.0.2': resolution: {integrity: sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw==} + '@types/qrcode@1.5.6': + resolution: {integrity: sha512-te7NQcV2BOvdj2b1hCAHzAoMNuj65kNBMz0KBaxM6c3VGBOhU0dURQKOtH8CFNI/dsKkwlv32p26qYQTWoB5bw==} + '@types/react@19.2.7': resolution: {integrity: sha512-MWtvHrGZLFttgeEj28VXHxpmwYbor/ATPYbBfSFZEIRK0ecCFLl2Qo55z52Hss+UV9CRN7trSeq1zbgx7YDWWg==} @@ -1832,6 +1852,10 @@ packages: resolution: {integrity: sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==} engines: {node: '>= 6'} + camelcase@5.3.1: + resolution: {integrity: sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==} + engines: {node: '>=6'} + caniuse-lite@1.0.30001761: resolution: {integrity: sha512-JF9ptu1vP2coz98+5051jZ4PwQgd2ni8A+gYSN7EA7dPKIMf0pDlSUxhdmVOaV3/fYK5uWBkgSXJaRLr4+3A6g==} @@ -1895,6 +1919,9 @@ packages: classnames@2.5.1: resolution: {integrity: sha512-saHYOzhIQs6wy2sVxTM6bUDsQO4F50V9RQ22qBpEdCW+I+/Wmke2HOl6lS6dTpdxVhb88/I6+Hs+438c3lfUow==} + cliui@6.0.0: + resolution: {integrity: sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ==} + clsx@1.2.1: resolution: {integrity: sha512-EcR6r5a8bj6pu3ycsa/E/cKVGuTgZJZdsyUYHOksG/UHIiKfjxzRxYJpyVBwYaQeOvghal9fcc4PidlgzugAQg==} engines: {node: '>=6'} @@ -2164,6 +2191,10 @@ packages: supports-color: optional: true + decamelize@1.2.0: + resolution: {integrity: sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==} + engines: {node: '>=0.10.0'} + decimal.js@10.6.0: resolution: {integrity: sha512-YpgQiITW3JXGntzdUmyUR1V812Hn8T1YVXhCu+wO3OpS4eU9l4YdD3qjyiKdV6mvV29zapkMeD390UVEf2lkUg==} @@ -2198,6 +2229,9 @@ packages: didyoumean@1.2.2: resolution: {integrity: sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==} + dijkstrajs@1.0.3: + resolution: {integrity: sha512-qiSlmBq9+BCdCA/L46dw8Uy93mloxsPSbwnm5yrKn2vMPiy8KyAskTF6zuV/j5BMsmOGZDPs7KjU+mjb670kfA==} + dir-glob@3.0.1: resolution: {integrity: sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==} engines: {node: '>=8'} @@ -2424,6 +2458,10 @@ packages: find-root@1.1.0: resolution: {integrity: sha512-NKfW6bec6GfKc0SGx1e07QZY9PE99u0Bft/0rzSD5k3sO/vwkVUpDUKVm5Gpp5Ue3YfShPFTX2070tDs5kB9Ng==} + find-up@4.1.0: + resolution: {integrity: sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==} + engines: {node: '>=8'} + find-up@5.0.0: resolution: {integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==} engines: {node: '>=10'} @@ -2488,6 +2526,10 @@ packages: function-bind@1.1.2: resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} + get-caller-file@2.0.5: + resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} + engines: {node: 6.* || 8.* || >= 10.*} + get-east-asian-width@1.4.0: resolution: {integrity: sha512-QZjmEOC+IT1uk6Rx0sX22V6uHWVwbdbxf1faPqJ1QhLdGgsRGCZoyaQBm/piRdJy/D2um6hM1UP7ZEeQ4EkP+Q==} engines: {node: '>=18'} @@ -2856,6 +2898,10 @@ packages: lit@3.3.2: resolution: {integrity: sha512-NF9zbsP79l4ao2SNrH3NkfmFgN/hBYSQo90saIVI1o5GpjAdCPVstVzO1MrLOakHoEhYkrtRjPK6Ob521aoYWQ==} + locate-path@5.0.0: + resolution: {integrity: sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==} + engines: {node: '>=8'} + locate-path@6.0.0: resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} engines: {node: '>=10'} @@ -3239,14 +3285,26 @@ packages: resolution: {integrity: sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==} engines: {node: '>= 0.8.0'} + p-limit@2.3.0: + resolution: {integrity: sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==} + engines: {node: '>=6'} + p-limit@3.1.0: resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} engines: {node: '>=10'} + p-locate@4.1.0: + resolution: {integrity: sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==} + engines: {node: '>=8'} + p-locate@5.0.0: resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==} engines: {node: '>=10'} + p-try@2.2.0: + resolution: {integrity: sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==} + engines: {node: '>=6'} + package-json-from-dist@1.0.1: resolution: {integrity: sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==} @@ -3341,6 +3399,10 @@ packages: pkg-types@1.3.1: resolution: {integrity: sha512-/Jm5M4RvtBFVkKWRu2BLUTNP8/M2a+UwuAX+ae4770q1qVGtfjG+WTCupoZixokjmHiry8uI+dlY8KXYV5HVVQ==} + pngjs@5.0.0: + resolution: {integrity: sha512-40QW5YalBNfQo5yRYmiw7Yz6TKKVr3h6970B2YE+3fQpsWcrbj1PzJgxeJ19DRQjhMbKPIuMY8rFaXc8moolVw==} + engines: {node: '>=10.13.0'} + points-on-curve@0.2.0: resolution: {integrity: sha512-0mYKnYYe9ZcqMCWhUjItv/oHjvgEsfKvnUTg8sAtnHr3GVy7rGkXCb6d5cSyqrWqL4k81b9CPg3urd+T7aop3A==} @@ -3421,6 +3483,11 @@ packages: resolution: {integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==} engines: {node: '>=6'} + qrcode@1.5.4: + resolution: {integrity: sha512-1ca71Zgiu6ORjHqFBDpnSMTR2ReToX4l1Au1VFLyVeBTFavzQnv5JxMFr3ukHVKpSrSA2MCk0lNJSykjUfz7Zg==} + engines: {node: '>=10.13.0'} + hasBin: true + query-string@9.3.1: resolution: {integrity: sha512-5fBfMOcDi5SA9qj5jZhWAcTtDfKF5WFdd2uD9nVNlbxVv1baq65aALy6qofpNEGELHvisjjasxQp7BlM9gvMzw==} engines: {node: '>=18'} @@ -3664,6 +3731,13 @@ packages: remark-stringify@11.0.0: resolution: {integrity: sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==} + require-directory@2.1.1: + resolution: {integrity: sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==} + engines: {node: '>=0.10.0'} + + require-main-filename@2.0.0: + resolution: {integrity: sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==} + requires-port@1.0.0: resolution: {integrity: sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==} @@ -3739,6 +3813,9 @@ packages: engines: {node: '>=10'} hasBin: true + set-blocking@2.0.0: + resolution: {integrity: sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==} + set-value@2.0.1: resolution: {integrity: sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw==} engines: {node: '>=0.10.0'} @@ -4263,6 +4340,9 @@ packages: resolution: {integrity: sha512-De72GdQZzNTUBBChsXueQUnPKDkg/5A5zp7pFDuQAj5UFoENpiACU0wlCvzpAGnTkj++ihpKwKyYewn/XNUbKw==} engines: {node: '>=18'} + which-module@2.0.1: + resolution: {integrity: sha512-iBdZ57RDvnOR9AGBhML2vFZf7h8vmBjhoaZqODJBFWHVtKkDmKuHai3cx5PgVMrX5YDNp27AofYbAwctSS+vhQ==} + which@2.0.2: resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} engines: {node: '>= 8'} @@ -4285,6 +4365,10 @@ packages: resolution: {integrity: sha512-OELeY0Q61OXpdUfTp+oweA/vtLVg5VDOXh+3he3PNzLGG/y0oylSOC1xRVj0+l4vQ3tj/bB1HVHv1ocXkQceFA==} engines: {node: '>=0.8'} + wrap-ansi@6.2.0: + resolution: {integrity: sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==} + engines: {node: '>=8'} + wrap-ansi@7.0.0: resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==} engines: {node: '>=10'} @@ -4324,10 +4408,21 @@ packages: xmlchars@2.2.0: resolution: {integrity: sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==} + y18n@4.0.3: + resolution: {integrity: sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==} + yaml@1.10.2: resolution: {integrity: sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==} engines: {node: '>= 6'} + yargs-parser@18.1.3: + resolution: {integrity: sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ==} + engines: {node: '>=6'} + + yargs@15.4.1: + resolution: {integrity: sha512-aePbxDmcYW++PaqBsJ+HYUFwCdv4LVvdnhBy78E57PIor8/OVvhMrADFFEDh8DHDFRv/O9i3lPhsENjO7QX0+A==} + engines: {node: '>=8'} + yocto-queue@0.1.0: resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} engines: {node: '>=10'} @@ -5838,6 +5933,10 @@ snapshots: '@types/parse-json@4.0.2': {} + '@types/qrcode@1.5.6': + dependencies: + '@types/node': 20.19.27 + '@types/react@19.2.7': dependencies: csstype: 3.2.3 @@ -6321,6 +6420,8 @@ snapshots: camelcase-css@2.0.1: {} + camelcase@5.3.1: {} + caniuse-lite@1.0.30001761: {} ccount@2.0.1: {} @@ -6395,6 +6496,12 @@ snapshots: classnames@2.5.1: {} + cliui@6.0.0: + dependencies: + string-width: 4.2.3 + strip-ansi: 6.0.1 + wrap-ansi: 6.2.0 + clsx@1.2.1: {} clsx@2.1.1: {} @@ -6668,6 +6775,8 @@ snapshots: dependencies: ms: 2.1.3 + decamelize@1.2.0: {} + decimal.js@10.6.0: {} decode-named-character-reference@1.2.0: @@ -6694,6 +6803,8 @@ snapshots: didyoumean@1.2.2: {} + dijkstrajs@1.0.3: {} + dir-glob@3.0.1: dependencies: path-type: 4.0.0 @@ -6978,6 +7089,11 @@ snapshots: find-root@1.1.0: {} + find-up@4.1.0: + dependencies: + locate-path: 5.0.0 + path-exists: 4.0.0 + find-up@5.0.0: dependencies: locate-path: 6.0.0 @@ -7029,6 +7145,8 @@ snapshots: function-bind@1.1.2: {} + get-caller-file@2.0.5: {} + get-east-asian-width@1.4.0: {} get-intrinsic@1.3.0: @@ -7521,6 +7639,10 @@ snapshots: lit-element: 4.2.2 lit-html: 3.3.2 + locate-path@5.0.0: + dependencies: + p-locate: 4.1.0 + locate-path@6.0.0: dependencies: p-locate: 5.0.0 @@ -8194,14 +8316,24 @@ snapshots: type-check: 0.4.0 word-wrap: 1.2.5 + p-limit@2.3.0: + dependencies: + p-try: 2.2.0 + p-limit@3.1.0: dependencies: yocto-queue: 0.1.0 + p-locate@4.1.0: + dependencies: + p-limit: 2.3.0 + p-locate@5.0.0: dependencies: p-limit: 3.1.0 + p-try@2.2.0: {} + package-json-from-dist@1.0.1: {} package-manager-detector@1.6.0: {} @@ -8284,6 +8416,8 @@ snapshots: mlly: 1.8.0 pathe: 2.0.3 + pngjs@5.0.0: {} + points-on-curve@0.2.0: {} points-on-path@0.2.1: @@ -8352,6 +8486,12 @@ snapshots: punycode@2.3.1: {} + qrcode@1.5.4: + dependencies: + dijkstrajs: 1.0.3 + pngjs: 5.0.0 + yargs: 15.4.1 + query-string@9.3.1: dependencies: decode-uri-component: 0.4.1 @@ -8703,6 +8843,10 @@ snapshots: mdast-util-to-markdown: 2.1.2 unified: 11.0.5 + require-directory@2.1.1: {} + + require-main-filename@2.0.0: {} + requires-port@1.0.0: {} reselect@5.1.1: {} @@ -8788,6 +8932,8 @@ snapshots: semver@7.7.3: {} + set-blocking@2.0.0: {} + set-value@2.0.1: dependencies: extend-shallow: 2.0.1 @@ -9298,6 +9444,8 @@ snapshots: tr46: 5.1.1 webidl-conversions: 7.0.0 + which-module@2.0.1: {} + which@2.0.2: dependencies: isexe: 2.0.0 @@ -9313,6 +9461,12 @@ snapshots: word@0.3.0: {} + wrap-ansi@6.2.0: + dependencies: + ansi-styles: 4.3.0 + string-width: 4.2.3 + strip-ansi: 6.0.1 + wrap-ansi@7.0.0: dependencies: ansi-styles: 4.3.0 @@ -9345,8 +9499,29 @@ snapshots: xmlchars@2.2.0: {} + y18n@4.0.3: {} + yaml@1.10.2: {} + yargs-parser@18.1.3: + dependencies: + camelcase: 5.3.1 + decamelize: 1.2.0 + + yargs@15.4.1: + dependencies: + cliui: 6.0.0 + decamelize: 1.2.0 + find-up: 4.1.0 + get-caller-file: 2.0.5 + require-directory: 2.1.1 + require-main-filename: 2.0.0 + set-blocking: 2.0.0 + string-width: 4.2.3 + which-module: 2.0.1 + y18n: 4.0.3 + yargs-parser: 18.1.3 + yocto-queue@0.1.0: {} zustand@3.7.2(react@19.2.3): diff --git a/frontend/src/api/admin/settings.ts b/frontend/src/api/admin/settings.ts index 76624bb9..10ec4d8e 100644 --- a/frontend/src/api/admin/settings.ts +++ b/frontend/src/api/admin/settings.ts @@ -14,6 +14,8 @@ export interface SystemSettings { email_verify_enabled: boolean promo_code_enabled: boolean password_reset_enabled: boolean + totp_enabled: boolean // TOTP 双因素认证 + totp_encryption_key_configured: boolean // TOTP 加密密钥是否已配置 // Default settings default_balance: number default_concurrency: number @@ -68,6 +70,7 @@ export interface UpdateSettingsRequest { email_verify_enabled?: boolean promo_code_enabled?: boolean password_reset_enabled?: boolean + totp_enabled?: boolean // TOTP 双因素认证 default_balance?: number default_concurrency?: number site_name?: string diff --git a/frontend/src/api/auth.ts b/frontend/src/api/auth.ts index 8c669c3e..bbd5ed74 100644 --- a/frontend/src/api/auth.ts +++ b/frontend/src/api/auth.ts @@ -11,9 +11,23 @@ import type { CurrentUserResponse, SendVerifyCodeRequest, SendVerifyCodeResponse, - PublicSettings + PublicSettings, + TotpLoginResponse, + TotpLogin2FARequest } from '@/types' +/** + * Login response type - can be either full auth or 2FA required + */ +export type LoginResponse = AuthResponse | TotpLoginResponse + +/** + * Type guard to check if login response requires 2FA + */ +export function isTotp2FARequired(response: LoginResponse): response is TotpLoginResponse { + return 'requires_2fa' in response && response.requires_2fa === true +} + /** * Store authentication token in localStorage */ @@ -38,11 +52,28 @@ export function clearAuthToken(): void { /** * User login - * @param credentials - Username and password + * @param credentials - Email and password + * @returns Authentication response with token and user data, or 2FA required response + */ +export async function login(credentials: LoginRequest): Promise { + const { data } = await apiClient.post('/auth/login', credentials) + + // Only store token if 2FA is not required + if (!isTotp2FARequired(data)) { + setAuthToken(data.access_token) + localStorage.setItem('auth_user', JSON.stringify(data.user)) + } + + return data +} + +/** + * Complete login with 2FA code + * @param request - Temp token and TOTP code * @returns Authentication response with token and user data */ -export async function login(credentials: LoginRequest): Promise { - const { data } = await apiClient.post('/auth/login', credentials) +export async function login2FA(request: TotpLogin2FARequest): Promise { + const { data } = await apiClient.post('/auth/login/2fa', request) // Store token and user data setAuthToken(data.access_token) @@ -186,6 +217,8 @@ export async function resetPassword(request: ResetPasswordRequest): Promise { + const { data } = await apiClient.get('/user/totp/status') + return data +} + +/** + * Get verification method for TOTP operations + * @returns Method ('email' or 'password') required for setup/disable + */ +export async function getVerificationMethod(): Promise { + const { data } = await apiClient.get('/user/totp/verification-method') + return data +} + +/** + * Send email verification code for TOTP operations + * @returns Success response + */ +export async function sendVerifyCode(): Promise<{ success: boolean }> { + const { data } = await apiClient.post<{ success: boolean }>('/user/totp/send-code') + return data +} + +/** + * Initiate TOTP setup - generates secret and QR code + * @param request - Email code or password depending on verification method + * @returns Setup response with secret, QR code URL, and setup token + */ +export async function initiateSetup(request?: TotpSetupRequest): Promise { + const { data } = await apiClient.post('/user/totp/setup', request || {}) + return data +} + +/** + * Complete TOTP setup by verifying the code + * @param request - TOTP code and setup token + * @returns Enable response with success status and enabled timestamp + */ +export async function enable(request: TotpEnableRequest): Promise { + const { data } = await apiClient.post('/user/totp/enable', request) + return data +} + +/** + * Disable TOTP for current user + * @param request - Email code or password depending on verification method + * @returns Success response + */ +export async function disable(request: TotpDisableRequest): Promise<{ success: boolean }> { + const { data } = await apiClient.post<{ success: boolean }>('/user/totp/disable', request) + return data +} + +export const totpAPI = { + getStatus, + getVerificationMethod, + sendVerifyCode, + initiateSetup, + enable, + disable +} + +export default totpAPI diff --git a/frontend/src/components/auth/TotpLoginModal.vue b/frontend/src/components/auth/TotpLoginModal.vue new file mode 100644 index 00000000..03fa718d --- /dev/null +++ b/frontend/src/components/auth/TotpLoginModal.vue @@ -0,0 +1,176 @@ + + + diff --git a/frontend/src/components/user/profile/ProfileTotpCard.vue b/frontend/src/components/user/profile/ProfileTotpCard.vue new file mode 100644 index 00000000..77413e52 --- /dev/null +++ b/frontend/src/components/user/profile/ProfileTotpCard.vue @@ -0,0 +1,154 @@ + + + diff --git a/frontend/src/components/user/profile/TotpDisableDialog.vue b/frontend/src/components/user/profile/TotpDisableDialog.vue new file mode 100644 index 00000000..daca4067 --- /dev/null +++ b/frontend/src/components/user/profile/TotpDisableDialog.vue @@ -0,0 +1,179 @@ + + + diff --git a/frontend/src/components/user/profile/TotpSetupModal.vue b/frontend/src/components/user/profile/TotpSetupModal.vue new file mode 100644 index 00000000..3d9b79ec --- /dev/null +++ b/frontend/src/components/user/profile/TotpSetupModal.vue @@ -0,0 +1,400 @@ + + + diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts index 1880e4cc..279bcef6 100644 --- a/frontend/src/i18n/locales/en.ts +++ b/frontend/src/i18n/locales/en.ts @@ -146,7 +146,10 @@ export default { balance: 'Balance', available: 'Available', copiedToClipboard: 'Copied to clipboard', + copied: 'Copied', copyFailed: 'Failed to copy', + verifying: 'Verifying...', + processing: 'Processing...', contactSupport: 'Contact Support', add: 'Add', invalidEmail: 'Please enter a valid email address', @@ -583,7 +586,46 @@ export default { passwordsNotMatch: 'New passwords do not match', passwordTooShort: 'Password must be at least 8 characters long', passwordChangeSuccess: 'Password changed successfully', - passwordChangeFailed: 'Failed to change password' + passwordChangeFailed: 'Failed to change password', + // TOTP 2FA + totp: { + title: 'Two-Factor Authentication (2FA)', + description: 'Enhance account security with Google Authenticator or similar apps', + enabled: 'Enabled', + enabledAt: 'Enabled at', + notEnabled: 'Not Enabled', + notEnabledHint: 'Enable two-factor authentication to enhance account security', + enable: 'Enable', + disable: 'Disable', + featureDisabled: 'Feature Unavailable', + featureDisabledHint: 'Two-factor authentication has not been enabled by the administrator', + setupTitle: 'Set Up Two-Factor Authentication', + setupStep1: 'Scan the QR code below with your authenticator app', + setupStep2: 'Enter the 6-digit code from your app', + manualEntry: "Can't scan? Enter the key manually:", + enterCode: 'Enter 6-digit code', + verify: 'Verify', + setupFailed: 'Failed to get setup information', + verifyFailed: 'Invalid code, please try again', + enableSuccess: 'Two-factor authentication enabled', + disableTitle: 'Disable Two-Factor Authentication', + disableWarning: 'After disabling, you will no longer need a verification code to log in. This may reduce your account security.', + enterPassword: 'Enter your current password to confirm', + confirmDisable: 'Confirm Disable', + disableSuccess: 'Two-factor authentication disabled', + disableFailed: 'Failed to disable, please check your password', + loginTitle: 'Two-Factor Authentication', + loginHint: 'Enter the 6-digit code from your authenticator app', + loginFailed: 'Verification failed, please try again', + // New translations for email verification + verifyEmailFirst: 'Please verify your email first', + verifyPasswordFirst: 'Please verify your identity first', + emailCode: 'Email Verification Code', + enterEmailCode: 'Enter 6-digit code', + sendCode: 'Send Code', + codeSent: 'Verification code sent to your email', + sendCodeFailed: 'Failed to send verification code' + } }, // Empty States @@ -2774,7 +2816,11 @@ export default { promoCode: 'Promo Code', promoCodeHint: 'Allow users to use promo codes during registration', passwordReset: 'Password Reset', - passwordResetHint: 'Allow users to reset their password via email' + passwordResetHint: 'Allow users to reset their password via email', + totp: 'Two-Factor Authentication (2FA)', + totpHint: 'Allow users to use authenticator apps like Google Authenticator', + totpKeyNotConfigured: + 'Please configure TOTP_ENCRYPTION_KEY in environment variables first. Generate a key with: openssl rand -hex 32' }, turnstile: { title: 'Cloudflare Turnstile', diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index 49b24abb..ea7ceb61 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -143,7 +143,10 @@ export default { balance: '余额', available: '可用', copiedToClipboard: '已复制到剪贴板', + copied: '已复制', copyFailed: '复制失败', + verifying: '验证中...', + processing: '处理中...', contactSupport: '联系客服', add: '添加', invalidEmail: '请输入有效的邮箱地址', @@ -579,7 +582,46 @@ export default { passwordsNotMatch: '两次输入的密码不一致', passwordTooShort: '密码至少需要 8 个字符', passwordChangeSuccess: '密码修改成功', - passwordChangeFailed: '密码修改失败' + passwordChangeFailed: '密码修改失败', + // TOTP 2FA + totp: { + title: '双因素认证 (2FA)', + description: '使用 Google Authenticator 等应用增强账户安全', + enabled: '已启用', + enabledAt: '启用时间', + notEnabled: '未启用', + notEnabledHint: '启用双因素认证可以增强账户安全性', + enable: '启用', + disable: '禁用', + featureDisabled: '功能未开放', + featureDisabledHint: '管理员尚未开放双因素认证功能', + setupTitle: '设置双因素认证', + setupStep1: '使用认证器应用扫描下方二维码', + setupStep2: '输入应用显示的 6 位验证码', + manualEntry: '无法扫码?手动输入密钥:', + enterCode: '输入 6 位验证码', + verify: '验证', + setupFailed: '获取设置信息失败', + verifyFailed: '验证码错误,请重试', + enableSuccess: '双因素认证已启用', + disableTitle: '禁用双因素认证', + disableWarning: '禁用后,登录时将不再需要验证码。这可能会降低您的账户安全性。', + enterPassword: '请输入当前密码确认', + confirmDisable: '确认禁用', + disableSuccess: '双因素认证已禁用', + disableFailed: '禁用失败,请检查密码是否正确', + loginTitle: '双因素认证', + loginHint: '请输入您认证器应用显示的 6 位验证码', + loginFailed: '验证失败,请重试', + // New translations for email verification + verifyEmailFirst: '请先验证您的邮箱', + verifyPasswordFirst: '请先验证您的身份', + emailCode: '邮箱验证码', + enterEmailCode: '请输入 6 位验证码', + sendCode: '发送验证码', + codeSent: '验证码已发送到您的邮箱', + sendCodeFailed: '发送验证码失败' + } }, // Empty States @@ -2927,7 +2969,11 @@ export default { promoCode: '优惠码', promoCodeHint: '允许用户在注册时使用优惠码', passwordReset: '忘记密码', - passwordResetHint: '允许用户通过邮箱重置密码' + passwordResetHint: '允许用户通过邮箱重置密码', + totp: '双因素认证 (2FA)', + totpHint: '允许用户使用 Google Authenticator 等应用进行二次验证', + totpKeyNotConfigured: + '请先在环境变量中配置 TOTP_ENCRYPTION_KEY。使用命令 openssl rand -hex 32 生成密钥。' }, turnstile: { title: 'Cloudflare Turnstile', diff --git a/frontend/src/stores/auth.ts b/frontend/src/stores/auth.ts index 4076e154..e4612f5e 100644 --- a/frontend/src/stores/auth.ts +++ b/frontend/src/stores/auth.ts @@ -5,8 +5,8 @@ import { defineStore } from 'pinia' import { ref, computed, readonly } from 'vue' -import { authAPI } from '@/api' -import type { User, LoginRequest, RegisterRequest } from '@/types' +import { authAPI, isTotp2FARequired, type LoginResponse } from '@/api' +import type { User, LoginRequest, RegisterRequest, AuthResponse } from '@/types' const AUTH_TOKEN_KEY = 'auth_token' const AUTH_USER_KEY = 'auth_user' @@ -91,32 +91,23 @@ export const useAuthStore = defineStore('auth', () => { /** * User login - * @param credentials - Login credentials (username and password) - * @returns Promise resolving to the authenticated user + * @param credentials - Login credentials (email and password) + * @returns Promise resolving to the login response (may require 2FA) * @throws Error if login fails */ - async function login(credentials: LoginRequest): Promise { + async function login(credentials: LoginRequest): Promise { try { const response = await authAPI.login(credentials) - // Store token and user - token.value = response.access_token - - // Extract run_mode if present - if (response.user.run_mode) { - runMode.value = response.user.run_mode + // If 2FA is required, return the response without setting auth state + if (isTotp2FARequired(response)) { + return response } - const { run_mode: _run_mode, ...userData } = response.user - user.value = userData - // Persist to localStorage - localStorage.setItem(AUTH_TOKEN_KEY, response.access_token) - localStorage.setItem(AUTH_USER_KEY, JSON.stringify(userData)) + // Set auth state from the response + setAuthFromResponse(response) - // Start auto-refresh interval - startAutoRefresh() - - return userData + return response } catch (error) { // Clear any partial state on error clearAuth() @@ -124,6 +115,47 @@ export const useAuthStore = defineStore('auth', () => { } } + /** + * Complete login with 2FA code + * @param tempToken - Temporary token from initial login + * @param totpCode - 6-digit TOTP code + * @returns Promise resolving to the authenticated user + * @throws Error if 2FA verification fails + */ + async function login2FA(tempToken: string, totpCode: string): Promise { + try { + const response = await authAPI.login2FA({ temp_token: tempToken, totp_code: totpCode }) + setAuthFromResponse(response) + return user.value! + } catch (error) { + clearAuth() + throw error + } + } + + /** + * Set auth state from an AuthResponse + * Internal helper function + */ + function setAuthFromResponse(response: AuthResponse): void { + // Store token and user + token.value = response.access_token + + // Extract run_mode if present + if (response.user.run_mode) { + runMode.value = response.user.run_mode + } + const { run_mode: _run_mode, ...userData } = response.user + user.value = userData + + // Persist to localStorage + localStorage.setItem(AUTH_TOKEN_KEY, response.access_token) + localStorage.setItem(AUTH_USER_KEY, JSON.stringify(userData)) + + // Start auto-refresh interval + startAutoRefresh() + } + /** * User registration * @param userData - Registration data (username, email, password) @@ -253,6 +285,7 @@ export const useAuthStore = defineStore('auth', () => { // Actions login, + login2FA, register, setToken, logout, diff --git a/frontend/src/types/index.ts b/frontend/src/types/index.ts index 6f4b3e50..0ab3c637 100644 --- a/frontend/src/types/index.ts +++ b/frontend/src/types/index.ts @@ -1108,3 +1108,52 @@ export interface UpdatePromoCodeRequest { expires_at?: number | null notes?: string } + +// ==================== TOTP (2FA) Types ==================== + +export interface TotpStatus { + enabled: boolean + enabled_at: number | null // Unix timestamp in seconds + feature_enabled: boolean +} + +export interface TotpSetupRequest { + email_code?: string + password?: string +} + +export interface TotpSetupResponse { + secret: string + qr_code_url: string + setup_token: string + countdown: number +} + +export interface TotpEnableRequest { + totp_code: string + setup_token: string +} + +export interface TotpEnableResponse { + success: boolean +} + +export interface TotpDisableRequest { + email_code?: string + password?: string +} + +export interface TotpVerificationMethod { + method: 'email' | 'password' +} + +export interface TotpLoginResponse { + requires_2fa: boolean + temp_token?: string + user_email_masked?: string +} + +export interface TotpLogin2FARequest { + temp_token: string + totp_code: string +} diff --git a/frontend/src/views/admin/SettingsView.vue b/frontend/src/views/admin/SettingsView.vue index ed095254..93b3c18f 100644 --- a/frontend/src/views/admin/SettingsView.vue +++ b/frontend/src/views/admin/SettingsView.vue @@ -354,6 +354,31 @@ + + +
+
+ +

+ {{ t('admin.settings.registration.totpHint') }} +

+ +

+ {{ t('admin.settings.registration.totpKeyNotConfigured') }} +

+
+ +
@@ -1046,6 +1071,8 @@ const form = reactive({ email_verify_enabled: false, promo_code_enabled: true, password_reset_enabled: false, + totp_enabled: false, + totp_encryption_key_configured: false, default_balance: 0, default_concurrency: 1, site_name: 'Sub2API', @@ -1170,6 +1197,7 @@ async function saveSettings() { email_verify_enabled: form.email_verify_enabled, promo_code_enabled: form.promo_code_enabled, password_reset_enabled: form.password_reset_enabled, + totp_enabled: form.totp_enabled, default_balance: form.default_balance, default_concurrency: form.default_concurrency, site_name: form.site_name, diff --git a/frontend/src/views/auth/LoginView.vue b/frontend/src/views/auth/LoginView.vue index 3232048a..20370108 100644 --- a/frontend/src/views/auth/LoginView.vue +++ b/frontend/src/views/auth/LoginView.vue @@ -163,6 +163,16 @@

+ + + + From e12dd079fd29a30d0f3a5bc96d69c8e27ef5195c Mon Sep 17 00:00:00 2001 From: shaw Date: Wed, 28 Jan 2026 17:26:32 +0800 Subject: [PATCH 129/147] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E8=B0=83=E5=BA=A6?= =?UTF-8?q?=E5=99=A8=E7=A9=BA=E7=BC=93=E5=AD=98=E5=AF=BC=E8=87=B4=E7=9A=84?= =?UTF-8?q?=E7=AB=9E=E6=80=81=E6=9D=A1=E4=BB=B6bug?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 当新分组创建后立即绑定账号时,调度器会错误地将空快照视为有效缓存命中, 导致返回没有可调度的账号。现在空快照会触发数据库回退查询。 --- backend/internal/repository/scheduler_cache.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/backend/internal/repository/scheduler_cache.go b/backend/internal/repository/scheduler_cache.go index 13b22107..4f447e4f 100644 --- a/backend/internal/repository/scheduler_cache.go +++ b/backend/internal/repository/scheduler_cache.go @@ -58,7 +58,9 @@ func (c *schedulerCache) GetSnapshot(ctx context.Context, bucket service.Schedul return nil, false, err } if len(ids) == 0 { - return []*service.Account{}, true, nil + // 空快照视为缓存未命中,触发数据库回退查询 + // 这解决了新分组创建后立即绑定账号时的竞态条件问题 + return nil, false, nil } keys := make([]string, 0, len(ids)) From cadca752c492ef923c66b7c7b4c50a97c16cc1b5 Mon Sep 17 00:00:00 2001 From: shaw Date: Wed, 28 Jan 2026 18:35:20 +0800 Subject: [PATCH 130/147] =?UTF-8?q?=E4=BF=AE=E5=A4=8DSSE=E6=B5=81=E5=BC=8F?= =?UTF-8?q?=E5=93=8D=E5=BA=94=E4=B8=ADusage=E6=95=B0=E6=8D=AE=E8=A2=AB?= =?UTF-8?q?=E8=A6=86=E7=9B=96=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/service/gateway_service.go | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 5819f15a..2e3ba93e 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -3372,12 +3372,21 @@ func (s *GatewayService) parseSSEUsage(data string, usage *ClaudeUsage) { } `json:"usage"` } if json.Unmarshal([]byte(data), &msgDelta) == nil && msgDelta.Type == "message_delta" { - // message_delta 是推理结束后的最终统计,应完全覆盖 message_start 的数据 - // 这对于 Claude API 和 GLM 等兼容 API 都是正确的行为 - usage.InputTokens = msgDelta.Usage.InputTokens - usage.OutputTokens = msgDelta.Usage.OutputTokens - usage.CacheCreationInputTokens = msgDelta.Usage.CacheCreationInputTokens - usage.CacheReadInputTokens = msgDelta.Usage.CacheReadInputTokens + // message_delta 仅覆盖存在且非0的字段 + // 避免覆盖 message_start 中已有的值(如 input_tokens) + // Claude API 的 message_delta 通常只包含 output_tokens + if msgDelta.Usage.InputTokens > 0 { + usage.InputTokens = msgDelta.Usage.InputTokens + } + if msgDelta.Usage.OutputTokens > 0 { + usage.OutputTokens = msgDelta.Usage.OutputTokens + } + if msgDelta.Usage.CacheCreationInputTokens > 0 { + usage.CacheCreationInputTokens = msgDelta.Usage.CacheCreationInputTokens + } + if msgDelta.Usage.CacheReadInputTokens > 0 { + usage.CacheReadInputTokens = msgDelta.Usage.CacheReadInputTokens + } } } From 31f817d189c6db22940c8b836c50f48073dae61a Mon Sep 17 00:00:00 2001 From: cyhhao Date: Thu, 29 Jan 2026 01:28:43 +0800 Subject: [PATCH 131/147] fix: add newline separation for Claude Code system prompt --- backend/internal/service/account_test_service.go | 2 +- backend/internal/service/gateway_service.go | 9 ++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/backend/internal/service/account_test_service.go b/backend/internal/service/account_test_service.go index 46376c69..3290fe52 100644 --- a/backend/internal/service/account_test_service.go +++ b/backend/internal/service/account_test_service.go @@ -123,7 +123,7 @@ func createTestPayload(modelID string) (map[string]any, error) { "system": []map[string]any{ { "type": "text", - "text": "You are Claude Code, Anthropic's official CLI for Claude.", + "text": claudeCodeSystemPrompt, "cache_control": map[string]string{ "type": "ephemeral", }, diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index b46e856e..b1507245 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -39,7 +39,9 @@ const ( claudeAPICountTokensURL = "https://api.anthropic.com/v1/messages/count_tokens?beta=true" stickySessionTTL = time.Hour // 粘性会话TTL defaultMaxLineSize = 40 * 1024 * 1024 - claudeCodeSystemPrompt = "You are Claude Code, Anthropic's official CLI for Claude." + // Keep a trailing blank line so that when upstream concatenates system strings, + // the injected Claude Code banner doesn't run into the next system instruction. + claudeCodeSystemPrompt = "You are Claude Code, Anthropic's official CLI for Claude.\n\n" maxCacheControlBlocks = 4 // Anthropic API 允许的最大 cache_control 块数量 ) @@ -2479,7 +2481,8 @@ func injectClaudeCodePrompt(body []byte, system any) []byte { case nil: newSystem = []any{claudeCodeBlock} case string: - if v == "" || v == claudeCodeSystemPrompt { + // Be tolerant of older/newer clients that may differ only by trailing whitespace/newlines. + if strings.TrimSpace(v) == "" || strings.TrimSpace(v) == strings.TrimSpace(claudeCodeSystemPrompt) { newSystem = []any{claudeCodeBlock} } else { newSystem = []any{claudeCodeBlock, map[string]any{"type": "text", "text": v}} @@ -2489,7 +2492,7 @@ func injectClaudeCodePrompt(body []byte, system any) []byte { newSystem = append(newSystem, claudeCodeBlock) for _, item := range v { if m, ok := item.(map[string]any); ok { - if text, ok := m["text"].(string); ok && text == claudeCodeSystemPrompt { + if text, ok := m["text"].(string); ok && strings.TrimSpace(text) == strings.TrimSpace(claudeCodeSystemPrompt) { continue } } From 4d566f68b687cf09e7f523d4b8a3342ccbaa2553 Mon Sep 17 00:00:00 2001 From: cyhhao Date: Thu, 29 Jan 2026 01:34:58 +0800 Subject: [PATCH 132/147] chore: gofmt --- backend/internal/service/gateway_service.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index b1507245..01663ae7 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -42,7 +42,7 @@ const ( // Keep a trailing blank line so that when upstream concatenates system strings, // the injected Claude Code banner doesn't run into the next system instruction. claudeCodeSystemPrompt = "You are Claude Code, Anthropic's official CLI for Claude.\n\n" - maxCacheControlBlocks = 4 // Anthropic API 允许的最大 cache_control 块数量 + maxCacheControlBlocks = 4 // Anthropic API 允许的最大 cache_control 块数量 ) func (s *GatewayService) debugModelRoutingEnabled() bool { From 723e54013a2196daa19371db1884e0c016b61b6a Mon Sep 17 00:00:00 2001 From: cyhhao Date: Thu, 29 Jan 2026 01:49:51 +0800 Subject: [PATCH 133/147] fix(oauth): mimic Claude Code metadata and beta headers --- backend/internal/pkg/claude/constants.go | 7 ++- .../service/gateway_oauth_metadata_test.go | 62 +++++++++++++++++++ backend/internal/service/gateway_service.go | 17 +++-- 3 files changed, 79 insertions(+), 7 deletions(-) create mode 100644 backend/internal/service/gateway_oauth_metadata_test.go diff --git a/backend/internal/pkg/claude/constants.go b/backend/internal/pkg/claude/constants.go index 0c6e9b4c..fb95ffe2 100644 --- a/backend/internal/pkg/claude/constants.go +++ b/backend/internal/pkg/claude/constants.go @@ -16,7 +16,12 @@ const ( const DefaultBetaHeader = BetaClaudeCode + "," + BetaOAuth + "," + BetaInterleavedThinking + "," + BetaFineGrainedToolStreaming // MessageBetaHeaderNoTools /v1/messages 在无工具时的 beta header -const MessageBetaHeaderNoTools = BetaOAuth + "," + BetaInterleavedThinking +// +// NOTE: Claude Code OAuth credentials are scoped to Claude Code. When we "mimic" +// Claude Code for non-Claude-Code clients, we must include the claude-code beta +// even if the request doesn't use tools, otherwise upstream may reject the +// request as a non-Claude-Code API request. +const MessageBetaHeaderNoTools = BetaClaudeCode + "," + BetaOAuth + "," + BetaInterleavedThinking // MessageBetaHeaderWithTools /v1/messages 在有工具时的 beta header const MessageBetaHeaderWithTools = BetaClaudeCode + "," + BetaOAuth + "," + BetaInterleavedThinking diff --git a/backend/internal/service/gateway_oauth_metadata_test.go b/backend/internal/service/gateway_oauth_metadata_test.go new file mode 100644 index 00000000..ed6f1887 --- /dev/null +++ b/backend/internal/service/gateway_oauth_metadata_test.go @@ -0,0 +1,62 @@ +package service + +import ( + "regexp" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestBuildOAuthMetadataUserID_FallbackWithoutAccountUUID(t *testing.T) { + svc := &GatewayService{} + + parsed := &ParsedRequest{ + Model: "claude-sonnet-4-5", + Stream: true, + MetadataUserID: "", + System: nil, + Messages: nil, + } + + account := &Account{ + ID: 123, + Type: AccountTypeOAuth, + Extra: map[string]any{}, // intentionally missing account_uuid / claude_user_id + } + + fp := &Fingerprint{ClientID: "deadbeef"} // should be used as user id in legacy format + + got := svc.buildOAuthMetadataUserID(parsed, account, fp) + require.NotEmpty(t, got) + + // Legacy format: user_{client}_account__session_{uuid} + re := regexp.MustCompile(`^user_[a-zA-Z0-9]+_account__session_[a-f0-9-]{36}$`) + require.True(t, re.MatchString(got), "unexpected user_id format: %s", got) +} + +func TestBuildOAuthMetadataUserID_UsesAccountUUIDWhenPresent(t *testing.T) { + svc := &GatewayService{} + + parsed := &ParsedRequest{ + Model: "claude-sonnet-4-5", + Stream: true, + MetadataUserID: "", + } + + account := &Account{ + ID: 123, + Type: AccountTypeOAuth, + Extra: map[string]any{ + "account_uuid": "acc-uuid", + "claude_user_id": "clientid123", + "anthropic_user_id": "", + }, + } + + got := svc.buildOAuthMetadataUserID(parsed, account, nil) + require.NotEmpty(t, got) + + // New format: user_{client}_account_{account_uuid}_session_{uuid} + re := regexp.MustCompile(`^user_clientid123_account_acc-uuid_session_[a-f0-9-]{36}$`) + require.True(t, re.MatchString(got), "unexpected user_id format: %s", got) +} diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 01663ae7..1ebd1246 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -795,17 +795,15 @@ func (s *GatewayService) buildOAuthMetadataUserID(parsed *ParsedRequest, account if parsed.MetadataUserID != "" { return "" } - accountUUID := account.GetExtraString("account_uuid") - if accountUUID == "" { - return "" - } userID := strings.TrimSpace(account.GetClaudeUserID()) if userID == "" && fp != nil { userID = fp.ClientID } if userID == "" { - return "" + // Fall back to a random, well-formed client id so we can still satisfy + // Claude Code OAuth requirements when account metadata is incomplete. + userID = generateClientID() } sessionHash := s.GenerateSessionHash(parsed) @@ -814,7 +812,14 @@ func (s *GatewayService) buildOAuthMetadataUserID(parsed *ParsedRequest, account seed := fmt.Sprintf("%d::%s", account.ID, sessionHash) sessionID = generateSessionUUID(seed) } - return fmt.Sprintf("user_%s_account_%s_session_%s", userID, accountUUID, sessionID) + + // Prefer the newer format that includes account_uuid (if present), + // otherwise fall back to the legacy Claude Code format. + accountUUID := strings.TrimSpace(account.GetExtraString("account_uuid")) + if accountUUID != "" { + return fmt.Sprintf("user_%s_account_%s_session_%s", userID, accountUUID, sessionID) + } + return fmt.Sprintf("user_%s_account__session_%s", userID, sessionID) } func generateSessionUUID(seed string) string { From be3b788b8fd0b9a6715c6c9cfeddfaed4fa9ff65 Mon Sep 17 00:00:00 2001 From: cyhhao Date: Thu, 29 Jan 2026 02:03:54 +0800 Subject: [PATCH 134/147] fix: also prefix next system block with Claude Code banner --- .../internal/service/gateway_prompt_test.go | 9 +++++--- backend/internal/service/gateway_service.go | 22 ++++++++++++++++++- 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/backend/internal/service/gateway_prompt_test.go b/backend/internal/service/gateway_prompt_test.go index b056f8fa..52c75d1d 100644 --- a/backend/internal/service/gateway_prompt_test.go +++ b/backend/internal/service/gateway_prompt_test.go @@ -2,6 +2,7 @@ package service import ( "encoding/json" + "strings" "testing" "github.com/stretchr/testify/require" @@ -134,6 +135,8 @@ func TestSystemIncludesClaudeCodePrompt(t *testing.T) { } func TestInjectClaudeCodePrompt(t *testing.T) { + claudePrefix := strings.TrimSpace(claudeCodeSystemPrompt) + tests := []struct { name string body string @@ -162,7 +165,7 @@ func TestInjectClaudeCodePrompt(t *testing.T) { system: "Custom prompt", wantSystemLen: 2, wantFirstText: claudeCodeSystemPrompt, - wantSecondText: "Custom prompt", + wantSecondText: claudePrefix + "\n\nCustom prompt", }, { name: "string system equals Claude Code prompt", @@ -178,7 +181,7 @@ func TestInjectClaudeCodePrompt(t *testing.T) { // Claude Code + Custom = 2 wantSystemLen: 2, wantFirstText: claudeCodeSystemPrompt, - wantSecondText: "Custom", + wantSecondText: claudePrefix + "\n\nCustom", }, { name: "array system with existing Claude Code prompt (should dedupe)", @@ -190,7 +193,7 @@ func TestInjectClaudeCodePrompt(t *testing.T) { // Claude Code at start + Other = 2 (deduped) wantSystemLen: 2, wantFirstText: claudeCodeSystemPrompt, - wantSecondText: "Other", + wantSecondText: claudePrefix + "\n\nOther", }, { name: "empty array", diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 1ebd1246..c23b4f36 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -2479,6 +2479,10 @@ func injectClaudeCodePrompt(body []byte, system any) []byte { "text": claudeCodeSystemPrompt, "cache_control": map[string]string{"type": "ephemeral"}, } + // Opencode plugin applies an extra safeguard: it not only prepends the Claude Code + // banner, it also prefixes the next system instruction with the same banner plus + // a blank line. This helps when upstream concatenates system instructions. + claudeCodePrefix := strings.TrimSpace(claudeCodeSystemPrompt) var newSystem []any @@ -2490,16 +2494,32 @@ func injectClaudeCodePrompt(body []byte, system any) []byte { if strings.TrimSpace(v) == "" || strings.TrimSpace(v) == strings.TrimSpace(claudeCodeSystemPrompt) { newSystem = []any{claudeCodeBlock} } else { - newSystem = []any{claudeCodeBlock, map[string]any{"type": "text", "text": v}} + // Mirror opencode behavior: keep the banner as a separate system entry, + // but also prefix the next system text with the banner. + merged := v + if !strings.HasPrefix(v, claudeCodePrefix) { + merged = claudeCodePrefix + "\n\n" + v + } + newSystem = []any{claudeCodeBlock, map[string]any{"type": "text", "text": merged}} } case []any: newSystem = make([]any, 0, len(v)+1) newSystem = append(newSystem, claudeCodeBlock) + prefixedNext := false for _, item := range v { if m, ok := item.(map[string]any); ok { if text, ok := m["text"].(string); ok && strings.TrimSpace(text) == strings.TrimSpace(claudeCodeSystemPrompt) { continue } + // Prefix the first subsequent text system block once. + if !prefixedNext { + if blockType, _ := m["type"].(string); blockType == "text" { + if text, ok := m["text"].(string); ok && strings.TrimSpace(text) != "" && !strings.HasPrefix(text, claudeCodePrefix) { + m["text"] = claudeCodePrefix + "\n\n" + text + prefixedNext = true + } + } + } } newSystem = append(newSystem, item) } From 4d40fb6b602a0469bbdaa56bd047493a9d712f32 Mon Sep 17 00:00:00 2001 From: cyhhao Date: Thu, 29 Jan 2026 02:36:28 +0800 Subject: [PATCH 135/147] fix(oauth): merge anthropic-beta and force Claude Code headers in mimic mode --- backend/internal/service/gateway_beta_test.go | 23 +++++++ backend/internal/service/gateway_service.go | 66 +++++++++++++++++-- 2 files changed, 84 insertions(+), 5 deletions(-) create mode 100644 backend/internal/service/gateway_beta_test.go diff --git a/backend/internal/service/gateway_beta_test.go b/backend/internal/service/gateway_beta_test.go new file mode 100644 index 00000000..dd58c183 --- /dev/null +++ b/backend/internal/service/gateway_beta_test.go @@ -0,0 +1,23 @@ +package service + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestMergeAnthropicBeta(t *testing.T) { + got := mergeAnthropicBeta( + []string{"oauth-2025-04-20", "interleaved-thinking-2025-05-14"}, + "foo, oauth-2025-04-20,bar, foo", + ) + require.Equal(t, "oauth-2025-04-20,interleaved-thinking-2025-05-14,foo,bar", got) +} + +func TestMergeAnthropicBeta_EmptyIncoming(t *testing.T) { + got := mergeAnthropicBeta( + []string{"oauth-2025-04-20", "interleaved-thinking-2025-05-14"}, + "", + ) + require.Equal(t, "oauth-2025-04-20,interleaved-thinking-2025-05-14", got) +} diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index c23b4f36..c666c96a 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -3230,12 +3230,18 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex // 处理 anthropic-beta header(OAuth 账号需要包含 oauth beta) if tokenType == "oauth" { if mimicClaudeCode { - // 非 Claude Code 客户端:按 Claude Code 规则生成 beta header + // 非 Claude Code 客户端:按 opencode 的策略处理: + // - 强制 Claude Code 指纹相关请求头(尤其是 user-agent/x-stainless/x-app) + // - 保留 incoming beta 的同时,确保 OAuth 所需 beta 存在 + applyClaudeCodeMimicHeaders(req, reqStream) + + incomingBeta := req.Header.Get("anthropic-beta") + requiredBetas := []string{claude.BetaOAuth, claude.BetaInterleavedThinking} + // Tools 场景更严格,保留 claude-code beta 以提高 Claude Code 识别成功率。 if requestHasTools(body) { - req.Header.Set("anthropic-beta", claude.MessageBetaHeaderWithTools) - } else { - req.Header.Set("anthropic-beta", claude.MessageBetaHeaderNoTools) + requiredBetas = append([]string{claude.BetaClaudeCode}, requiredBetas...) } + req.Header.Set("anthropic-beta", mergeAnthropicBeta(requiredBetas, incomingBeta)) } else { // Claude Code 客户端:尽量透传原始 header,仅补齐 oauth beta clientBetaHeader := req.Header.Get("anthropic-beta") @@ -3353,6 +3359,52 @@ func applyClaudeOAuthHeaderDefaults(req *http.Request, isStream bool) { } } +func mergeAnthropicBeta(required []string, incoming string) string { + seen := make(map[string]struct{}, len(required)+8) + out := make([]string, 0, len(required)+8) + + add := func(v string) { + v = strings.TrimSpace(v) + if v == "" { + return + } + if _, ok := seen[v]; ok { + return + } + seen[v] = struct{}{} + out = append(out, v) + } + + for _, r := range required { + add(r) + } + for _, p := range strings.Split(incoming, ",") { + add(p) + } + return strings.Join(out, ",") +} + +// applyClaudeCodeMimicHeaders forces "Claude Code-like" request headers. +// This mirrors opencode-anthropic-auth behavior: do not trust downstream +// headers when using Claude Code-scoped OAuth credentials. +func applyClaudeCodeMimicHeaders(req *http.Request, isStream bool) { + if req == nil { + return + } + // Start with the standard defaults (fill missing). + applyClaudeOAuthHeaderDefaults(req, isStream) + // Then force key headers to match Claude Code fingerprint regardless of what the client sent. + for key, value := range claude.DefaultHeaders { + if value == "" { + continue + } + req.Header.Set(key, value) + } + if isStream { + req.Header.Set("x-stainless-helper-method", "stream") + } +} + func truncateForLog(b []byte, maxBytes int) string { if maxBytes <= 0 { maxBytes = 2048 @@ -4600,7 +4652,11 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con // OAuth 账号:处理 anthropic-beta header if tokenType == "oauth" { if mimicClaudeCode { - req.Header.Set("anthropic-beta", claude.CountTokensBetaHeader) + applyClaudeCodeMimicHeaders(req, false) + + incomingBeta := req.Header.Get("anthropic-beta") + requiredBetas := []string{claude.BetaClaudeCode, claude.BetaOAuth, claude.BetaInterleavedThinking, claude.BetaTokenCounting} + req.Header.Set("anthropic-beta", mergeAnthropicBeta(requiredBetas, incomingBeta)) } else { clientBetaHeader := req.Header.Get("anthropic-beta") if clientBetaHeader == "" { From c37fe91672796d2d1f44f2d1d21a91edc3232a10 Mon Sep 17 00:00:00 2001 From: cyhhao Date: Thu, 29 Jan 2026 02:52:26 +0800 Subject: [PATCH 136/147] fix(oauth): update Claude CLI fingerprint headers --- backend/internal/pkg/claude/constants.go | 8 +++++--- backend/internal/service/identity_service.go | 6 +++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/backend/internal/pkg/claude/constants.go b/backend/internal/pkg/claude/constants.go index fb95ffe2..8b3441dc 100644 --- a/backend/internal/pkg/claude/constants.go +++ b/backend/internal/pkg/claude/constants.go @@ -40,13 +40,15 @@ const APIKeyHaikuBetaHeader = BetaInterleavedThinking // DefaultHeaders 是 Claude Code 客户端默认请求头。 var DefaultHeaders = map[string]string{ - "User-Agent": "claude-cli/2.1.2 (external, cli)", + // Keep these in sync with recent Claude CLI traffic to reduce the chance + // that Claude Code-scoped OAuth credentials are rejected as "non-CLI" usage. + "User-Agent": "claude-cli/2.1.22 (external, cli)", "X-Stainless-Lang": "js", "X-Stainless-Package-Version": "0.70.0", "X-Stainless-OS": "Linux", - "X-Stainless-Arch": "x64", + "X-Stainless-Arch": "arm64", "X-Stainless-Runtime": "node", - "X-Stainless-Runtime-Version": "v24.3.0", + "X-Stainless-Runtime-Version": "v24.13.0", "X-Stainless-Retry-Count": "0", "X-Stainless-Timeout": "600", "X-App": "cli", diff --git a/backend/internal/service/identity_service.go b/backend/internal/service/identity_service.go index 4e227fea..a620ac4d 100644 --- a/backend/internal/service/identity_service.go +++ b/backend/internal/service/identity_service.go @@ -26,13 +26,13 @@ var ( // 默认指纹值(当客户端未提供时使用) var defaultFingerprint = Fingerprint{ - UserAgent: "claude-cli/2.1.2 (external, cli)", + UserAgent: "claude-cli/2.1.22 (external, cli)", StainlessLang: "js", StainlessPackageVersion: "0.70.0", StainlessOS: "Linux", - StainlessArch: "x64", + StainlessArch: "arm64", StainlessRuntime: "node", - StainlessRuntimeVersion: "v24.3.0", + StainlessRuntimeVersion: "v24.13.0", } // Fingerprint represents account fingerprint data From d98648f03ba9a0b4308d9b7aeed1e45416ddaf71 Mon Sep 17 00:00:00 2001 From: cyhhao Date: Thu, 29 Jan 2026 03:03:40 +0800 Subject: [PATCH 137/147] fix: rewrite OpenCode identity sentence to Claude Code --- .../internal/service/gateway_sanitize_test.go | 20 +++++++++++++++++++ backend/internal/service/gateway_service.go | 8 ++++++++ 2 files changed, 28 insertions(+) create mode 100644 backend/internal/service/gateway_sanitize_test.go diff --git a/backend/internal/service/gateway_sanitize_test.go b/backend/internal/service/gateway_sanitize_test.go new file mode 100644 index 00000000..3b0a07c9 --- /dev/null +++ b/backend/internal/service/gateway_sanitize_test.go @@ -0,0 +1,20 @@ +package service + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestSanitizeOpenCodeText_RewritesCanonicalSentence(t *testing.T) { + in := "You are OpenCode, the best coding agent on the planet." + got := sanitizeOpenCodeText(in) + require.Equal(t, strings.TrimSpace(claudeCodeSystemPrompt), got) +} + +func TestSanitizeOpenCodeText_RewritesOpenCodeKeywords(t *testing.T) { + in := "OpenCode and opencode are mentioned." + got := sanitizeOpenCodeText(in) + require.Equal(t, "Claude Code and Claude are mentioned.", got) +} diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index c666c96a..e17d0f0c 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -559,6 +559,14 @@ func sanitizeOpenCodeText(text string) string { if text == "" { return text } + // Some clients include a fixed OpenCode identity sentence. Anthropic may treat + // this as a non-Claude-Code fingerprint, so rewrite it to the canonical + // Claude Code banner before generic "OpenCode"/"opencode" replacements. + text = strings.ReplaceAll( + text, + "You are OpenCode, the best coding agent on the planet.", + strings.TrimSpace(claudeCodeSystemPrompt), + ) text = strings.ReplaceAll(text, "OpenCode", "Claude Code") text = opencodeTextRe.ReplaceAllString(text, "Claude") return text From 63412a9fcc4f9569d8338cd5ce3befbfc13604a3 Mon Sep 17 00:00:00 2001 From: cyhhao Date: Thu, 29 Jan 2026 03:13:14 +0800 Subject: [PATCH 138/147] chore(debug): log Claude mimic fingerprint --- backend/internal/service/gateway_service.go | 128 ++++++++++++++++++++ 1 file changed, 128 insertions(+) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index e17d0f0c..44abdb0a 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -50,6 +50,11 @@ func (s *GatewayService) debugModelRoutingEnabled() bool { return v == "1" || v == "true" || v == "yes" || v == "on" } +func (s *GatewayService) debugClaudeMimicEnabled() bool { + v := strings.ToLower(strings.TrimSpace(os.Getenv("SUB2API_DEBUG_CLAUDE_MIMIC"))) + return v == "1" || v == "true" || v == "yes" || v == "on" +} + func shortSessionHash(sessionHash string) string { if sessionHash == "" { return "" @@ -60,6 +65,121 @@ func shortSessionHash(sessionHash string) string { return sessionHash[:8] } +func redactAuthHeaderValue(v string) string { + v = strings.TrimSpace(v) + if v == "" { + return "" + } + // Keep scheme for debugging, redact secret. + if strings.HasPrefix(strings.ToLower(v), "bearer ") { + return "Bearer [redacted]" + } + return "[redacted]" +} + +func safeHeaderValueForLog(key string, v string) string { + key = strings.ToLower(strings.TrimSpace(key)) + switch key { + case "authorization", "x-api-key": + return redactAuthHeaderValue(v) + default: + return strings.TrimSpace(v) + } +} + +func extractSystemPreviewFromBody(body []byte) string { + if len(body) == 0 { + return "" + } + sys := gjson.GetBytes(body, "system") + if !sys.Exists() { + return "" + } + + switch { + case sys.IsArray(): + for _, item := range sys.Array() { + if !item.IsObject() { + continue + } + if strings.EqualFold(item.Get("type").String(), "text") { + if t := item.Get("text").String(); strings.TrimSpace(t) != "" { + return t + } + } + } + return "" + case sys.Type == gjson.String: + return sys.String() + default: + return "" + } +} + +func logClaudeMimicDebug(req *http.Request, body []byte, account *Account, tokenType string, mimicClaudeCode bool) { + if req == nil { + return + } + + // Only log a minimal fingerprint to avoid leaking user content. + interesting := []string{ + "user-agent", + "x-app", + "anthropic-dangerous-direct-browser-access", + "anthropic-version", + "anthropic-beta", + "x-stainless-lang", + "x-stainless-package-version", + "x-stainless-os", + "x-stainless-arch", + "x-stainless-runtime", + "x-stainless-runtime-version", + "x-stainless-retry-count", + "x-stainless-timeout", + "authorization", + "x-api-key", + "content-type", + "accept", + "x-stainless-helper-method", + } + + h := make([]string, 0, len(interesting)) + for _, k := range interesting { + if v := req.Header.Get(k); v != "" { + h = append(h, fmt.Sprintf("%s=%q", k, safeHeaderValueForLog(k, v))) + } + } + + metaUserID := strings.TrimSpace(gjson.GetBytes(body, "metadata.user_id").String()) + sysPreview := strings.TrimSpace(extractSystemPreviewFromBody(body)) + + // Truncate preview to keep logs sane. + if len(sysPreview) > 300 { + sysPreview = sysPreview[:300] + "..." + } + sysPreview = strings.ReplaceAll(sysPreview, "\n", "\\n") + sysPreview = strings.ReplaceAll(sysPreview, "\r", "\\r") + + aid := int64(0) + aname := "" + if account != nil { + aid = account.ID + aname = account.Name + } + + log.Printf( + "[ClaudeMimicDebug] url=%s account=%d(%s) tokenType=%s mimic=%t meta.user_id=%q system.preview=%q headers={%s}", + req.URL.String(), + aid, + aname, + tokenType, + mimicClaudeCode, + metaUserID, + sysPreview, + strings.Join(h, " "), + ) +} + // sseDataRe matches SSE data lines with optional whitespace after colon. // Some upstream APIs return non-standard "data:" without space (should be "data: "). var ( @@ -3264,6 +3384,10 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex } } + if s.debugClaudeMimicEnabled() { + logClaudeMimicDebug(req, body, account, tokenType, mimicClaudeCode) + } + return req, nil } @@ -4686,6 +4810,10 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con } } + if s.debugClaudeMimicEnabled() { + logClaudeMimicDebug(req, body, account, tokenType, mimicClaudeCode) + } + return req, nil } From 91079d3f15a66ecd9460daa122f6b8dc65c3957b Mon Sep 17 00:00:00 2001 From: cyhhao Date: Thu, 29 Jan 2026 15:17:46 +0800 Subject: [PATCH 139/147] chore(debug): emit Claude mimic fingerprint on credential-scope error --- backend/internal/service/gateway_service.go | 64 +++++++++++++++++++-- 1 file changed, 60 insertions(+), 4 deletions(-) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 44abdb0a..b3bbfd94 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -45,6 +45,10 @@ const ( maxCacheControlBlocks = 4 // Anthropic API 允许的最大 cache_control 块数量 ) +const ( + claudeMimicDebugInfoKey = "claude_mimic_debug_info" +) + func (s *GatewayService) debugModelRoutingEnabled() bool { v := strings.ToLower(strings.TrimSpace(os.Getenv("SUB2API_DEBUG_MODEL_ROUTING"))) return v == "1" || v == "true" || v == "yes" || v == "on" @@ -116,9 +120,9 @@ func extractSystemPreviewFromBody(body []byte) string { } } -func logClaudeMimicDebug(req *http.Request, body []byte, account *Account, tokenType string, mimicClaudeCode bool) { +func buildClaudeMimicDebugLine(req *http.Request, body []byte, account *Account, tokenType string, mimicClaudeCode bool) string { if req == nil { - return + return "" } // Only log a minimal fingerprint to avoid leaking user content. @@ -167,8 +171,8 @@ func logClaudeMimicDebug(req *http.Request, body []byte, account *Account, token aname = account.Name } - log.Printf( - "[ClaudeMimicDebug] url=%s account=%d(%s) tokenType=%s mimic=%t meta.user_id=%q system.preview=%q headers={%s}", + return fmt.Sprintf( + "url=%s account=%d(%s) tokenType=%s mimic=%t meta.user_id=%q system.preview=%q headers={%s}", req.URL.String(), aid, aname, @@ -180,6 +184,23 @@ func logClaudeMimicDebug(req *http.Request, body []byte, account *Account, token ) } +func logClaudeMimicDebug(req *http.Request, body []byte, account *Account, tokenType string, mimicClaudeCode bool) { + line := buildClaudeMimicDebugLine(req, body, account, tokenType, mimicClaudeCode) + if line == "" { + return + } + log.Printf("[ClaudeMimicDebug] %s", line) +} + +func isClaudeCodeCredentialScopeError(msg string) bool { + m := strings.ToLower(strings.TrimSpace(msg)) + if m == "" { + return false + } + return strings.Contains(m, "only authorized for use with claude code") && + strings.Contains(m, "cannot be used for other api requests") +} + // sseDataRe matches SSE data lines with optional whitespace after colon. // Some upstream APIs return non-standard "data:" without space (should be "data: "). var ( @@ -3384,6 +3405,11 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex } } + // Always capture a compact fingerprint line for later error diagnostics. + // We only print it when needed (or when the explicit debug flag is enabled). + if c != nil && tokenType == "oauth" { + c.Set(claudeMimicDebugInfoKey, buildClaudeMimicDebugLine(req, body, account, tokenType, mimicClaudeCode)) + } if s.debugClaudeMimicEnabled() { logClaudeMimicDebug(req, body, account, tokenType, mimicClaudeCode) } @@ -3640,6 +3666,20 @@ func (s *GatewayService) handleErrorResponse(ctx context.Context, resp *http.Res upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(body)) upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + // Print a compact upstream request fingerprint when we hit the Claude Code OAuth + // credential scope error. This avoids requiring env-var tweaks in a fixed deploy. + if isClaudeCodeCredentialScopeError(upstreamMsg) && c != nil { + if v, ok := c.Get(claudeMimicDebugInfoKey); ok { + if line, ok := v.(string); ok && strings.TrimSpace(line) != "" { + log.Printf("[ClaudeMimicDebugOnError] status=%d request_id=%s %s", + resp.StatusCode, + resp.Header.Get("x-request-id"), + line, + ) + } + } + } + // Enrich Ops error logs with upstream status + message, and optionally a truncated body snippet. upstreamDetail := "" if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { @@ -3769,6 +3809,19 @@ func (s *GatewayService) handleRetryExhaustedError(ctx context.Context, resp *ht upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(respBody)) upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + + if isClaudeCodeCredentialScopeError(upstreamMsg) && c != nil { + if v, ok := c.Get(claudeMimicDebugInfoKey); ok { + if line, ok := v.(string); ok && strings.TrimSpace(line) != "" { + log.Printf("[ClaudeMimicDebugOnError] status=%d request_id=%s %s", + resp.StatusCode, + resp.Header.Get("x-request-id"), + line, + ) + } + } + } + upstreamDetail := "" if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes @@ -4810,6 +4863,9 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con } } + if c != nil && tokenType == "oauth" { + c.Set(claudeMimicDebugInfoKey, buildClaudeMimicDebugLine(req, body, account, tokenType, mimicClaudeCode)) + } if s.debugClaudeMimicEnabled() { logClaudeMimicDebug(req, body, account, tokenType, mimicClaudeCode) } From 8375094c69a1e70d8dfbe02303357081e2606166 Mon Sep 17 00:00:00 2001 From: cyhhao Date: Thu, 29 Jan 2026 15:31:29 +0800 Subject: [PATCH 140/147] fix(oauth): match Claude CLI accept header and beta set --- backend/internal/service/gateway_service.go | 31 +++++++++++++++++---- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index b3bbfd94..8363ba66 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -3385,12 +3385,12 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex applyClaudeCodeMimicHeaders(req, reqStream) incomingBeta := req.Header.Get("anthropic-beta") + // Match real Claude CLI traffic (per mitmproxy reports): + // messages requests typically use only oauth + interleaved-thinking. + // Also drop claude-code beta if a downstream client added it. requiredBetas := []string{claude.BetaOAuth, claude.BetaInterleavedThinking} - // Tools 场景更严格,保留 claude-code beta 以提高 Claude Code 识别成功率。 - if requestHasTools(body) { - requiredBetas = append([]string{claude.BetaClaudeCode}, requiredBetas...) - } - req.Header.Set("anthropic-beta", mergeAnthropicBeta(requiredBetas, incomingBeta)) + drop := map[string]struct{}{claude.BetaClaudeCode: {}} + req.Header.Set("anthropic-beta", mergeAnthropicBetaDropping(requiredBetas, incomingBeta, drop)) } else { // Claude Code 客户端:尽量透传原始 header,仅补齐 oauth beta clientBetaHeader := req.Header.Get("anthropic-beta") @@ -3542,6 +3542,25 @@ func mergeAnthropicBeta(required []string, incoming string) string { return strings.Join(out, ",") } +func mergeAnthropicBetaDropping(required []string, incoming string, drop map[string]struct{}) string { + merged := mergeAnthropicBeta(required, incoming) + if merged == "" || len(drop) == 0 { + return merged + } + out := make([]string, 0, 8) + for _, p := range strings.Split(merged, ",") { + p = strings.TrimSpace(p) + if p == "" { + continue + } + if _, ok := drop[p]; ok { + continue + } + out = append(out, p) + } + return strings.Join(out, ",") +} + // applyClaudeCodeMimicHeaders forces "Claude Code-like" request headers. // This mirrors opencode-anthropic-auth behavior: do not trust downstream // headers when using Claude Code-scoped OAuth credentials. @@ -3558,6 +3577,8 @@ func applyClaudeCodeMimicHeaders(req *http.Request, isStream bool) { } req.Header.Set(key, value) } + // Real Claude CLI uses Accept: application/json (even for streaming). + req.Header.Set("accept", "application/json") if isStream { req.Header.Set("x-stainless-helper-method", "stream") } From fa454b1b99f2c6866f756773c96f91cad8353173 Mon Sep 17 00:00:00 2001 From: cyhhao Date: Thu, 29 Jan 2026 15:37:07 +0800 Subject: [PATCH 141/147] fix: align Claude Code system banner with opencode latest --- backend/internal/service/gateway_service.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 8363ba66..47ea8593 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -39,9 +39,10 @@ const ( claudeAPICountTokensURL = "https://api.anthropic.com/v1/messages/count_tokens?beta=true" stickySessionTTL = time.Hour // 粘性会话TTL defaultMaxLineSize = 40 * 1024 * 1024 - // Keep a trailing blank line so that when upstream concatenates system strings, - // the injected Claude Code banner doesn't run into the next system instruction. - claudeCodeSystemPrompt = "You are Claude Code, Anthropic's official CLI for Claude.\n\n" + // Canonical Claude Code banner. Keep it EXACT (no trailing whitespace/newlines) + // to match real Claude CLI traffic as closely as possible. When we need a visual + // separator between system blocks, we add "\n\n" at concatenation time. + claudeCodeSystemPrompt = "You are Claude Code, Anthropic's official CLI for Claude." maxCacheControlBlocks = 4 // Anthropic API 允许的最大 cache_control 块数量 ) From ba16ace697c6b2b65ca6c4e84818f04dd28aeabd Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Fri, 30 Jan 2026 08:14:52 +0800 Subject: [PATCH 142/147] chore: upgrade Antigravity User-Agent to 1.15.8 --- backend/internal/pkg/antigravity/oauth.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/internal/pkg/antigravity/oauth.go b/backend/internal/pkg/antigravity/oauth.go index ee2a6c1a..c7d657b9 100644 --- a/backend/internal/pkg/antigravity/oauth.go +++ b/backend/internal/pkg/antigravity/oauth.go @@ -33,7 +33,7 @@ const ( "https://www.googleapis.com/auth/experimentsandconfigs" // User-Agent(与 Antigravity-Manager 保持一致) - UserAgent = "antigravity/1.11.9 windows/amd64" + UserAgent = "antigravity/1.15.8 windows/amd64" // Session 过期时间 SessionTTL = 30 * time.Minute From 6599b366dc17abe62fdd79683b7ee71a06888667 Mon Sep 17 00:00:00 2001 From: shaw Date: Fri, 30 Jan 2026 08:53:53 +0800 Subject: [PATCH 143/147] =?UTF-8?q?fix:=20=E5=8D=87=E7=BA=A7Go=E7=89=88?= =?UTF-8?q?=E6=9C=AC=E8=87=B31.25.6=E4=BF=AE=E5=A4=8D=E6=A0=87=E5=87=86?= =?UTF-8?q?=E5=BA=93=E5=AE=89=E5=85=A8=E6=BC=8F=E6=B4=9E?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 修复GO-2026-4341和GO-2026-4340两个标准库漏洞 --- .github/workflows/security-scan.yml | 2 +- Dockerfile | 2 +- backend/go.mod | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/security-scan.yml b/.github/workflows/security-scan.yml index 160a0df9..dfb8e37e 100644 --- a/.github/workflows/security-scan.yml +++ b/.github/workflows/security-scan.yml @@ -22,7 +22,7 @@ jobs: cache-dependency-path: backend/go.sum - name: Verify Go version run: | - go version | grep -q 'go1.25.5' + go version | grep -q 'go1.25.6' - name: Run govulncheck working-directory: backend run: | diff --git a/Dockerfile b/Dockerfile index b3320300..3d4b5094 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,7 +7,7 @@ # ============================================================================= ARG NODE_IMAGE=node:24-alpine -ARG GOLANG_IMAGE=golang:1.25.5-alpine +ARG GOLANG_IMAGE=golang:1.25.6-alpine ARG ALPINE_IMAGE=alpine:3.20 ARG GOPROXY=https://goproxy.cn,direct ARG GOSUMDB=sum.golang.google.cn diff --git a/backend/go.mod b/backend/go.mod index ad7d76b6..4c3e6246 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -1,6 +1,6 @@ module github.com/Wei-Shaw/sub2api -go 1.25.5 +go 1.25.6 require ( entgo.io/ent v0.14.5 From 4d8f2db92494a29b6b74d220493b02760c48befb Mon Sep 17 00:00:00 2001 From: shaw Date: Fri, 30 Jan 2026 08:57:37 +0800 Subject: [PATCH 144/147] =?UTF-8?q?fix:=20=E6=9B=B4=E6=96=B0=E6=89=80?= =?UTF-8?q?=E6=9C=89CI=20workflow=E7=9A=84Go=E7=89=88=E6=9C=AC=E9=AA=8C?= =?UTF-8?q?=E8=AF=81=E8=87=B31.25.6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .github/workflows/backend-ci.yml | 4 ++-- .github/workflows/release.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/backend-ci.yml b/.github/workflows/backend-ci.yml index 3ea8860a..e5624f86 100644 --- a/.github/workflows/backend-ci.yml +++ b/.github/workflows/backend-ci.yml @@ -19,7 +19,7 @@ jobs: cache: true - name: Verify Go version run: | - go version | grep -q 'go1.25.5' + go version | grep -q 'go1.25.6' - name: Unit tests working-directory: backend run: make test-unit @@ -38,7 +38,7 @@ jobs: cache: true - name: Verify Go version run: | - go version | grep -q 'go1.25.5' + go version | grep -q 'go1.25.6' - name: golangci-lint uses: golangci/golangci-lint-action@v9 with: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 0415000d..f45c1a0b 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -115,7 +115,7 @@ jobs: - name: Verify Go version run: | - go version | grep -q 'go1.25.5' + go version | grep -q 'go1.25.6' # Docker setup for GoReleaser - name: Set up QEMU From fe17058700a2664597ef02b27e6995e35a7804bc Mon Sep 17 00:00:00 2001 From: cyhhao Date: Sat, 31 Jan 2026 01:40:38 +0800 Subject: [PATCH 145/147] refactor: limit OpenCode keyword replacement to tool descriptions --- .../internal/service/gateway_sanitize_test.go | 6 ++--- backend/internal/service/gateway_service.go | 22 +++++++++++++++---- 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/backend/internal/service/gateway_sanitize_test.go b/backend/internal/service/gateway_sanitize_test.go index 3b0a07c9..a70c1a00 100644 --- a/backend/internal/service/gateway_sanitize_test.go +++ b/backend/internal/service/gateway_sanitize_test.go @@ -9,12 +9,12 @@ import ( func TestSanitizeOpenCodeText_RewritesCanonicalSentence(t *testing.T) { in := "You are OpenCode, the best coding agent on the planet." - got := sanitizeOpenCodeText(in) + got := sanitizeSystemText(in) require.Equal(t, strings.TrimSpace(claudeCodeSystemPrompt), got) } -func TestSanitizeOpenCodeText_RewritesOpenCodeKeywords(t *testing.T) { +func TestSanitizeToolText_RewritesOpenCodeKeywords(t *testing.T) { in := "OpenCode and opencode are mentioned." - got := sanitizeOpenCodeText(in) + got := sanitizeToolText(in) require.Equal(t, "Claude Code and Claude are mentioned.", got) } diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 47ea8593..40354e48 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -697,7 +697,10 @@ func normalizeParamNameForOpenCode(name string, cache map[string]string) string return name } -func sanitizeOpenCodeText(text string) string { +// sanitizeSystemText rewrites only the fixed OpenCode identity sentence (if present). +// We intentionally avoid broad keyword replacement in system prompts to prevent +// accidentally changing user-provided instructions. +func sanitizeSystemText(text string) string { if text == "" { return text } @@ -709,6 +712,17 @@ func sanitizeOpenCodeText(text string) string { "You are OpenCode, the best coding agent on the planet.", strings.TrimSpace(claudeCodeSystemPrompt), ) + return text +} + +// sanitizeToolText is intentionally more aggressive than sanitizeSystemText because +// tool descriptions are not user chat content, and some upstreams may flag "opencode" +// strings as non-Claude-Code fingerprints. +func sanitizeToolText(text string) string { + if text == "" { + return text + } + text = sanitizeSystemText(text) text = strings.ReplaceAll(text, "OpenCode", "Claude Code") text = opencodeTextRe.ReplaceAllString(text, "Claude") return text @@ -720,7 +734,7 @@ func sanitizeToolDescription(description string) string { } description = toolDescAbsPathRe.ReplaceAllString(description, "[path]") description = toolDescWinPathRe.ReplaceAllString(description, "[path]") - return sanitizeOpenCodeText(description) + return sanitizeToolText(description) } func normalizeToolInputSchema(inputSchema any, cache map[string]string) { @@ -795,7 +809,7 @@ func normalizeClaudeOAuthRequestBody(body []byte, modelID string, opts claudeOAu if system, ok := req["system"]; ok { switch v := system.(type) { case string: - sanitized := sanitizeOpenCodeText(v) + sanitized := sanitizeSystemText(v) if sanitized != v { req["system"] = sanitized } @@ -812,7 +826,7 @@ func normalizeClaudeOAuthRequestBody(body []byte, modelID string, opts claudeOAu if !ok || text == "" { continue } - sanitized := sanitizeOpenCodeText(text) + sanitized := sanitizeSystemText(text) if sanitized != text { block["text"] = sanitized } From 3a34746668f22bd7a96d8c29a84aeb2a08f88bef Mon Sep 17 00:00:00 2001 From: cyhhao Date: Sat, 31 Jan 2026 02:01:51 +0800 Subject: [PATCH 146/147] refactor: stop rewriting tool descriptions; keep only system sentence rewrite --- .../internal/service/gateway_sanitize_test.go | 7 ++++--- backend/internal/service/gateway_service.go | 17 +++-------------- 2 files changed, 7 insertions(+), 17 deletions(-) diff --git a/backend/internal/service/gateway_sanitize_test.go b/backend/internal/service/gateway_sanitize_test.go index a70c1a00..8fa971ca 100644 --- a/backend/internal/service/gateway_sanitize_test.go +++ b/backend/internal/service/gateway_sanitize_test.go @@ -13,8 +13,9 @@ func TestSanitizeOpenCodeText_RewritesCanonicalSentence(t *testing.T) { require.Equal(t, strings.TrimSpace(claudeCodeSystemPrompt), got) } -func TestSanitizeToolText_RewritesOpenCodeKeywords(t *testing.T) { +func TestSanitizeToolDescription_DoesNotRewriteKeywords(t *testing.T) { in := "OpenCode and opencode are mentioned." - got := sanitizeToolText(in) - require.Equal(t, "Claude Code and Claude are mentioned.", got) + got := sanitizeToolDescription(in) + // We no longer rewrite tool descriptions; only redact obvious path leaks. + require.Equal(t, in, got) } diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 40354e48..703804a4 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -715,26 +715,15 @@ func sanitizeSystemText(text string) string { return text } -// sanitizeToolText is intentionally more aggressive than sanitizeSystemText because -// tool descriptions are not user chat content, and some upstreams may flag "opencode" -// strings as non-Claude-Code fingerprints. -func sanitizeToolText(text string) string { - if text == "" { - return text - } - text = sanitizeSystemText(text) - text = strings.ReplaceAll(text, "OpenCode", "Claude Code") - text = opencodeTextRe.ReplaceAllString(text, "Claude") - return text -} - func sanitizeToolDescription(description string) string { if description == "" { return description } description = toolDescAbsPathRe.ReplaceAllString(description, "[path]") description = toolDescWinPathRe.ReplaceAllString(description, "[path]") - return sanitizeToolText(description) + // Intentionally do NOT rewrite tool descriptions (OpenCode/Claude strings). + // Tool names/skill names may rely on exact wording, and rewriting can be misleading. + return description } func normalizeToolInputSchema(inputSchema any, cache map[string]string) { From adb77af1d973ea5dcf234a0776187ef888a3514b Mon Sep 17 00:00:00 2001 From: cyhhao Date: Sat, 31 Jan 2026 02:07:57 +0800 Subject: [PATCH 147/147] fix: satisfy golangci-lint (nil checks, remove unused helpers) --- backend/internal/service/gateway_service.go | 35 +++++++++------------ 1 file changed, 15 insertions(+), 20 deletions(-) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 703804a4..3d39e37c 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -215,7 +215,6 @@ var ( modelFieldRe = regexp.MustCompile(`"model"\s*:\s*"([^"]+)"`) toolDescAbsPathRe = regexp.MustCompile(`/\/?(?:home|Users|tmp|var|opt|usr|etc)\/[^\s,\)"'\]]+`) toolDescWinPathRe = regexp.MustCompile(`(?i)[A-Z]:\\[^\s,\)"'\]]+`) - opencodeTextRe = regexp.MustCompile(`(?i)opencode`) claudeToolNameOverrides = map[string]string{ "bash": "Bash", @@ -3320,11 +3319,16 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex } } + clientHeaders := http.Header{} + if c != nil && c.Request != nil { + clientHeaders = c.Request.Header + } + // OAuth账号:应用统一指纹 var fingerprint *Fingerprint if account.IsOAuth() && s.identityService != nil { // 1. 获取或创建指纹(包含随机生成的ClientID) - fp, err := s.identityService.GetOrCreateFingerprint(ctx, account.ID, c.Request.Header) + fp, err := s.identityService.GetOrCreateFingerprint(ctx, account.ID, clientHeaders) if err != nil { log.Printf("Warning: failed to get fingerprint for account %d: %v", account.ID, err) // 失败时降级为透传原始headers @@ -3355,7 +3359,7 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex } // 白名单透传headers - for key, values := range c.Request.Header { + for key, values := range clientHeaders { lowerKey := strings.ToLower(key) if allowedHeaders[lowerKey] { for _, v := range values { @@ -3479,20 +3483,6 @@ func requestNeedsBetaFeatures(body []byte) bool { return false } -func requestHasTools(body []byte) bool { - tools := gjson.GetBytes(body, "tools") - if !tools.Exists() { - return false - } - if tools.IsArray() { - return len(tools.Array()) > 0 - } - if tools.IsObject() { - return len(tools.Map()) > 0 - } - return false -} - func defaultAPIKeyBetaHeader(body []byte) string { modelID := gjson.GetBytes(body, "model").String() if strings.Contains(strings.ToLower(modelID), "haiku") { @@ -4804,10 +4794,15 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con } } + clientHeaders := http.Header{} + if c != nil && c.Request != nil { + clientHeaders = c.Request.Header + } + // OAuth 账号:应用统一指纹和重写 userID // 如果启用了会话ID伪装,会在重写后替换 session 部分为固定值 if account.IsOAuth() && s.identityService != nil { - fp, err := s.identityService.GetOrCreateFingerprint(ctx, account.ID, c.Request.Header) + fp, err := s.identityService.GetOrCreateFingerprint(ctx, account.ID, clientHeaders) if err == nil { accountUUID := account.GetExtraString("account_uuid") if accountUUID != "" && fp.ClientID != "" { @@ -4831,7 +4826,7 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con } // 白名单透传 headers - for key, values := range c.Request.Header { + for key, values := range clientHeaders { lowerKey := strings.ToLower(key) if allowedHeaders[lowerKey] { for _, v := range values { @@ -4842,7 +4837,7 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con // OAuth 账号:应用指纹到请求头 if account.IsOAuth() && s.identityService != nil { - fp, _ := s.identityService.GetOrCreateFingerprint(ctx, account.ID, c.Request.Header) + fp, _ := s.identityService.GetOrCreateFingerprint(ctx, account.ID, clientHeaders) if fp != nil { s.identityService.ApplyFingerprint(req, fp) }