mirror of
https://gitee.com/wanwujie/sub2api
synced 2026-04-06 00:10:21 +08:00
Compare commits
63 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
21f349c032 | ||
|
|
28e36f7925 | ||
|
|
6c02076333 | ||
|
|
7414bdf0e3 | ||
|
|
e6326b2929 | ||
|
|
17cdcebd04 | ||
|
|
a14babdc73 | ||
|
|
aadc6a763a | ||
|
|
f16af8bf88 | ||
|
|
5ceaef4500 | ||
|
|
1ac7219a92 | ||
|
|
d4cc9871c4 | ||
|
|
961c30e7c0 | ||
|
|
13e85b3147 | ||
|
|
50a3c7fa0b | ||
|
|
2005fc97a8 | ||
|
|
0772d9250e | ||
|
|
045cba78b4 | ||
|
|
8989d0d4b6 | ||
|
|
c521117b99 | ||
|
|
e0f52a8ab8 | ||
|
|
6c23fadf7e | ||
|
|
869952d113 | ||
|
|
07ab051ee4 | ||
|
|
f2d98fc0c7 | ||
|
|
2b41cec840 | ||
|
|
6cf77040e7 | ||
|
|
20b70bc5fd | ||
|
|
4905e7193a | ||
|
|
9c1f4b8e72 | ||
|
|
9857c17631 | ||
|
|
7e34bb946f | ||
|
|
47b748851b | ||
|
|
a6f99cf534 | ||
|
|
a120a6bc32 | ||
|
|
d557d1a190 | ||
|
|
e0286e5085 | ||
|
|
4b41e898a4 | ||
|
|
668e164793 | ||
|
|
fa2e6188d0 | ||
|
|
7fde9ebbc2 | ||
|
|
aef7c3b9bb | ||
|
|
a0b76bd608 | ||
|
|
c1fab7f8d8 | ||
|
|
f42c8f2abe | ||
|
|
aa5846b282 | ||
|
|
594a0ade38 | ||
|
|
d45cc23171 | ||
|
|
d795734352 | ||
|
|
4da9fdd1d5 | ||
|
|
6b218caa21 | ||
|
|
5c138007d0 | ||
|
|
1acfc46f46 | ||
|
|
fbffb08aae | ||
|
|
8640a62319 | ||
|
|
fa782e70a4 | ||
|
|
afd72abc6e | ||
|
|
71f72e167e | ||
|
|
6595c7601e | ||
|
|
67c0506290 | ||
|
|
6447be4534 | ||
|
|
3741617ebd | ||
|
|
ab4e8b2cf0 |
7
.gitattributes
vendored
7
.gitattributes
vendored
@@ -4,6 +4,13 @@ backend/migrations/*.sql text eol=lf
|
||||
# Go 源代码文件
|
||||
*.go text eol=lf
|
||||
|
||||
# 前端 源代码文件
|
||||
*.ts text eol=lf
|
||||
*.tsx text eol=lf
|
||||
*.js text eol=lf
|
||||
*.jsx text eol=lf
|
||||
*.vue text eol=lf
|
||||
|
||||
# Shell 脚本
|
||||
*.sh text eol=lf
|
||||
|
||||
|
||||
@@ -47,6 +47,8 @@ dockers:
|
||||
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:latest"
|
||||
dockerfile: Dockerfile.goreleaser
|
||||
use: buildx
|
||||
extra_files:
|
||||
- deploy/docker-entrypoint.sh
|
||||
build_flag_templates:
|
||||
- "--platform=linux/amd64"
|
||||
- "--label=org.opencontainers.image.version={{ .Version }}"
|
||||
|
||||
@@ -63,6 +63,8 @@ dockers:
|
||||
- "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-amd64"
|
||||
dockerfile: Dockerfile.goreleaser
|
||||
use: buildx
|
||||
extra_files:
|
||||
- deploy/docker-entrypoint.sh
|
||||
build_flag_templates:
|
||||
- "--platform=linux/amd64"
|
||||
- "--label=org.opencontainers.image.version={{ .Version }}"
|
||||
@@ -76,6 +78,8 @@ dockers:
|
||||
- "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-arm64"
|
||||
dockerfile: Dockerfile.goreleaser
|
||||
use: buildx
|
||||
extra_files:
|
||||
- deploy/docker-entrypoint.sh
|
||||
build_flag_templates:
|
||||
- "--platform=linux/arm64"
|
||||
- "--label=org.opencontainers.image.version={{ .Version }}"
|
||||
@@ -89,6 +93,8 @@ dockers:
|
||||
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-amd64"
|
||||
dockerfile: Dockerfile.goreleaser
|
||||
use: buildx
|
||||
extra_files:
|
||||
- deploy/docker-entrypoint.sh
|
||||
build_flag_templates:
|
||||
- "--platform=linux/amd64"
|
||||
- "--label=org.opencontainers.image.version={{ .Version }}"
|
||||
@@ -102,6 +108,8 @@ dockers:
|
||||
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-arm64"
|
||||
dockerfile: Dockerfile.goreleaser
|
||||
use: buildx
|
||||
extra_files:
|
||||
- deploy/docker-entrypoint.sh
|
||||
build_flag_templates:
|
||||
- "--platform=linux/arm64"
|
||||
- "--label=org.opencontainers.image.version={{ .Version }}"
|
||||
|
||||
11
Dockerfile
11
Dockerfile
@@ -92,6 +92,7 @@ LABEL org.opencontainers.image.source="https://github.com/Wei-Shaw/sub2api"
|
||||
RUN apk add --no-cache \
|
||||
ca-certificates \
|
||||
tzdata \
|
||||
su-exec \
|
||||
libpq \
|
||||
zstd-libs \
|
||||
lz4-libs \
|
||||
@@ -120,8 +121,9 @@ COPY --from=backend-builder --chown=sub2api:sub2api /app/backend/resources /app/
|
||||
# Create data directory
|
||||
RUN mkdir -p /app/data && chown sub2api:sub2api /app/data
|
||||
|
||||
# Switch to non-root user
|
||||
USER sub2api
|
||||
# Copy entrypoint script (fixes volume permissions then drops to sub2api)
|
||||
COPY deploy/docker-entrypoint.sh /app/docker-entrypoint.sh
|
||||
RUN chmod +x /app/docker-entrypoint.sh
|
||||
|
||||
# Expose port (can be overridden by SERVER_PORT env var)
|
||||
EXPOSE 8080
|
||||
@@ -130,5 +132,6 @@ EXPOSE 8080
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=10s --retries=3 \
|
||||
CMD wget -q -T 5 -O /dev/null http://localhost:${SERVER_PORT:-8080}/health || exit 1
|
||||
|
||||
# Run the application
|
||||
ENTRYPOINT ["/app/sub2api"]
|
||||
# Run the application (entrypoint fixes /app/data ownership then execs as sub2api)
|
||||
ENTRYPOINT ["/app/docker-entrypoint.sh"]
|
||||
CMD ["/app/sub2api"]
|
||||
|
||||
@@ -21,6 +21,7 @@ RUN apk add --no-cache \
|
||||
ca-certificates \
|
||||
tzdata \
|
||||
curl \
|
||||
su-exec \
|
||||
libpq \
|
||||
zstd-libs \
|
||||
lz4-libs \
|
||||
@@ -47,11 +48,15 @@ COPY sub2api /app/sub2api
|
||||
# Create data directory
|
||||
RUN mkdir -p /app/data && chown -R sub2api:sub2api /app
|
||||
|
||||
USER sub2api
|
||||
# Copy entrypoint script (fixes volume permissions then drops to sub2api)
|
||||
COPY deploy/docker-entrypoint.sh /app/docker-entrypoint.sh
|
||||
RUN chmod +x /app/docker-entrypoint.sh
|
||||
|
||||
EXPOSE 8080
|
||||
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=10s --retries=3 \
|
||||
CMD curl -f http://localhost:${SERVER_PORT:-8080}/health || exit 1
|
||||
|
||||
ENTRYPOINT ["/app/sub2api"]
|
||||
# Run the application (entrypoint fixes /app/data ownership then execs as sub2api)
|
||||
ENTRYPOINT ["/app/docker-entrypoint.sh"]
|
||||
CMD ["/app/sub2api"]
|
||||
|
||||
30
README.md
30
README.md
@@ -8,27 +8,31 @@
|
||||
[](https://redis.io/)
|
||||
[](https://www.docker.com/)
|
||||
|
||||
<a href="https://trendshift.io/repositories/21823" target="_blank"><img src="https://trendshift.io/api/badge/repositories/21823" alt="Wei-Shaw%2Fsub2api | Trendshift" width="250" height="55"/></a>
|
||||
|
||||
**AI API Gateway Platform for Subscription Quota Distribution**
|
||||
|
||||
English | [中文](README_CN.md)
|
||||
|
||||
</div>
|
||||
|
||||
> **Sub2API officially uses only the domains `sub2api.org` and `pincc.ai`. Other websites using the Sub2API name may be third-party deployments or services and are not affiliated with this project. Please verify and exercise your own judgment.**
|
||||
|
||||
---
|
||||
|
||||
## Demo
|
||||
|
||||
Try Sub2API online: **https://demo.sub2api.org/**
|
||||
Try Sub2API online: **[https://demo.sub2api.org/](https://demo.sub2api.org/)**
|
||||
|
||||
Demo credentials (shared demo environment; **not** created automatically for self-hosted installs):
|
||||
|
||||
| Email | Password |
|
||||
|-------|----------|
|
||||
| admin@sub2api.com | admin123 |
|
||||
| admin@sub2api.org | admin123 |
|
||||
|
||||
## Overview
|
||||
|
||||
Sub2API is an AI API gateway platform designed to distribute and manage API quotas from AI product subscriptions (like Claude Code $200/month). Users can access upstream AI services through platform-generated API Keys, while the platform handles authentication, billing, load balancing, and request forwarding.
|
||||
Sub2API is an AI API gateway platform designed to distribute and manage API quotas from AI product subscriptions. Users can access upstream AI services through platform-generated API Keys, while the platform handles authentication, billing, load balancing, and request forwarding.
|
||||
|
||||
## Features
|
||||
|
||||
@@ -41,6 +45,15 @@ Sub2API is an AI API gateway platform designed to distribute and manage API quot
|
||||
- **Admin Dashboard** - Web interface for monitoring and management
|
||||
- **External System Integration** - Embed external systems (e.g. payment, ticketing) via iframe to extend the admin dashboard
|
||||
|
||||
## Don't Want to Self-Host?
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td width="180" align="center" valign="middle"><a href="https://shop.pincc.ai/"><img src="assets/partners/logos/pincc-logo.png" alt="pincc" width="120"></a></td>
|
||||
<td valign="middle"><b><a href="https://shop.pincc.ai/">PinCC</a></b> is the official relay service built on Sub2API, offering stable access to Claude Code, Codex, Gemini and other popular models — ready to use, no deployment or maintenance required.</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
## Ecosystem
|
||||
|
||||
Community projects that extend or integrate with Sub2API:
|
||||
@@ -61,10 +74,15 @@ Community projects that extend or integrate with Sub2API:
|
||||
|
||||
---
|
||||
|
||||
## Documentation
|
||||
## Nginx Reverse Proxy Note
|
||||
|
||||
- Dependency Security: `docs/dependency-security.md`
|
||||
- Admin Payment Integration API: `docs/ADMIN_PAYMENT_INTEGRATION_API.md`
|
||||
When using Nginx as a reverse proxy for Sub2API (or CRS) with Codex CLI, add the following to the `http` block in your Nginx configuration:
|
||||
|
||||
```nginx
|
||||
underscores_in_headers on;
|
||||
```
|
||||
|
||||
Nginx drops headers containing underscores by default (e.g. `session_id`), which breaks sticky session routing in multi-account setups.
|
||||
|
||||
---
|
||||
|
||||
|
||||
33
README_CN.md
33
README_CN.md
@@ -8,27 +8,30 @@
|
||||
[](https://redis.io/)
|
||||
[](https://www.docker.com/)
|
||||
|
||||
<a href="https://trendshift.io/repositories/21823" target="_blank"><img src="https://trendshift.io/api/badge/repositories/21823" alt="Wei-Shaw%2Fsub2api | Trendshift" width="250" height="55"/></a>
|
||||
|
||||
**AI API 网关平台 - 订阅配额分发管理**
|
||||
|
||||
[English](README.md) | 中文
|
||||
|
||||
</div>
|
||||
|
||||
> **Sub2API 官方仅使用 `sub2api.org` 与 `pincc.ai` 两个域名。其他使用 Sub2API 名义的网站可能为第三方部署或服务,与本项目无关,请自行甄别。**
|
||||
---
|
||||
|
||||
## 在线体验
|
||||
|
||||
体验地址:**https://v2.pincc.ai/**
|
||||
体验地址:**[https://demo.sub2api.org/](https://demo.sub2api.org/)**
|
||||
|
||||
演示账号(共享演示环境;自建部署不会自动创建该账号):
|
||||
|
||||
| 邮箱 | 密码 |
|
||||
|------|------|
|
||||
| admin@sub2api.com | admin123 |
|
||||
| admin@sub2api.org | admin123 |
|
||||
|
||||
## 项目概述
|
||||
|
||||
Sub2API 是一个 AI API 网关平台,用于分发和管理 AI 产品订阅(如 Claude Code $200/月)的 API 配额。用户通过平台生成的 API Key 调用上游 AI 服务,平台负责鉴权、计费、负载均衡和请求转发。
|
||||
Sub2API 是一个 AI API 网关平台,用于分发和管理 AI 产品订阅的 API 配额。用户通过平台生成的 API Key 调用上游 AI 服务,平台负责鉴权、计费、负载均衡和请求转发。
|
||||
|
||||
## 核心功能
|
||||
|
||||
@@ -41,6 +44,15 @@ Sub2API 是一个 AI API 网关平台,用于分发和管理 AI 产品订阅(
|
||||
- **管理后台** - Web 界面进行监控和管理
|
||||
- **外部系统集成** - 支持通过 iframe 嵌入外部系统(如支付、工单等),扩展管理后台功能
|
||||
|
||||
## 不想自建?试试官方中转
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td width="180" align="center" valign="middle"><a href="https://shop.pincc.ai/"><img src="assets/partners/logos/pincc-logo.png" alt="pincc" width="120"></a></td>
|
||||
<td valign="middle"><b><a href="https://shop.pincc.ai/">PinCC</a></b> 是基于 Sub2API 搭建的官方中转服务,提供 Claude Code、Codex、Gemini 等主流模型的稳定中转,开箱即用,免去自建部署与运维烦恼。</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
## 生态项目
|
||||
|
||||
围绕 Sub2API 的社区扩展与集成项目:
|
||||
@@ -61,17 +73,18 @@ Sub2API 是一个 AI API 网关平台,用于分发和管理 AI 产品订阅(
|
||||
|
||||
---
|
||||
|
||||
## 文档
|
||||
## Nginx 反向代理注意事项
|
||||
|
||||
- 依赖安全:`docs/dependency-security.md`
|
||||
通过 Nginx 反向代理 Sub2API(或 CRS 服务)并搭配 Codex CLI 使用时,需要在 Nginx 配置的 `http` 块中添加:
|
||||
|
||||
```nginx
|
||||
underscores_in_headers on;
|
||||
```
|
||||
|
||||
Nginx 默认会丢弃名称中含下划线的请求头(如 `session_id`),这会导致多账号环境下的粘性会话功能失效。
|
||||
|
||||
---
|
||||
|
||||
## OpenAI Responses 兼容注意事项
|
||||
|
||||
- 当请求包含 `function_call_output` 时,需要携带 `previous_response_id`,或在 `input` 中包含带 `call_id` 的 `tool_call`/`function_call`,或带非空 `id` 且与 `function_call_output.call_id` 匹配的 `item_reference`。
|
||||
- 若依赖上游历史记录,网关会强制 `store=true` 并需要复用 `previous_response_id`,以避免出现 “No tool call found for function call output” 错误。
|
||||
|
||||
## 部署方式
|
||||
|
||||
### 方式一:脚本安装(推荐)
|
||||
|
||||
BIN
assets/partners/logos/pincc-logo.png
Normal file
BIN
assets/partners/logos/pincc-logo.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 171 KiB |
@@ -110,7 +110,6 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
||||
concurrencyCache := repository.ProvideConcurrencyCache(redisClient, configConfig)
|
||||
concurrencyService := service.ProvideConcurrencyService(concurrencyCache, accountRepository, configConfig)
|
||||
adminUserHandler := admin.NewUserHandler(adminService, concurrencyService)
|
||||
groupHandler := admin.NewGroupHandler(adminService)
|
||||
claudeOAuthClient := repository.NewClaudeOAuthClient()
|
||||
oAuthService := service.NewOAuthService(proxyRepository, claudeOAuthClient)
|
||||
openAIOAuthClient := repository.NewOpenAIOAuthClient()
|
||||
@@ -143,6 +142,8 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
||||
crsSyncService := service.NewCRSSyncService(accountRepository, proxyRepository, oAuthService, openAIOAuthService, geminiOAuthService, configConfig)
|
||||
sessionLimitCache := repository.ProvideSessionLimitCache(redisClient, configConfig)
|
||||
rpmCache := repository.NewRPMCache(redisClient)
|
||||
groupCapacityService := service.NewGroupCapacityService(accountRepository, groupRepository, concurrencyService, sessionLimitCache, rpmCache)
|
||||
groupHandler := admin.NewGroupHandler(adminService, dashboardService, groupCapacityService)
|
||||
accountHandler := admin.NewAccountHandler(adminService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, rateLimitService, accountUsageService, accountTestService, concurrencyService, crsSyncService, sessionLimitCache, rpmCache, compositeTokenCacheInvalidator)
|
||||
adminAnnouncementHandler := admin.NewAnnouncementHandler(announcementService)
|
||||
dataManagementService := service.NewDataManagementService()
|
||||
|
||||
@@ -17,7 +17,7 @@ func setupAdminRouter() (*gin.Engine, *stubAdminService) {
|
||||
adminSvc := newStubAdminService()
|
||||
|
||||
userHandler := NewUserHandler(adminSvc, nil)
|
||||
groupHandler := NewGroupHandler(adminSvc)
|
||||
groupHandler := NewGroupHandler(adminSvc, nil, nil)
|
||||
proxyHandler := NewProxyHandler(adminSvc)
|
||||
redeemHandler := NewRedeemHandler(adminSvc, nil)
|
||||
|
||||
|
||||
@@ -98,12 +98,12 @@ func (h *BackupHandler) CreateBackup(c *gin.Context) {
|
||||
expireDays = *req.ExpireDays
|
||||
}
|
||||
|
||||
record, err := h.backupService.CreateBackup(c.Request.Context(), "manual", expireDays)
|
||||
record, err := h.backupService.StartBackup(c.Request.Context(), "manual", expireDays)
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
response.Success(c, record)
|
||||
response.Accepted(c, record)
|
||||
}
|
||||
|
||||
func (h *BackupHandler) ListBackups(c *gin.Context) {
|
||||
@@ -196,9 +196,10 @@ func (h *BackupHandler) RestoreBackup(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
if err := h.backupService.RestoreBackup(c.Request.Context(), backupID); err != nil {
|
||||
record, err := h.backupService.StartRestore(c.Request.Context(), backupID)
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
response.Success(c, gin.H{"restored": true})
|
||||
response.Accepted(c, record)
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/response"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/timezone"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/usagestats"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
@@ -604,3 +605,41 @@ func (h *DashboardHandler) GetBatchAPIKeysUsage(c *gin.Context) {
|
||||
c.Header("X-Snapshot-Cache", "miss")
|
||||
response.Success(c, payload)
|
||||
}
|
||||
|
||||
// GetUserBreakdown handles getting per-user usage breakdown within a dimension.
|
||||
// GET /api/v1/admin/dashboard/user-breakdown
|
||||
// Query params: start_date, end_date, group_id, model, endpoint, endpoint_type, limit
|
||||
func (h *DashboardHandler) GetUserBreakdown(c *gin.Context) {
|
||||
startTime, endTime := parseTimeRange(c)
|
||||
|
||||
dim := usagestats.UserBreakdownDimension{}
|
||||
if v := c.Query("group_id"); v != "" {
|
||||
if id, err := strconv.ParseInt(v, 10, 64); err == nil {
|
||||
dim.GroupID = id
|
||||
}
|
||||
}
|
||||
dim.Model = c.Query("model")
|
||||
dim.Endpoint = c.Query("endpoint")
|
||||
dim.EndpointType = c.DefaultQuery("endpoint_type", "inbound")
|
||||
|
||||
limit := 50
|
||||
if v := c.Query("limit"); v != "" {
|
||||
if n, err := strconv.Atoi(v); err == nil && n > 0 && n <= 200 {
|
||||
limit = n
|
||||
}
|
||||
}
|
||||
|
||||
stats, err := h.dashboardService.GetUserBreakdownStats(
|
||||
c.Request.Context(), startTime, endTime, dim, limit,
|
||||
)
|
||||
if err != nil {
|
||||
response.Error(c, 500, "Failed to get user breakdown stats")
|
||||
return
|
||||
}
|
||||
|
||||
response.Success(c, gin.H{
|
||||
"users": stats,
|
||||
"start_date": startTime.Format("2006-01-02"),
|
||||
"end_date": endTime.Add(-24 * time.Hour).Format("2006-01-02"),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -0,0 +1,203 @@
|
||||
package admin
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/usagestats"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// --- mock repo ---
|
||||
|
||||
type userBreakdownRepoCapture struct {
|
||||
service.UsageLogRepository
|
||||
capturedDim usagestats.UserBreakdownDimension
|
||||
capturedLimit int
|
||||
result []usagestats.UserBreakdownItem
|
||||
}
|
||||
|
||||
func (r *userBreakdownRepoCapture) GetUserBreakdownStats(
|
||||
_ context.Context, _, _ time.Time,
|
||||
dim usagestats.UserBreakdownDimension, limit int,
|
||||
) ([]usagestats.UserBreakdownItem, error) {
|
||||
r.capturedDim = dim
|
||||
r.capturedLimit = limit
|
||||
if r.result != nil {
|
||||
return r.result, nil
|
||||
}
|
||||
return []usagestats.UserBreakdownItem{}, nil
|
||||
}
|
||||
|
||||
func newUserBreakdownRouter(repo *userBreakdownRepoCapture) *gin.Engine {
|
||||
gin.SetMode(gin.TestMode)
|
||||
svc := service.NewDashboardService(repo, nil, nil, nil)
|
||||
h := NewDashboardHandler(svc, nil)
|
||||
router := gin.New()
|
||||
router.GET("/admin/dashboard/user-breakdown", h.GetUserBreakdown)
|
||||
return router
|
||||
}
|
||||
|
||||
// --- tests ---
|
||||
|
||||
func TestGetUserBreakdown_GroupIDFilter(t *testing.T) {
|
||||
repo := &userBreakdownRepoCapture{}
|
||||
router := newUserBreakdownRouter(repo)
|
||||
|
||||
req := httptest.NewRequest(http.MethodGet,
|
||||
"/admin/dashboard/user-breakdown?start_date=2026-03-01&end_date=2026-03-16&group_id=42", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
require.Equal(t, http.StatusOK, w.Code)
|
||||
require.Equal(t, int64(42), repo.capturedDim.GroupID)
|
||||
require.Empty(t, repo.capturedDim.Model)
|
||||
require.Empty(t, repo.capturedDim.Endpoint)
|
||||
require.Equal(t, 50, repo.capturedLimit) // default limit
|
||||
}
|
||||
|
||||
func TestGetUserBreakdown_ModelFilter(t *testing.T) {
|
||||
repo := &userBreakdownRepoCapture{}
|
||||
router := newUserBreakdownRouter(repo)
|
||||
|
||||
req := httptest.NewRequest(http.MethodGet,
|
||||
"/admin/dashboard/user-breakdown?start_date=2026-03-01&end_date=2026-03-16&model=claude-opus-4-6", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
require.Equal(t, http.StatusOK, w.Code)
|
||||
require.Equal(t, "claude-opus-4-6", repo.capturedDim.Model)
|
||||
require.Equal(t, int64(0), repo.capturedDim.GroupID)
|
||||
}
|
||||
|
||||
func TestGetUserBreakdown_EndpointFilter(t *testing.T) {
|
||||
repo := &userBreakdownRepoCapture{}
|
||||
router := newUserBreakdownRouter(repo)
|
||||
|
||||
req := httptest.NewRequest(http.MethodGet,
|
||||
"/admin/dashboard/user-breakdown?start_date=2026-03-01&end_date=2026-03-16&endpoint=/v1/messages&endpoint_type=upstream", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
require.Equal(t, http.StatusOK, w.Code)
|
||||
require.Equal(t, "/v1/messages", repo.capturedDim.Endpoint)
|
||||
require.Equal(t, "upstream", repo.capturedDim.EndpointType)
|
||||
}
|
||||
|
||||
func TestGetUserBreakdown_DefaultEndpointType(t *testing.T) {
|
||||
repo := &userBreakdownRepoCapture{}
|
||||
router := newUserBreakdownRouter(repo)
|
||||
|
||||
req := httptest.NewRequest(http.MethodGet,
|
||||
"/admin/dashboard/user-breakdown?start_date=2026-03-01&end_date=2026-03-16&endpoint=/chat", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
require.Equal(t, http.StatusOK, w.Code)
|
||||
require.Equal(t, "inbound", repo.capturedDim.EndpointType)
|
||||
}
|
||||
|
||||
func TestGetUserBreakdown_CustomLimit(t *testing.T) {
|
||||
repo := &userBreakdownRepoCapture{}
|
||||
router := newUserBreakdownRouter(repo)
|
||||
|
||||
req := httptest.NewRequest(http.MethodGet,
|
||||
"/admin/dashboard/user-breakdown?start_date=2026-03-01&end_date=2026-03-16&model=test&limit=100", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
require.Equal(t, http.StatusOK, w.Code)
|
||||
require.Equal(t, 100, repo.capturedLimit)
|
||||
}
|
||||
|
||||
func TestGetUserBreakdown_LimitClamped(t *testing.T) {
|
||||
repo := &userBreakdownRepoCapture{}
|
||||
router := newUserBreakdownRouter(repo)
|
||||
|
||||
// limit > 200 should fall back to default 50
|
||||
req := httptest.NewRequest(http.MethodGet,
|
||||
"/admin/dashboard/user-breakdown?start_date=2026-03-01&end_date=2026-03-16&model=test&limit=999", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
require.Equal(t, http.StatusOK, w.Code)
|
||||
require.Equal(t, 50, repo.capturedLimit)
|
||||
}
|
||||
|
||||
func TestGetUserBreakdown_ResponseFormat(t *testing.T) {
|
||||
repo := &userBreakdownRepoCapture{
|
||||
result: []usagestats.UserBreakdownItem{
|
||||
{UserID: 1, Email: "alice@test.com", Requests: 100, TotalTokens: 50000, Cost: 1.5, ActualCost: 1.2},
|
||||
{UserID: 2, Email: "bob@test.com", Requests: 50, TotalTokens: 25000, Cost: 0.8, ActualCost: 0.6},
|
||||
},
|
||||
}
|
||||
router := newUserBreakdownRouter(repo)
|
||||
|
||||
req := httptest.NewRequest(http.MethodGet,
|
||||
"/admin/dashboard/user-breakdown?start_date=2026-03-01&end_date=2026-03-16&group_id=1", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
require.Equal(t, http.StatusOK, w.Code)
|
||||
|
||||
var resp struct {
|
||||
Code int `json:"code"`
|
||||
Data struct {
|
||||
Users []usagestats.UserBreakdownItem `json:"users"`
|
||||
StartDate string `json:"start_date"`
|
||||
EndDate string `json:"end_date"`
|
||||
} `json:"data"`
|
||||
}
|
||||
err := json.Unmarshal(w.Body.Bytes(), &resp)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, resp.Code)
|
||||
require.Len(t, resp.Data.Users, 2)
|
||||
require.Equal(t, int64(1), resp.Data.Users[0].UserID)
|
||||
require.Equal(t, "alice@test.com", resp.Data.Users[0].Email)
|
||||
require.Equal(t, int64(100), resp.Data.Users[0].Requests)
|
||||
require.InDelta(t, 1.2, resp.Data.Users[0].ActualCost, 0.001)
|
||||
require.Equal(t, "2026-03-01", resp.Data.StartDate)
|
||||
require.Equal(t, "2026-03-16", resp.Data.EndDate)
|
||||
}
|
||||
|
||||
func TestGetUserBreakdown_EmptyResult(t *testing.T) {
|
||||
repo := &userBreakdownRepoCapture{}
|
||||
router := newUserBreakdownRouter(repo)
|
||||
|
||||
req := httptest.NewRequest(http.MethodGet,
|
||||
"/admin/dashboard/user-breakdown?start_date=2026-03-01&end_date=2026-03-16&group_id=999", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
require.Equal(t, http.StatusOK, w.Code)
|
||||
|
||||
var resp struct {
|
||||
Data struct {
|
||||
Users []usagestats.UserBreakdownItem `json:"users"`
|
||||
} `json:"data"`
|
||||
}
|
||||
err := json.Unmarshal(w.Body.Bytes(), &resp)
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, resp.Data.Users)
|
||||
}
|
||||
|
||||
func TestGetUserBreakdown_NoFilters(t *testing.T) {
|
||||
repo := &userBreakdownRepoCapture{}
|
||||
router := newUserBreakdownRouter(repo)
|
||||
|
||||
req := httptest.NewRequest(http.MethodGet,
|
||||
"/admin/dashboard/user-breakdown?start_date=2026-03-01&end_date=2026-03-16", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
require.Equal(t, http.StatusOK, w.Code)
|
||||
require.Equal(t, int64(0), repo.capturedDim.GroupID)
|
||||
require.Empty(t, repo.capturedDim.Model)
|
||||
require.Empty(t, repo.capturedDim.Endpoint)
|
||||
}
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/handler/dto"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/response"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/timezone"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
@@ -16,7 +17,9 @@ import (
|
||||
|
||||
// GroupHandler handles admin group management
|
||||
type GroupHandler struct {
|
||||
adminService service.AdminService
|
||||
adminService service.AdminService
|
||||
dashboardService *service.DashboardService
|
||||
groupCapacityService *service.GroupCapacityService
|
||||
}
|
||||
|
||||
type optionalLimitField struct {
|
||||
@@ -69,9 +72,11 @@ func (f optionalLimitField) ToServiceInput() *float64 {
|
||||
}
|
||||
|
||||
// NewGroupHandler creates a new admin group handler
|
||||
func NewGroupHandler(adminService service.AdminService) *GroupHandler {
|
||||
func NewGroupHandler(adminService service.AdminService, dashboardService *service.DashboardService, groupCapacityService *service.GroupCapacityService) *GroupHandler {
|
||||
return &GroupHandler{
|
||||
adminService: adminService,
|
||||
adminService: adminService,
|
||||
dashboardService: dashboardService,
|
||||
groupCapacityService: groupCapacityService,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -363,6 +368,33 @@ func (h *GroupHandler) GetStats(c *gin.Context) {
|
||||
_ = groupID // TODO: implement actual stats
|
||||
}
|
||||
|
||||
// GetUsageSummary returns today's and cumulative cost for all groups.
|
||||
// GET /api/v1/admin/groups/usage-summary?timezone=Asia/Shanghai
|
||||
func (h *GroupHandler) GetUsageSummary(c *gin.Context) {
|
||||
userTZ := c.Query("timezone")
|
||||
now := timezone.NowInUserLocation(userTZ)
|
||||
todayStart := timezone.StartOfDayInUserLocation(now, userTZ)
|
||||
|
||||
results, err := h.dashboardService.GetGroupUsageSummary(c.Request.Context(), todayStart)
|
||||
if err != nil {
|
||||
response.Error(c, 500, "Failed to get group usage summary")
|
||||
return
|
||||
}
|
||||
|
||||
response.Success(c, results)
|
||||
}
|
||||
|
||||
// GetCapacitySummary returns aggregated capacity (concurrency/sessions/RPM) for all active groups.
|
||||
// GET /api/v1/admin/groups/capacity-summary
|
||||
func (h *GroupHandler) GetCapacitySummary(c *gin.Context) {
|
||||
results, err := h.groupCapacityService.GetAllGroupCapacity(c.Request.Context())
|
||||
if err != nil {
|
||||
response.Error(c, 500, "Failed to get group capacity summary")
|
||||
return
|
||||
}
|
||||
response.Success(c, results)
|
||||
}
|
||||
|
||||
// GetGroupAPIKeys handles getting API keys in a group
|
||||
// GET /api/v1/admin/groups/:id/api-keys
|
||||
func (h *GroupHandler) GetGroupAPIKeys(c *gin.Context) {
|
||||
|
||||
@@ -77,12 +77,13 @@ func (h *SubscriptionHandler) List(c *gin.Context) {
|
||||
}
|
||||
}
|
||||
status := c.Query("status")
|
||||
platform := c.Query("platform")
|
||||
|
||||
// Parse sorting parameters
|
||||
sortBy := c.DefaultQuery("sort_by", "created_at")
|
||||
sortOrder := c.DefaultQuery("sort_order", "desc")
|
||||
|
||||
subscriptions, pagination, err := h.subscriptionService.List(c.Request.Context(), page, pageSize, userID, groupID, status, sortBy, sortOrder)
|
||||
subscriptions, pagination, err := h.subscriptionService.List(c.Request.Context(), page, pageSize, userID, groupID, status, platform, sortBy, sortOrder)
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
|
||||
@@ -135,14 +135,16 @@ func GroupFromServiceAdmin(g *service.Group) *AdminGroup {
|
||||
return nil
|
||||
}
|
||||
out := &AdminGroup{
|
||||
Group: groupFromServiceBase(g),
|
||||
ModelRouting: g.ModelRouting,
|
||||
ModelRoutingEnabled: g.ModelRoutingEnabled,
|
||||
MCPXMLInject: g.MCPXMLInject,
|
||||
DefaultMappedModel: g.DefaultMappedModel,
|
||||
SupportedModelScopes: g.SupportedModelScopes,
|
||||
AccountCount: g.AccountCount,
|
||||
SortOrder: g.SortOrder,
|
||||
Group: groupFromServiceBase(g),
|
||||
ModelRouting: g.ModelRouting,
|
||||
ModelRoutingEnabled: g.ModelRoutingEnabled,
|
||||
MCPXMLInject: g.MCPXMLInject,
|
||||
DefaultMappedModel: g.DefaultMappedModel,
|
||||
SupportedModelScopes: g.SupportedModelScopes,
|
||||
AccountCount: g.AccountCount,
|
||||
ActiveAccountCount: g.ActiveAccountCount,
|
||||
RateLimitedAccountCount: g.RateLimitedAccountCount,
|
||||
SortOrder: g.SortOrder,
|
||||
}
|
||||
if len(g.AccountGroups) > 0 {
|
||||
out.AccountGroups = make([]AccountGroup, 0, len(g.AccountGroups))
|
||||
|
||||
@@ -122,9 +122,11 @@ type AdminGroup struct {
|
||||
DefaultMappedModel string `json:"default_mapped_model"`
|
||||
|
||||
// 支持的模型系列(仅 antigravity 平台使用)
|
||||
SupportedModelScopes []string `json:"supported_model_scopes"`
|
||||
AccountGroups []AccountGroup `json:"account_groups,omitempty"`
|
||||
AccountCount int64 `json:"account_count,omitempty"`
|
||||
SupportedModelScopes []string `json:"supported_model_scopes"`
|
||||
AccountGroups []AccountGroup `json:"account_groups,omitempty"`
|
||||
AccountCount int64 `json:"account_count,omitempty"`
|
||||
ActiveAccountCount int64 `json:"active_account_count,omitempty"`
|
||||
RateLimitedAccountCount int64 `json:"rate_limited_account_count,omitempty"`
|
||||
|
||||
// 分组排序
|
||||
SortOrder int `json:"sort_order"`
|
||||
|
||||
@@ -76,7 +76,7 @@ func (f *fakeGroupRepo) ListActiveByPlatform(context.Context, string) ([]service
|
||||
return nil, nil
|
||||
}
|
||||
func (f *fakeGroupRepo) ExistsByName(context.Context, string) (bool, error) { return false, nil }
|
||||
func (f *fakeGroupRepo) GetAccountCount(context.Context, int64) (int64, error) { return 0, nil }
|
||||
func (f *fakeGroupRepo) GetAccountCount(context.Context, int64) (int64, int64, error) { return 0, 0, nil }
|
||||
func (f *fakeGroupRepo) DeleteAccountGroupsByGroupID(context.Context, int64) (int64, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
@@ -136,7 +136,7 @@ func validClaudeCodeBodyJSON() []byte {
|
||||
return []byte(`{
|
||||
"model":"claude-3-5-sonnet-20241022",
|
||||
"system":[{"text":"You are Claude Code, Anthropic's official CLI for Claude."}],
|
||||
"metadata":{"user_id":"user_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa_account__session_abc-123"}
|
||||
"metadata":{"user_id":"user_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa_account__session_aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"}
|
||||
}`)
|
||||
}
|
||||
|
||||
@@ -190,7 +190,7 @@ func TestSetClaudeCodeClientContext_ReuseParsedRequestAndContextCache(t *testing
|
||||
System: []any{
|
||||
map[string]any{"text": "You are Claude Code, Anthropic's official CLI for Claude."},
|
||||
},
|
||||
MetadataUserID: "user_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa_account__session_abc-123",
|
||||
MetadataUserID: "user_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa_account__session_aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
|
||||
}
|
||||
|
||||
// body 非法 JSON,如果函数复用 parsedReq 成功则仍应判定为 Claude Code。
|
||||
@@ -209,7 +209,7 @@ func TestSetClaudeCodeClientContext_ReuseParsedRequestAndContextCache(t *testing
|
||||
"system": []any{
|
||||
map[string]any{"text": "You are Claude Code, Anthropic's official CLI for Claude."},
|
||||
},
|
||||
"metadata": map[string]any{"user_id": "user_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa_account__session_abc-123"},
|
||||
"metadata": map[string]any{"user_id": "user_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa_account__session_aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"},
|
||||
})
|
||||
|
||||
SetClaudeCodeClientContext(c, []byte(`{invalid`), nil)
|
||||
|
||||
@@ -273,8 +273,8 @@ func (r *stubGroupRepo) ListActiveByPlatform(ctx context.Context, platform strin
|
||||
func (r *stubGroupRepo) ExistsByName(ctx context.Context, name string) (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
func (r *stubGroupRepo) GetAccountCount(ctx context.Context, groupID int64) (int64, error) {
|
||||
return 0, nil
|
||||
func (r *stubGroupRepo) GetAccountCount(ctx context.Context, groupID int64) (int64, int64, error) {
|
||||
return 0, 0, nil
|
||||
}
|
||||
func (r *stubGroupRepo) DeleteAccountGroupsByGroupID(ctx context.Context, groupID int64) (int64, error) {
|
||||
return 0, nil
|
||||
@@ -345,6 +345,12 @@ func (s *stubUsageLogRepo) GetUpstreamEndpointStatsWithFilters(ctx context.Conte
|
||||
func (s *stubUsageLogRepo) GetGroupStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, requestType *int16, stream *bool, billingType *int8) ([]usagestats.GroupStat, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (s *stubUsageLogRepo) GetUserBreakdownStats(ctx context.Context, startTime, endTime time.Time, dim usagestats.UserBreakdownDimension, limit int) ([]usagestats.UserBreakdownItem, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (s *stubUsageLogRepo) GetAllGroupUsageSummary(ctx context.Context, todayStart time.Time) ([]usagestats.GroupUsageSummary, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (s *stubUsageLogRepo) GetAPIKeyUsageTrend(ctx context.Context, startTime, endTime time.Time, granularity string, limit int) ([]usagestats.APIKeyUsageTrendPoint, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@@ -49,8 +49,8 @@ const (
|
||||
antigravityDailyBaseURL = "https://daily-cloudcode-pa.sandbox.googleapis.com"
|
||||
)
|
||||
|
||||
// defaultUserAgentVersion 可通过环境变量 ANTIGRAVITY_USER_AGENT_VERSION 配置,默认 1.20.4
|
||||
var defaultUserAgentVersion = "1.20.4"
|
||||
// defaultUserAgentVersion 可通过环境变量 ANTIGRAVITY_USER_AGENT_VERSION 配置,默认 1.20.5
|
||||
var defaultUserAgentVersion = "1.20.5"
|
||||
|
||||
// defaultClientSecret 可通过环境变量 ANTIGRAVITY_OAUTH_CLIENT_SECRET 配置
|
||||
var defaultClientSecret = "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"
|
||||
|
||||
@@ -690,7 +690,7 @@ func TestConstants_值正确(t *testing.T) {
|
||||
if RedirectURI != "http://localhost:8085/callback" {
|
||||
t.Errorf("RedirectURI 不匹配: got %s", RedirectURI)
|
||||
}
|
||||
if GetUserAgent() != "antigravity/1.20.4 windows/amd64" {
|
||||
if GetUserAgent() != "antigravity/1.20.5 windows/amd64" {
|
||||
t.Errorf("UserAgent 不匹配: got %s", GetUserAgent())
|
||||
}
|
||||
if SessionTTL != 30*time.Minute {
|
||||
|
||||
@@ -47,6 +47,15 @@ func Created(c *gin.Context, data any) {
|
||||
})
|
||||
}
|
||||
|
||||
// Accepted 返回异步接受响应 (HTTP 202)
|
||||
func Accepted(c *gin.Context, data any) {
|
||||
c.JSON(http.StatusAccepted, Response{
|
||||
Code: 0,
|
||||
Message: "accepted",
|
||||
Data: data,
|
||||
})
|
||||
}
|
||||
|
||||
// Error 返回错误响应
|
||||
func Error(c *gin.Context, statusCode int, message string) {
|
||||
c.JSON(statusCode, Response{
|
||||
|
||||
@@ -90,6 +90,13 @@ type EndpointStat struct {
|
||||
ActualCost float64 `json:"actual_cost"` // 实际扣除
|
||||
}
|
||||
|
||||
// GroupUsageSummary represents today's and cumulative cost for a single group.
|
||||
type GroupUsageSummary struct {
|
||||
GroupID int64 `json:"group_id"`
|
||||
TodayCost float64 `json:"today_cost"`
|
||||
TotalCost float64 `json:"total_cost"`
|
||||
}
|
||||
|
||||
// GroupStat represents usage statistics for a single group
|
||||
type GroupStat struct {
|
||||
GroupID int64 `json:"group_id"`
|
||||
@@ -129,6 +136,24 @@ type UserSpendingRankingResponse struct {
|
||||
TotalTokens int64 `json:"total_tokens"`
|
||||
}
|
||||
|
||||
// UserBreakdownItem represents per-user usage breakdown within a dimension (group, model, endpoint).
|
||||
type UserBreakdownItem struct {
|
||||
UserID int64 `json:"user_id"`
|
||||
Email string `json:"email"`
|
||||
Requests int64 `json:"requests"`
|
||||
TotalTokens int64 `json:"total_tokens"`
|
||||
Cost float64 `json:"cost"` // 标准计费
|
||||
ActualCost float64 `json:"actual_cost"` // 实际扣除
|
||||
}
|
||||
|
||||
// UserBreakdownDimension specifies the dimension to filter for user breakdown.
|
||||
type UserBreakdownDimension struct {
|
||||
GroupID int64 // filter by group_id (>0 to enable)
|
||||
Model string // filter by model name (non-empty to enable)
|
||||
Endpoint string // filter by endpoint value (non-empty to enable)
|
||||
EndpointType string // "inbound", "upstream", or "path"
|
||||
}
|
||||
|
||||
// APIKeyUsageTrendPoint represents API key usage trend data point
|
||||
type APIKeyUsageTrendPoint struct {
|
||||
Date string `json:"date"`
|
||||
|
||||
@@ -57,6 +57,7 @@ func NewS3BackupStoreFactory() service.BackupObjectStoreFactory {
|
||||
|
||||
func (s *S3BackupStore) Upload(ctx context.Context, key string, body io.Reader, contentType string) (int64, error) {
|
||||
// 读取全部内容以获取大小(S3 PutObject 需要知道内容长度)
|
||||
// 注意:阿里云 OSS 不兼容 s3manager 分片上传的签名方式,因此使用 PutObject
|
||||
data, err := io.ReadAll(body)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("read body: %w", err)
|
||||
|
||||
@@ -20,6 +20,11 @@ const (
|
||||
billingCacheTTL = 5 * time.Minute
|
||||
billingCacheJitter = 30 * time.Second
|
||||
rateLimitCacheTTL = 7 * 24 * time.Hour // 7 days matches the longest window
|
||||
|
||||
// Rate limit window durations — must match service.RateLimitWindow* constants.
|
||||
rateLimitWindow5h = 5 * time.Hour
|
||||
rateLimitWindow1d = 24 * time.Hour
|
||||
rateLimitWindow7d = 7 * 24 * time.Hour
|
||||
)
|
||||
|
||||
// jitteredTTL 返回带随机抖动的 TTL,防止缓存雪崩
|
||||
@@ -90,17 +95,40 @@ var (
|
||||
return 1
|
||||
`)
|
||||
|
||||
// updateRateLimitUsageScript atomically increments all three rate limit usage counters.
|
||||
// Returns 0 if the key doesn't exist (cache miss), 1 on success.
|
||||
// updateRateLimitUsageScript atomically increments all three rate limit usage counters
|
||||
// with window expiration checking. If a window has expired, its usage is reset to cost
|
||||
// (instead of accumulated) and the window timestamp is updated, matching the DB-side
|
||||
// IncrementRateLimitUsage semantics.
|
||||
//
|
||||
// ARGV: [1]=cost, [2]=ttl_seconds, [3]=now_unix, [4]=window_5h_seconds, [5]=window_1d_seconds, [6]=window_7d_seconds
|
||||
updateRateLimitUsageScript = redis.NewScript(`
|
||||
local exists = redis.call('EXISTS', KEYS[1])
|
||||
if exists == 0 then
|
||||
return 0
|
||||
end
|
||||
local cost = tonumber(ARGV[1])
|
||||
redis.call('HINCRBYFLOAT', KEYS[1], 'usage_5h', cost)
|
||||
redis.call('HINCRBYFLOAT', KEYS[1], 'usage_1d', cost)
|
||||
redis.call('HINCRBYFLOAT', KEYS[1], 'usage_7d', cost)
|
||||
local now = tonumber(ARGV[3])
|
||||
local win5h = tonumber(ARGV[4])
|
||||
local win1d = tonumber(ARGV[5])
|
||||
local win7d = tonumber(ARGV[6])
|
||||
|
||||
-- Helper: check if window is expired and update usage + window accordingly
|
||||
-- Returns nothing, modifies the hash in-place.
|
||||
local function update_window(usage_field, window_field, window_duration)
|
||||
local w = tonumber(redis.call('HGET', KEYS[1], window_field) or 0)
|
||||
if w == 0 or (now - w) >= window_duration then
|
||||
-- Window expired or never started: reset usage to cost, start new window
|
||||
redis.call('HSET', KEYS[1], usage_field, tostring(cost))
|
||||
redis.call('HSET', KEYS[1], window_field, tostring(now))
|
||||
else
|
||||
-- Window still valid: accumulate
|
||||
redis.call('HINCRBYFLOAT', KEYS[1], usage_field, cost)
|
||||
end
|
||||
end
|
||||
|
||||
update_window('usage_5h', 'window_5h', win5h)
|
||||
update_window('usage_1d', 'window_1d', win1d)
|
||||
update_window('usage_7d', 'window_7d', win7d)
|
||||
redis.call('EXPIRE', KEYS[1], ARGV[2])
|
||||
return 1
|
||||
`)
|
||||
@@ -280,7 +308,15 @@ func (c *billingCache) SetAPIKeyRateLimit(ctx context.Context, keyID int64, data
|
||||
|
||||
func (c *billingCache) UpdateAPIKeyRateLimitUsage(ctx context.Context, keyID int64, cost float64) error {
|
||||
key := billingRateLimitKey(keyID)
|
||||
_, err := updateRateLimitUsageScript.Run(ctx, c.rdb, []string{key}, cost, int(rateLimitCacheTTL.Seconds())).Result()
|
||||
now := time.Now().Unix()
|
||||
_, err := updateRateLimitUsageScript.Run(ctx, c.rdb, []string{key},
|
||||
cost,
|
||||
int(rateLimitCacheTTL.Seconds()),
|
||||
now,
|
||||
int(rateLimitWindow5h.Seconds()),
|
||||
int(rateLimitWindow1d.Seconds()),
|
||||
int(rateLimitWindow7d.Seconds()),
|
||||
).Result()
|
||||
if err != nil && !errors.Is(err, redis.Nil) {
|
||||
log.Printf("Warning: update rate limit usage cache failed for api key %d: %v", keyID, err)
|
||||
return err
|
||||
|
||||
@@ -88,8 +88,9 @@ func (r *groupRepository) GetByID(ctx context.Context, id int64) (*service.Group
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
count, _ := r.GetAccountCount(ctx, out.ID)
|
||||
out.AccountCount = count
|
||||
total, active, _ := r.GetAccountCount(ctx, out.ID)
|
||||
out.AccountCount = total
|
||||
out.ActiveAccountCount = active
|
||||
return out, nil
|
||||
}
|
||||
|
||||
@@ -256,7 +257,10 @@ func (r *groupRepository) ListWithFilters(ctx context.Context, params pagination
|
||||
counts, err := r.loadAccountCounts(ctx, groupIDs)
|
||||
if err == nil {
|
||||
for i := range outGroups {
|
||||
outGroups[i].AccountCount = counts[outGroups[i].ID]
|
||||
c := counts[outGroups[i].ID]
|
||||
outGroups[i].AccountCount = c.Total
|
||||
outGroups[i].ActiveAccountCount = c.Active
|
||||
outGroups[i].RateLimitedAccountCount = c.RateLimited
|
||||
}
|
||||
}
|
||||
|
||||
@@ -283,7 +287,10 @@ func (r *groupRepository) ListActive(ctx context.Context) ([]service.Group, erro
|
||||
counts, err := r.loadAccountCounts(ctx, groupIDs)
|
||||
if err == nil {
|
||||
for i := range outGroups {
|
||||
outGroups[i].AccountCount = counts[outGroups[i].ID]
|
||||
c := counts[outGroups[i].ID]
|
||||
outGroups[i].AccountCount = c.Total
|
||||
outGroups[i].ActiveAccountCount = c.Active
|
||||
outGroups[i].RateLimitedAccountCount = c.RateLimited
|
||||
}
|
||||
}
|
||||
|
||||
@@ -310,7 +317,10 @@ func (r *groupRepository) ListActiveByPlatform(ctx context.Context, platform str
|
||||
counts, err := r.loadAccountCounts(ctx, groupIDs)
|
||||
if err == nil {
|
||||
for i := range outGroups {
|
||||
outGroups[i].AccountCount = counts[outGroups[i].ID]
|
||||
c := counts[outGroups[i].ID]
|
||||
outGroups[i].AccountCount = c.Total
|
||||
outGroups[i].ActiveAccountCount = c.Active
|
||||
outGroups[i].RateLimitedAccountCount = c.RateLimited
|
||||
}
|
||||
}
|
||||
|
||||
@@ -369,12 +379,20 @@ func (r *groupRepository) ExistsByIDs(ctx context.Context, ids []int64) (map[int
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (r *groupRepository) GetAccountCount(ctx context.Context, groupID int64) (int64, error) {
|
||||
var count int64
|
||||
if err := scanSingleRow(ctx, r.sql, "SELECT COUNT(*) FROM account_groups WHERE group_id = $1", []any{groupID}, &count); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return count, nil
|
||||
func (r *groupRepository) GetAccountCount(ctx context.Context, groupID int64) (total int64, active int64, err error) {
|
||||
var rateLimited int64
|
||||
err = scanSingleRow(ctx, r.sql,
|
||||
`SELECT COUNT(*),
|
||||
COUNT(*) FILTER (WHERE a.status = 'active' AND a.schedulable = true),
|
||||
COUNT(*) FILTER (WHERE a.status = 'active' AND (
|
||||
a.rate_limit_reset_at > NOW() OR
|
||||
a.overload_until > NOW() OR
|
||||
a.temp_unschedulable_until > NOW()
|
||||
))
|
||||
FROM account_groups ag JOIN accounts a ON a.id = ag.account_id
|
||||
WHERE ag.group_id = $1`,
|
||||
[]any{groupID}, &total, &active, &rateLimited)
|
||||
return
|
||||
}
|
||||
|
||||
func (r *groupRepository) DeleteAccountGroupsByGroupID(ctx context.Context, groupID int64) (int64, error) {
|
||||
@@ -500,15 +518,32 @@ func (r *groupRepository) DeleteCascade(ctx context.Context, id int64) ([]int64,
|
||||
return affectedUserIDs, nil
|
||||
}
|
||||
|
||||
func (r *groupRepository) loadAccountCounts(ctx context.Context, groupIDs []int64) (counts map[int64]int64, err error) {
|
||||
counts = make(map[int64]int64, len(groupIDs))
|
||||
type groupAccountCounts struct {
|
||||
Total int64
|
||||
Active int64
|
||||
RateLimited int64
|
||||
}
|
||||
|
||||
func (r *groupRepository) loadAccountCounts(ctx context.Context, groupIDs []int64) (counts map[int64]groupAccountCounts, err error) {
|
||||
counts = make(map[int64]groupAccountCounts, len(groupIDs))
|
||||
if len(groupIDs) == 0 {
|
||||
return counts, nil
|
||||
}
|
||||
|
||||
rows, err := r.sql.QueryContext(
|
||||
ctx,
|
||||
"SELECT group_id, COUNT(*) FROM account_groups WHERE group_id = ANY($1) GROUP BY group_id",
|
||||
`SELECT ag.group_id,
|
||||
COUNT(*) AS total,
|
||||
COUNT(*) FILTER (WHERE a.status = 'active' AND a.schedulable = true) AS active,
|
||||
COUNT(*) FILTER (WHERE a.status = 'active' AND (
|
||||
a.rate_limit_reset_at > NOW() OR
|
||||
a.overload_until > NOW() OR
|
||||
a.temp_unschedulable_until > NOW()
|
||||
)) AS rate_limited
|
||||
FROM account_groups ag
|
||||
JOIN accounts a ON a.id = ag.account_id
|
||||
WHERE ag.group_id = ANY($1)
|
||||
GROUP BY ag.group_id`,
|
||||
pq.Array(groupIDs),
|
||||
)
|
||||
if err != nil {
|
||||
@@ -523,11 +558,11 @@ func (r *groupRepository) loadAccountCounts(ctx context.Context, groupIDs []int6
|
||||
|
||||
for rows.Next() {
|
||||
var groupID int64
|
||||
var count int64
|
||||
if err = rows.Scan(&groupID, &count); err != nil {
|
||||
var c groupAccountCounts
|
||||
if err = rows.Scan(&groupID, &c.Total, &c.Active, &c.RateLimited); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
counts[groupID] = count
|
||||
counts[groupID] = c
|
||||
}
|
||||
if err = rows.Err(); err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -603,7 +603,7 @@ func (s *GroupRepoSuite) TestGetAccountCount() {
|
||||
_, err = s.tx.ExecContext(s.ctx, "INSERT INTO account_groups (account_id, group_id, priority, created_at) VALUES ($1, $2, $3, NOW())", a2, group.ID, 2)
|
||||
s.Require().NoError(err)
|
||||
|
||||
count, err := s.repo.GetAccountCount(s.ctx, group.ID)
|
||||
count, _, err := s.repo.GetAccountCount(s.ctx, group.ID)
|
||||
s.Require().NoError(err, "GetAccountCount")
|
||||
s.Require().Equal(int64(2), count)
|
||||
}
|
||||
@@ -619,7 +619,7 @@ func (s *GroupRepoSuite) TestGetAccountCount_Empty() {
|
||||
}
|
||||
s.Require().NoError(s.repo.Create(s.ctx, group))
|
||||
|
||||
count, err := s.repo.GetAccountCount(s.ctx, group.ID)
|
||||
count, _, err := s.repo.GetAccountCount(s.ctx, group.ID)
|
||||
s.Require().NoError(err)
|
||||
s.Require().Zero(count)
|
||||
}
|
||||
@@ -651,7 +651,7 @@ func (s *GroupRepoSuite) TestDeleteAccountGroupsByGroupID() {
|
||||
s.Require().NoError(err, "DeleteAccountGroupsByGroupID")
|
||||
s.Require().Equal(int64(1), affected, "expected 1 affected row")
|
||||
|
||||
count, err := s.repo.GetAccountCount(s.ctx, g.ID)
|
||||
count, _, err := s.repo.GetAccountCount(s.ctx, g.ID)
|
||||
s.Require().NoError(err, "GetAccountCount")
|
||||
s.Require().Equal(int64(0), count, "expected 0 account groups")
|
||||
}
|
||||
@@ -692,7 +692,7 @@ func (s *GroupRepoSuite) TestDeleteAccountGroupsByGroupID_MultipleAccounts() {
|
||||
s.Require().NoError(err)
|
||||
s.Require().Equal(int64(3), affected)
|
||||
|
||||
count, _ := s.repo.GetAccountCount(s.ctx, g.ID)
|
||||
count, _, _ := s.repo.GetAccountCount(s.ctx, g.ID)
|
||||
s.Require().Zero(count)
|
||||
}
|
||||
|
||||
|
||||
@@ -3000,6 +3000,120 @@ func (r *usageLogRepository) GetGroupStatsWithFilters(ctx context.Context, start
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// GetUserBreakdownStats returns per-user usage breakdown within a specific dimension.
|
||||
func (r *usageLogRepository) GetUserBreakdownStats(ctx context.Context, startTime, endTime time.Time, dim usagestats.UserBreakdownDimension, limit int) (results []usagestats.UserBreakdownItem, err error) {
|
||||
query := `
|
||||
SELECT
|
||||
COALESCE(ul.user_id, 0) as user_id,
|
||||
COALESCE(u.email, '') as email,
|
||||
COUNT(*) as requests,
|
||||
COALESCE(SUM(ul.input_tokens + ul.output_tokens + ul.cache_creation_tokens + ul.cache_read_tokens), 0) as total_tokens,
|
||||
COALESCE(SUM(ul.total_cost), 0) as cost,
|
||||
COALESCE(SUM(ul.actual_cost), 0) as actual_cost
|
||||
FROM usage_logs ul
|
||||
LEFT JOIN users u ON u.id = ul.user_id
|
||||
WHERE ul.created_at >= $1 AND ul.created_at < $2
|
||||
`
|
||||
args := []any{startTime, endTime}
|
||||
|
||||
if dim.GroupID > 0 {
|
||||
query += fmt.Sprintf(" AND ul.group_id = $%d", len(args)+1)
|
||||
args = append(args, dim.GroupID)
|
||||
}
|
||||
if dim.Model != "" {
|
||||
query += fmt.Sprintf(" AND ul.model = $%d", len(args)+1)
|
||||
args = append(args, dim.Model)
|
||||
}
|
||||
if dim.Endpoint != "" {
|
||||
col := resolveEndpointColumn(dim.EndpointType)
|
||||
query += fmt.Sprintf(" AND %s = $%d", col, len(args)+1)
|
||||
args = append(args, dim.Endpoint)
|
||||
}
|
||||
|
||||
query += " GROUP BY ul.user_id, u.email ORDER BY actual_cost DESC"
|
||||
if limit > 0 {
|
||||
query += fmt.Sprintf(" LIMIT %d", limit)
|
||||
}
|
||||
|
||||
rows, err := r.sql.QueryContext(ctx, query, args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if closeErr := rows.Close(); closeErr != nil && err == nil {
|
||||
err = closeErr
|
||||
results = nil
|
||||
}
|
||||
}()
|
||||
|
||||
results = make([]usagestats.UserBreakdownItem, 0)
|
||||
for rows.Next() {
|
||||
var row usagestats.UserBreakdownItem
|
||||
if err := rows.Scan(
|
||||
&row.UserID,
|
||||
&row.Email,
|
||||
&row.Requests,
|
||||
&row.TotalTokens,
|
||||
&row.Cost,
|
||||
&row.ActualCost,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
results = append(results, row)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// GetAllGroupUsageSummary returns today's and cumulative actual_cost for every group.
|
||||
// todayStart is the start-of-day in the caller's timezone (UTC-based).
|
||||
// TODO(perf): This query scans ALL usage_logs rows for total_cost aggregation.
|
||||
// When usage_logs exceeds ~1M rows, consider adding a short-lived cache (30s)
|
||||
// or a materialized view / pre-aggregation table for cumulative costs.
|
||||
func (r *usageLogRepository) GetAllGroupUsageSummary(ctx context.Context, todayStart time.Time) ([]usagestats.GroupUsageSummary, error) {
|
||||
query := `
|
||||
SELECT
|
||||
g.id AS group_id,
|
||||
COALESCE(SUM(ul.actual_cost), 0) AS total_cost,
|
||||
COALESCE(SUM(CASE WHEN ul.created_at >= $1 THEN ul.actual_cost ELSE 0 END), 0) AS today_cost
|
||||
FROM groups g
|
||||
LEFT JOIN usage_logs ul ON ul.group_id = g.id
|
||||
GROUP BY g.id
|
||||
`
|
||||
|
||||
rows, err := r.sql.QueryContext(ctx, query, todayStart)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() { _ = rows.Close() }()
|
||||
var results []usagestats.GroupUsageSummary
|
||||
for rows.Next() {
|
||||
var row usagestats.GroupUsageSummary
|
||||
if err := rows.Scan(&row.GroupID, &row.TotalCost, &row.TodayCost); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
results = append(results, row)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// resolveEndpointColumn maps endpoint type to the corresponding DB column name.
|
||||
func resolveEndpointColumn(endpointType string) string {
|
||||
switch endpointType {
|
||||
case "upstream":
|
||||
return "ul.upstream_endpoint"
|
||||
case "path":
|
||||
return "ul.inbound_endpoint || ' -> ' || ul.upstream_endpoint"
|
||||
default:
|
||||
return "ul.inbound_endpoint"
|
||||
}
|
||||
}
|
||||
|
||||
// GetGlobalStats gets usage statistics for all users within a time range
|
||||
func (r *usageLogRepository) GetGlobalStats(ctx context.Context, startTime, endTime time.Time) (*UsageStats, error) {
|
||||
query := `
|
||||
|
||||
29
backend/internal/repository/usage_log_repo_breakdown_test.go
Normal file
29
backend/internal/repository/usage_log_repo_breakdown_test.go
Normal file
@@ -0,0 +1,29 @@
|
||||
//go:build unit
|
||||
|
||||
package repository
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestResolveEndpointColumn(t *testing.T) {
|
||||
tests := []struct {
|
||||
endpointType string
|
||||
want string
|
||||
}{
|
||||
{"inbound", "ul.inbound_endpoint"},
|
||||
{"upstream", "ul.upstream_endpoint"},
|
||||
{"path", "ul.inbound_endpoint || ' -> ' || ul.upstream_endpoint"},
|
||||
{"", "ul.inbound_endpoint"}, // default
|
||||
{"unknown", "ul.inbound_endpoint"}, // fallback
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.endpointType, func(t *testing.T) {
|
||||
got := resolveEndpointColumn(tc.endpointType)
|
||||
require.Equal(t, tc.want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"time"
|
||||
|
||||
dbent "github.com/Wei-Shaw/sub2api/ent"
|
||||
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||
"github.com/Wei-Shaw/sub2api/ent/usersubscription"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/pagination"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
@@ -190,7 +191,7 @@ func (r *userSubscriptionRepository) ListByGroupID(ctx context.Context, groupID
|
||||
return userSubscriptionEntitiesToService(subs), paginationResultFromTotal(int64(total), params), nil
|
||||
}
|
||||
|
||||
func (r *userSubscriptionRepository) List(ctx context.Context, params pagination.PaginationParams, userID, groupID *int64, status, sortBy, sortOrder string) ([]service.UserSubscription, *pagination.PaginationResult, error) {
|
||||
func (r *userSubscriptionRepository) List(ctx context.Context, params pagination.PaginationParams, userID, groupID *int64, status, platform, sortBy, sortOrder string) ([]service.UserSubscription, *pagination.PaginationResult, error) {
|
||||
client := clientFromContext(ctx, r.client)
|
||||
q := client.UserSubscription.Query()
|
||||
if userID != nil {
|
||||
@@ -199,6 +200,9 @@ func (r *userSubscriptionRepository) List(ctx context.Context, params pagination
|
||||
if groupID != nil {
|
||||
q = q.Where(usersubscription.GroupIDEQ(*groupID))
|
||||
}
|
||||
if platform != "" {
|
||||
q = q.Where(usersubscription.HasGroupWith(group.PlatformEQ(platform)))
|
||||
}
|
||||
|
||||
// Status filtering with real-time expiration check
|
||||
now := time.Now()
|
||||
|
||||
@@ -271,7 +271,7 @@ func (s *UserSubscriptionRepoSuite) TestList_NoFilters() {
|
||||
group := s.mustCreateGroup("g-list")
|
||||
s.mustCreateSubscription(user.ID, group.ID, nil)
|
||||
|
||||
subs, page, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, nil, nil, "", "", "")
|
||||
subs, page, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, nil, nil, "", "", "", "")
|
||||
s.Require().NoError(err, "List")
|
||||
s.Require().Len(subs, 1)
|
||||
s.Require().Equal(int64(1), page.Total)
|
||||
@@ -285,7 +285,7 @@ func (s *UserSubscriptionRepoSuite) TestList_FilterByUserID() {
|
||||
s.mustCreateSubscription(user1.ID, group.ID, nil)
|
||||
s.mustCreateSubscription(user2.ID, group.ID, nil)
|
||||
|
||||
subs, _, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, &user1.ID, nil, "", "", "")
|
||||
subs, _, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, &user1.ID, nil, "", "", "", "")
|
||||
s.Require().NoError(err)
|
||||
s.Require().Len(subs, 1)
|
||||
s.Require().Equal(user1.ID, subs[0].UserID)
|
||||
@@ -299,7 +299,7 @@ func (s *UserSubscriptionRepoSuite) TestList_FilterByGroupID() {
|
||||
s.mustCreateSubscription(user.ID, g1.ID, nil)
|
||||
s.mustCreateSubscription(user.ID, g2.ID, nil)
|
||||
|
||||
subs, _, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, nil, &g1.ID, "", "", "")
|
||||
subs, _, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, nil, &g1.ID, "", "", "", "")
|
||||
s.Require().NoError(err)
|
||||
s.Require().Len(subs, 1)
|
||||
s.Require().Equal(g1.ID, subs[0].GroupID)
|
||||
@@ -320,7 +320,7 @@ func (s *UserSubscriptionRepoSuite) TestList_FilterByStatus() {
|
||||
c.SetExpiresAt(time.Now().Add(-24 * time.Hour))
|
||||
})
|
||||
|
||||
subs, _, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, nil, nil, service.SubscriptionStatusExpired, "", "")
|
||||
subs, _, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, nil, nil, service.SubscriptionStatusExpired, "", "", "")
|
||||
s.Require().NoError(err)
|
||||
s.Require().Len(subs, 1)
|
||||
s.Require().Equal(service.SubscriptionStatusExpired, subs[0].Status)
|
||||
|
||||
@@ -924,8 +924,8 @@ func (stubGroupRepo) ExistsByName(ctx context.Context, name string) (bool, error
|
||||
return false, errors.New("not implemented")
|
||||
}
|
||||
|
||||
func (stubGroupRepo) GetAccountCount(ctx context.Context, groupID int64) (int64, error) {
|
||||
return 0, errors.New("not implemented")
|
||||
func (stubGroupRepo) GetAccountCount(ctx context.Context, groupID int64) (int64, int64, error) {
|
||||
return 0, 0, errors.New("not implemented")
|
||||
}
|
||||
|
||||
func (stubGroupRepo) DeleteAccountGroupsByGroupID(ctx context.Context, groupID int64) (int64, error) {
|
||||
@@ -1289,7 +1289,7 @@ func (r *stubUserSubscriptionRepo) ListActiveByUserID(ctx context.Context, userI
|
||||
func (stubUserSubscriptionRepo) ListByGroupID(ctx context.Context, groupID int64, params pagination.PaginationParams) ([]service.UserSubscription, *pagination.PaginationResult, error) {
|
||||
return nil, nil, errors.New("not implemented")
|
||||
}
|
||||
func (stubUserSubscriptionRepo) List(ctx context.Context, params pagination.PaginationParams, userID, groupID *int64, status, sortBy, sortOrder string) ([]service.UserSubscription, *pagination.PaginationResult, error) {
|
||||
func (stubUserSubscriptionRepo) List(ctx context.Context, params pagination.PaginationParams, userID, groupID *int64, status, platform, sortBy, sortOrder string) ([]service.UserSubscription, *pagination.PaginationResult, error) {
|
||||
return nil, nil, errors.New("not implemented")
|
||||
}
|
||||
func (stubUserSubscriptionRepo) ExistsByUserIDAndGroupID(ctx context.Context, userID, groupID int64) (bool, error) {
|
||||
@@ -1637,6 +1637,10 @@ func (r *stubUsageLogRepo) GetGroupStatsWithFilters(ctx context.Context, startTi
|
||||
return nil, errors.New("not implemented")
|
||||
}
|
||||
|
||||
func (r *stubUsageLogRepo) GetUserBreakdownStats(ctx context.Context, startTime, endTime time.Time, dim usagestats.UserBreakdownDimension, limit int) ([]usagestats.UserBreakdownItem, error) {
|
||||
return nil, errors.New("not implemented")
|
||||
}
|
||||
|
||||
func (r *stubUsageLogRepo) GetAPIKeyUsageTrend(ctx context.Context, startTime, endTime time.Time, granularity string, limit int) ([]usagestats.APIKeyUsageTrendPoint, error) {
|
||||
return nil, errors.New("not implemented")
|
||||
}
|
||||
@@ -1782,6 +1786,9 @@ func (r *stubUsageLogRepo) GetAccountUsageStats(ctx context.Context, accountID i
|
||||
func (r *stubUsageLogRepo) GetStatsWithFilters(ctx context.Context, filters usagestats.UsageLogFilters) (*usagestats.UsageStats, error) {
|
||||
return nil, errors.New("not implemented")
|
||||
}
|
||||
func (r *stubUsageLogRepo) GetAllGroupUsageSummary(ctx context.Context, todayStart time.Time) ([]usagestats.GroupUsageSummary, error) {
|
||||
return nil, errors.New("not implemented")
|
||||
}
|
||||
|
||||
type stubSettingRepo struct {
|
||||
all map[string]string
|
||||
|
||||
@@ -135,7 +135,7 @@ func (f fakeGoogleSubscriptionRepo) ListActiveByUserID(ctx context.Context, user
|
||||
func (f fakeGoogleSubscriptionRepo) ListByGroupID(ctx context.Context, groupID int64, params pagination.PaginationParams) ([]service.UserSubscription, *pagination.PaginationResult, error) {
|
||||
return nil, nil, errors.New("not implemented")
|
||||
}
|
||||
func (f fakeGoogleSubscriptionRepo) List(ctx context.Context, params pagination.PaginationParams, userID, groupID *int64, status, sortBy, sortOrder string) ([]service.UserSubscription, *pagination.PaginationResult, error) {
|
||||
func (f fakeGoogleSubscriptionRepo) List(ctx context.Context, params pagination.PaginationParams, userID, groupID *int64, status, platform, sortBy, sortOrder string) ([]service.UserSubscription, *pagination.PaginationResult, error) {
|
||||
return nil, nil, errors.New("not implemented")
|
||||
}
|
||||
func (f fakeGoogleSubscriptionRepo) ExistsByUserIDAndGroupID(ctx context.Context, userID, groupID int64) (bool, error) {
|
||||
|
||||
@@ -646,7 +646,7 @@ func (r *stubUserSubscriptionRepo) ListByGroupID(ctx context.Context, groupID in
|
||||
return nil, nil, errors.New("not implemented")
|
||||
}
|
||||
|
||||
func (r *stubUserSubscriptionRepo) List(ctx context.Context, params pagination.PaginationParams, userID, groupID *int64, status, sortBy, sortOrder string) ([]service.UserSubscription, *pagination.PaginationResult, error) {
|
||||
func (r *stubUserSubscriptionRepo) List(ctx context.Context, params pagination.PaginationParams, userID, groupID *int64, status, platform, sortBy, sortOrder string) ([]service.UserSubscription, *pagination.PaginationResult, error) {
|
||||
return nil, nil, errors.New("not implemented")
|
||||
}
|
||||
|
||||
|
||||
@@ -198,6 +198,7 @@ func registerDashboardRoutes(admin *gin.RouterGroup, h *handler.Handlers) {
|
||||
dashboard.GET("/users-ranking", h.Admin.Dashboard.GetUserSpendingRanking)
|
||||
dashboard.POST("/users-usage", h.Admin.Dashboard.GetBatchUsersUsage)
|
||||
dashboard.POST("/api-keys-usage", h.Admin.Dashboard.GetBatchAPIKeysUsage)
|
||||
dashboard.GET("/user-breakdown", h.Admin.Dashboard.GetUserBreakdown)
|
||||
dashboard.POST("/aggregation/backfill", h.Admin.Dashboard.BackfillAggregation)
|
||||
}
|
||||
}
|
||||
@@ -226,6 +227,8 @@ func registerGroupRoutes(admin *gin.RouterGroup, h *handler.Handlers) {
|
||||
{
|
||||
groups.GET("", h.Admin.Group.List)
|
||||
groups.GET("/all", h.Admin.Group.GetAll)
|
||||
groups.GET("/usage-summary", h.Admin.Group.GetUsageSummary)
|
||||
groups.GET("/capacity-summary", h.Admin.Group.GetCapacitySummary)
|
||||
groups.PUT("/sort-order", h.Admin.Group.UpdateSortOrder)
|
||||
groups.GET("/:id", h.Admin.Group.GetByID)
|
||||
groups.POST("", h.Admin.Group.Create)
|
||||
|
||||
@@ -113,15 +113,18 @@ func (s *AccountTestService) validateUpstreamBaseURL(raw string) (string, error)
|
||||
return normalized, nil
|
||||
}
|
||||
|
||||
// generateSessionString generates a Claude Code style session string
|
||||
// generateSessionString generates a Claude Code style session string.
|
||||
// The output format is determined by the UA version in claude.DefaultHeaders,
|
||||
// ensuring consistency between the user_id format and the UA sent to upstream.
|
||||
func generateSessionString() (string, error) {
|
||||
bytes := make([]byte, 32)
|
||||
if _, err := rand.Read(bytes); err != nil {
|
||||
b := make([]byte, 32)
|
||||
if _, err := rand.Read(b); err != nil {
|
||||
return "", err
|
||||
}
|
||||
hex64 := hex.EncodeToString(bytes)
|
||||
hex64 := hex.EncodeToString(b)
|
||||
sessionUUID := uuid.New().String()
|
||||
return fmt.Sprintf("user_%s_account__session_%s", hex64, sessionUUID), nil
|
||||
uaVersion := ExtractCLIVersion(claude.DefaultHeaders["User-Agent"])
|
||||
return FormatMetadataUserID(hex64, "", sessionUUID, uaVersion), nil
|
||||
}
|
||||
|
||||
// createTestPayload creates a Claude Code style test request payload
|
||||
|
||||
@@ -48,6 +48,8 @@ type UsageLogRepository interface {
|
||||
GetEndpointStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, model string, requestType *int16, stream *bool, billingType *int8) ([]usagestats.EndpointStat, error)
|
||||
GetUpstreamEndpointStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, model string, requestType *int16, stream *bool, billingType *int8) ([]usagestats.EndpointStat, error)
|
||||
GetGroupStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, requestType *int16, stream *bool, billingType *int8) ([]usagestats.GroupStat, error)
|
||||
GetUserBreakdownStats(ctx context.Context, startTime, endTime time.Time, dim usagestats.UserBreakdownDimension, limit int) ([]usagestats.UserBreakdownItem, error)
|
||||
GetAllGroupUsageSummary(ctx context.Context, todayStart time.Time) ([]usagestats.GroupUsageSummary, error)
|
||||
GetAPIKeyUsageTrend(ctx context.Context, startTime, endTime time.Time, granularity string, limit int) ([]usagestats.APIKeyUsageTrendPoint, error)
|
||||
GetUserUsageTrend(ctx context.Context, startTime, endTime time.Time, granularity string, limit int) ([]usagestats.UserUsageTrendPoint, error)
|
||||
GetUserSpendingRanking(ctx context.Context, startTime, endTime time.Time, limit int) (*usagestats.UserSpendingRankingResponse, error)
|
||||
@@ -446,23 +448,17 @@ func (s *AccountUsageService) getOpenAIUsage(ctx context.Context, account *Accou
|
||||
}
|
||||
|
||||
if stats, err := s.usageLogRepo.GetAccountWindowStats(ctx, account.ID, now.Add(-5*time.Hour)); err == nil {
|
||||
windowStats := windowStatsFromAccountStats(stats)
|
||||
if hasMeaningfulWindowStats(windowStats) {
|
||||
if usage.FiveHour == nil {
|
||||
usage.FiveHour = &UsageProgress{Utilization: 0}
|
||||
}
|
||||
usage.FiveHour.WindowStats = windowStats
|
||||
if usage.FiveHour == nil {
|
||||
usage.FiveHour = &UsageProgress{Utilization: 0}
|
||||
}
|
||||
usage.FiveHour.WindowStats = windowStatsFromAccountStats(stats)
|
||||
}
|
||||
|
||||
if stats, err := s.usageLogRepo.GetAccountWindowStats(ctx, account.ID, now.Add(-7*24*time.Hour)); err == nil {
|
||||
windowStats := windowStatsFromAccountStats(stats)
|
||||
if hasMeaningfulWindowStats(windowStats) {
|
||||
if usage.SevenDay == nil {
|
||||
usage.SevenDay = &UsageProgress{Utilization: 0}
|
||||
}
|
||||
usage.SevenDay.WindowStats = windowStats
|
||||
if usage.SevenDay == nil {
|
||||
usage.SevenDay = &UsageProgress{Utilization: 0}
|
||||
}
|
||||
usage.SevenDay.WindowStats = windowStatsFromAccountStats(stats)
|
||||
}
|
||||
|
||||
return usage, nil
|
||||
@@ -992,13 +988,6 @@ func windowStatsFromAccountStats(stats *usagestats.AccountStats) *WindowStats {
|
||||
}
|
||||
}
|
||||
|
||||
func hasMeaningfulWindowStats(stats *WindowStats) bool {
|
||||
if stats == nil {
|
||||
return false
|
||||
}
|
||||
return stats.Requests > 0 || stats.Tokens > 0 || stats.Cost > 0 || stats.StandardCost > 0 || stats.UserCost > 0
|
||||
}
|
||||
|
||||
func buildCodexUsageProgressFromExtra(extra map[string]any, window string, now time.Time) *UsageProgress {
|
||||
if len(extra) == 0 {
|
||||
return nil
|
||||
@@ -1055,6 +1044,11 @@ func buildCodexUsageProgressFromExtra(extra map[string]any, window string, now t
|
||||
}
|
||||
}
|
||||
|
||||
// 窗口已过期(resetAt 在 now 之前)→ 额度已重置,归零
|
||||
if progress.ResetsAt != nil && !now.Before(*progress.ResetsAt) {
|
||||
progress.Utilization = 0
|
||||
}
|
||||
|
||||
return progress
|
||||
}
|
||||
|
||||
|
||||
@@ -148,3 +148,54 @@ func TestAccountUsageService_PersistOpenAICodexProbeSnapshotSetsRateLimit(t *tes
|
||||
t.Fatal("waiting for codex probe rate limit persistence timed out")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildCodexUsageProgressFromExtra_ZerosExpiredWindow(t *testing.T) {
|
||||
t.Parallel()
|
||||
now := time.Date(2026, 3, 16, 12, 0, 0, 0, time.UTC)
|
||||
|
||||
t.Run("expired 5h window zeroes utilization", func(t *testing.T) {
|
||||
extra := map[string]any{
|
||||
"codex_5h_used_percent": 42.0,
|
||||
"codex_5h_reset_at": "2026-03-16T10:00:00Z", // 2h ago
|
||||
}
|
||||
progress := buildCodexUsageProgressFromExtra(extra, "5h", now)
|
||||
if progress == nil {
|
||||
t.Fatal("expected non-nil progress")
|
||||
}
|
||||
if progress.Utilization != 0 {
|
||||
t.Fatalf("expected Utilization=0 for expired window, got %v", progress.Utilization)
|
||||
}
|
||||
if progress.RemainingSeconds != 0 {
|
||||
t.Fatalf("expected RemainingSeconds=0, got %v", progress.RemainingSeconds)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("active 5h window keeps utilization", func(t *testing.T) {
|
||||
resetAt := now.Add(2 * time.Hour).Format(time.RFC3339)
|
||||
extra := map[string]any{
|
||||
"codex_5h_used_percent": 42.0,
|
||||
"codex_5h_reset_at": resetAt,
|
||||
}
|
||||
progress := buildCodexUsageProgressFromExtra(extra, "5h", now)
|
||||
if progress == nil {
|
||||
t.Fatal("expected non-nil progress")
|
||||
}
|
||||
if progress.Utilization != 42.0 {
|
||||
t.Fatalf("expected Utilization=42, got %v", progress.Utilization)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("expired 7d window zeroes utilization", func(t *testing.T) {
|
||||
extra := map[string]any{
|
||||
"codex_7d_used_percent": 88.0,
|
||||
"codex_7d_reset_at": "2026-03-15T00:00:00Z", // yesterday
|
||||
}
|
||||
progress := buildCodexUsageProgressFromExtra(extra, "7d", now)
|
||||
if progress == nil {
|
||||
t.Fatal("expected non-nil progress")
|
||||
}
|
||||
if progress.Utilization != 0 {
|
||||
t.Fatalf("expected Utilization=0 for expired 7d window, got %v", progress.Utilization)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1530,7 +1530,9 @@ func (s *adminServiceImpl) UpdateAccount(ctx context.Context, id int64, input *U
|
||||
if len(input.Credentials) > 0 {
|
||||
account.Credentials = input.Credentials
|
||||
}
|
||||
if len(input.Extra) > 0 {
|
||||
// Extra 使用 map:需要区分“未提供(nil)”与“显式清空({})”。
|
||||
// 关闭配额限制时前端会删除 quota_* 键并提交 extra:{},此时也必须落库。
|
||||
if input.Extra != nil {
|
||||
// 保留配额用量字段,防止编辑账号时意外重置
|
||||
for _, key := range []string{"quota_used", "quota_daily_used", "quota_daily_start", "quota_weekly_used", "quota_weekly_start"} {
|
||||
if v, ok := account.Extra[key]; ok {
|
||||
|
||||
@@ -194,7 +194,7 @@ func (s *groupRepoStubForGroupUpdate) ListActiveByPlatform(context.Context, stri
|
||||
func (s *groupRepoStubForGroupUpdate) ExistsByName(context.Context, string) (bool, error) {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (s *groupRepoStubForGroupUpdate) GetAccountCount(context.Context, int64) (int64, error) {
|
||||
func (s *groupRepoStubForGroupUpdate) GetAccountCount(context.Context, int64) (int64, int64, error) {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (s *groupRepoStubForGroupUpdate) DeleteAccountGroupsByGroupID(context.Context, int64) (int64, error) {
|
||||
|
||||
@@ -160,7 +160,7 @@ func (s *groupRepoStub) ExistsByName(ctx context.Context, name string) (bool, er
|
||||
panic("unexpected ExistsByName call")
|
||||
}
|
||||
|
||||
func (s *groupRepoStub) GetAccountCount(ctx context.Context, groupID int64) (int64, error) {
|
||||
func (s *groupRepoStub) GetAccountCount(ctx context.Context, groupID int64) (int64, int64, error) {
|
||||
panic("unexpected GetAccountCount call")
|
||||
}
|
||||
|
||||
|
||||
@@ -100,7 +100,7 @@ func (s *groupRepoStubForAdmin) ExistsByName(_ context.Context, _ string) (bool,
|
||||
panic("unexpected ExistsByName call")
|
||||
}
|
||||
|
||||
func (s *groupRepoStubForAdmin) GetAccountCount(_ context.Context, _ int64) (int64, error) {
|
||||
func (s *groupRepoStubForAdmin) GetAccountCount(_ context.Context, _ int64) (int64, int64, error) {
|
||||
panic("unexpected GetAccountCount call")
|
||||
}
|
||||
|
||||
@@ -383,7 +383,7 @@ func (s *groupRepoStubForFallbackCycle) ExistsByName(_ context.Context, _ string
|
||||
panic("unexpected ExistsByName call")
|
||||
}
|
||||
|
||||
func (s *groupRepoStubForFallbackCycle) GetAccountCount(_ context.Context, _ int64) (int64, error) {
|
||||
func (s *groupRepoStubForFallbackCycle) GetAccountCount(_ context.Context, _ int64) (int64, int64, error) {
|
||||
panic("unexpected GetAccountCount call")
|
||||
}
|
||||
|
||||
@@ -458,7 +458,7 @@ func (s *groupRepoStubForInvalidRequestFallback) ExistsByName(_ context.Context,
|
||||
panic("unexpected ExistsByName call")
|
||||
}
|
||||
|
||||
func (s *groupRepoStubForInvalidRequestFallback) GetAccountCount(_ context.Context, _ int64) (int64, error) {
|
||||
func (s *groupRepoStubForInvalidRequestFallback) GetAccountCount(_ context.Context, _ int64) (int64, int64, error) {
|
||||
panic("unexpected GetAccountCount call")
|
||||
}
|
||||
|
||||
|
||||
@@ -121,3 +121,35 @@ func TestUpdateAccount_EnableOveragesClearsModelRateLimitsBeforePersist(t *testi
|
||||
_, exists := repo.account.Extra[modelRateLimitsKey]
|
||||
require.False(t, exists, "开启 overages 时应在持久化前清掉旧模型限流")
|
||||
}
|
||||
|
||||
func TestUpdateAccount_EmptyExtraPayloadCanClearQuotaLimits(t *testing.T) {
|
||||
accountID := int64(103)
|
||||
repo := &updateAccountOveragesRepoStub{
|
||||
account: &Account{
|
||||
ID: accountID,
|
||||
Platform: PlatformAnthropic,
|
||||
Type: AccountTypeAPIKey,
|
||||
Status: StatusActive,
|
||||
Extra: map[string]any{
|
||||
"quota_limit": 100.0,
|
||||
"quota_daily_limit": 10.0,
|
||||
"quota_weekly_limit": 40.0,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
svc := &adminServiceImpl{accountRepo: repo}
|
||||
updated, err := svc.UpdateAccount(context.Background(), accountID, &UpdateAccountInput{
|
||||
// 显式空对象:语义是“清空 extra 中的可配置键”(例如关闭配额限制)
|
||||
Extra: map[string]any{},
|
||||
})
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, updated)
|
||||
require.Equal(t, 1, repo.updateCalls)
|
||||
require.NotNil(t, repo.account.Extra)
|
||||
require.NotContains(t, repo.account.Extra, "quota_limit")
|
||||
require.NotContains(t, repo.account.Extra, "quota_daily_limit")
|
||||
require.NotContains(t, repo.account.Extra, "quota_weekly_limit")
|
||||
require.Len(t, repo.account.Extra, 0)
|
||||
}
|
||||
|
||||
@@ -930,7 +930,7 @@ func (s *AntigravityGatewayService) applyErrorPolicy(p antigravityRetryLoopParam
|
||||
case ErrorPolicyTempUnscheduled:
|
||||
slog.Info("temp_unschedulable_matched",
|
||||
"prefix", p.prefix, "status_code", statusCode, "account_id", p.account.ID)
|
||||
return true, statusCode, &AntigravityAccountSwitchError{OriginalAccountID: p.account.ID, IsStickySession: p.isStickySession}
|
||||
return true, statusCode, &AntigravityAccountSwitchError{OriginalAccountID: p.account.ID, RateLimitedModel: p.requestedModel, IsStickySession: p.isStickySession}
|
||||
}
|
||||
return false, statusCode, nil
|
||||
}
|
||||
@@ -1001,8 +1001,9 @@ type TestConnectionResult struct {
|
||||
MappedModel string // 实际使用的模型
|
||||
}
|
||||
|
||||
// TestConnection 测试 Antigravity 账号连接(非流式,无重试、无计费)
|
||||
// 支持 Claude 和 Gemini 两种协议,根据 modelID 前缀自动选择
|
||||
// TestConnection 测试 Antigravity 账号连接。
|
||||
// 复用 antigravityRetryLoop 的完整重试 / credits overages / 智能重试逻辑,
|
||||
// 与真实调度行为一致。差异:不做账号切换(测试指定账号)、不记录 ops 错误。
|
||||
func (s *AntigravityGatewayService) TestConnection(ctx context.Context, account *Account, modelID string) (*TestConnectionResult, error) {
|
||||
|
||||
// 获取 token
|
||||
@@ -1026,10 +1027,8 @@ func (s *AntigravityGatewayService) TestConnection(ctx context.Context, account
|
||||
// 构建请求体
|
||||
var requestBody []byte
|
||||
if strings.HasPrefix(modelID, "gemini-") {
|
||||
// Gemini 模型:直接使用 Gemini 格式
|
||||
requestBody, err = s.buildGeminiTestRequest(projectID, mappedModel)
|
||||
} else {
|
||||
// Claude 模型:使用协议转换
|
||||
requestBody, err = s.buildClaudeTestRequest(projectID, mappedModel)
|
||||
}
|
||||
if err != nil {
|
||||
@@ -1042,64 +1041,63 @@ func (s *AntigravityGatewayService) TestConnection(ctx context.Context, account
|
||||
proxyURL = account.Proxy.URL()
|
||||
}
|
||||
|
||||
baseURL := resolveAntigravityForwardBaseURL()
|
||||
if baseURL == "" {
|
||||
return nil, errors.New("no antigravity forward base url configured")
|
||||
}
|
||||
availableURLs := []string{baseURL}
|
||||
|
||||
var lastErr error
|
||||
for urlIdx, baseURL := range availableURLs {
|
||||
// 构建 HTTP 请求(总是使用流式 endpoint,与官方客户端一致)
|
||||
req, err := antigravity.NewAPIRequestWithURL(ctx, baseURL, "streamGenerateContent", accessToken, requestBody)
|
||||
if err != nil {
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
|
||||
// 调试日志:Test 请求信息
|
||||
logger.LegacyPrintf("service.antigravity_gateway", "[antigravity-Test] account=%s request_size=%d url=%s", account.Name, len(requestBody), req.URL.String())
|
||||
|
||||
// 发送请求
|
||||
resp, err := s.httpUpstream.Do(req, proxyURL, account.ID, account.Concurrency)
|
||||
if err != nil {
|
||||
lastErr = fmt.Errorf("请求失败: %w", err)
|
||||
if shouldAntigravityFallbackToNextURL(err, 0) && urlIdx < len(availableURLs)-1 {
|
||||
logger.LegacyPrintf("service.antigravity_gateway", "[antigravity-Test] URL fallback: %s -> %s", baseURL, availableURLs[urlIdx+1])
|
||||
continue
|
||||
}
|
||||
return nil, lastErr
|
||||
}
|
||||
|
||||
// 读取响应
|
||||
respBody, err := io.ReadAll(io.LimitReader(resp.Body, 2<<20))
|
||||
_ = resp.Body.Close() // 立即关闭,避免循环内 defer 导致的资源泄漏
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("读取响应失败: %w", err)
|
||||
}
|
||||
|
||||
// 检查是否需要 URL 降级
|
||||
if shouldAntigravityFallbackToNextURL(nil, resp.StatusCode) && urlIdx < len(availableURLs)-1 {
|
||||
logger.LegacyPrintf("service.antigravity_gateway", "[antigravity-Test] URL fallback (HTTP %d): %s -> %s", resp.StatusCode, baseURL, availableURLs[urlIdx+1])
|
||||
continue
|
||||
}
|
||||
|
||||
if resp.StatusCode >= 400 {
|
||||
return nil, fmt.Errorf("API 返回 %d: %s", resp.StatusCode, string(respBody))
|
||||
}
|
||||
|
||||
// 解析流式响应,提取文本
|
||||
text := extractTextFromSSEResponse(respBody)
|
||||
|
||||
// 标记成功的 URL,下次优先使用
|
||||
antigravity.DefaultURLAvailability.MarkSuccess(baseURL)
|
||||
return &TestConnectionResult{
|
||||
Text: text,
|
||||
MappedModel: mappedModel,
|
||||
}, nil
|
||||
// 复用 antigravityRetryLoop:完整的重试 / credits overages / 智能重试
|
||||
prefix := fmt.Sprintf("[antigravity-Test] account=%d(%s)", account.ID, account.Name)
|
||||
p := antigravityRetryLoopParams{
|
||||
ctx: ctx,
|
||||
prefix: prefix,
|
||||
account: account,
|
||||
proxyURL: proxyURL,
|
||||
accessToken: accessToken,
|
||||
action: "streamGenerateContent",
|
||||
body: requestBody,
|
||||
c: nil, // 无 gin.Context → 跳过 ops 追踪
|
||||
httpUpstream: s.httpUpstream,
|
||||
settingService: s.settingService,
|
||||
accountRepo: s.accountRepo,
|
||||
requestedModel: modelID,
|
||||
handleError: testConnectionHandleError,
|
||||
}
|
||||
|
||||
return nil, lastErr
|
||||
result, err := s.antigravityRetryLoop(p)
|
||||
if err != nil {
|
||||
// AccountSwitchError → 测试时不切换账号,返回友好提示
|
||||
var switchErr *AntigravityAccountSwitchError
|
||||
if errors.As(err, &switchErr) {
|
||||
return nil, fmt.Errorf("该账号模型 %s 当前限流中,请稍后重试", switchErr.RateLimitedModel)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if result == nil || result.resp == nil {
|
||||
return nil, errors.New("upstream returned empty response")
|
||||
}
|
||||
defer func() { _ = result.resp.Body.Close() }()
|
||||
|
||||
respBody, err := io.ReadAll(io.LimitReader(result.resp.Body, 2<<20))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("读取响应失败: %w", err)
|
||||
}
|
||||
|
||||
if result.resp.StatusCode >= 400 {
|
||||
return nil, fmt.Errorf("API 返回 %d: %s", result.resp.StatusCode, string(respBody))
|
||||
}
|
||||
|
||||
text := extractTextFromSSEResponse(respBody)
|
||||
return &TestConnectionResult{Text: text, MappedModel: mappedModel}, nil
|
||||
}
|
||||
|
||||
// testConnectionHandleError 是 TestConnection 使用的轻量 handleError 回调。
|
||||
// 仅记录日志,不做 ops 错误追踪或粘性会话清除。
|
||||
func testConnectionHandleError(
|
||||
_ context.Context, prefix string, account *Account,
|
||||
statusCode int, _ http.Header, body []byte,
|
||||
requestedModel string, _ int64, _ string, _ bool,
|
||||
) *handleModelRateLimitResult {
|
||||
logger.LegacyPrintf("service.antigravity_gateway",
|
||||
"%s test_handle_error status=%d model=%s account=%d body=%s",
|
||||
prefix, statusCode, requestedModel, account.ID, truncateForLog(body, 200))
|
||||
return nil
|
||||
}
|
||||
|
||||
// buildGeminiTestRequest 构建 Gemini 格式测试请求
|
||||
@@ -3079,6 +3077,22 @@ func (s *AntigravityGatewayService) handleGeminiStreamingResponse(c *gin.Context
|
||||
intervalCh = intervalTicker.C
|
||||
}
|
||||
|
||||
// 下游 keepalive:防止代理/Cloudflare Tunnel 因连接空闲而断开
|
||||
keepaliveInterval := time.Duration(0)
|
||||
if s.settingService.cfg != nil && s.settingService.cfg.Gateway.StreamKeepaliveInterval > 0 {
|
||||
keepaliveInterval = time.Duration(s.settingService.cfg.Gateway.StreamKeepaliveInterval) * time.Second
|
||||
}
|
||||
var keepaliveTicker *time.Ticker
|
||||
if keepaliveInterval > 0 {
|
||||
keepaliveTicker = time.NewTicker(keepaliveInterval)
|
||||
defer keepaliveTicker.Stop()
|
||||
}
|
||||
var keepaliveCh <-chan time.Time
|
||||
if keepaliveTicker != nil {
|
||||
keepaliveCh = keepaliveTicker.C
|
||||
}
|
||||
lastDataAt := time.Now()
|
||||
|
||||
cw := newAntigravityClientWriter(c.Writer, flusher, "antigravity gemini")
|
||||
|
||||
// 仅发送一次错误事件,避免多次写入导致协议混乱
|
||||
@@ -3111,6 +3125,8 @@ func (s *AntigravityGatewayService) handleGeminiStreamingResponse(c *gin.Context
|
||||
return nil, ev.err
|
||||
}
|
||||
|
||||
lastDataAt = time.Now()
|
||||
|
||||
line := ev.line
|
||||
trimmed := strings.TrimRight(line, "\r\n")
|
||||
if strings.HasPrefix(trimmed, "data:") {
|
||||
@@ -3170,6 +3186,19 @@ func (s *AntigravityGatewayService) handleGeminiStreamingResponse(c *gin.Context
|
||||
logger.LegacyPrintf("service.antigravity_gateway", "Stream data interval timeout (antigravity)")
|
||||
sendErrorEvent("stream_timeout")
|
||||
return &antigravityStreamResult{usage: usage, firstTokenMs: firstTokenMs}, fmt.Errorf("stream data interval timeout")
|
||||
|
||||
case <-keepaliveCh:
|
||||
if cw.Disconnected() {
|
||||
continue
|
||||
}
|
||||
if time.Since(lastDataAt) < keepaliveInterval {
|
||||
continue
|
||||
}
|
||||
// SSE ping/keepalive:保持连接活跃防止 Cloudflare Tunnel 等代理断开
|
||||
if !cw.Fprintf(":\n\n") {
|
||||
logger.LegacyPrintf("service.antigravity_gateway", "Client disconnected during keepalive ping (antigravity gemini), continuing to drain upstream for billing")
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -3895,6 +3924,22 @@ func (s *AntigravityGatewayService) handleClaudeStreamingResponse(c *gin.Context
|
||||
intervalCh = intervalTicker.C
|
||||
}
|
||||
|
||||
// 下游 keepalive:防止代理/Cloudflare Tunnel 因连接空闲而断开
|
||||
keepaliveInterval := time.Duration(0)
|
||||
if s.settingService.cfg != nil && s.settingService.cfg.Gateway.StreamKeepaliveInterval > 0 {
|
||||
keepaliveInterval = time.Duration(s.settingService.cfg.Gateway.StreamKeepaliveInterval) * time.Second
|
||||
}
|
||||
var keepaliveTicker *time.Ticker
|
||||
if keepaliveInterval > 0 {
|
||||
keepaliveTicker = time.NewTicker(keepaliveInterval)
|
||||
defer keepaliveTicker.Stop()
|
||||
}
|
||||
var keepaliveCh <-chan time.Time
|
||||
if keepaliveTicker != nil {
|
||||
keepaliveCh = keepaliveTicker.C
|
||||
}
|
||||
lastDataAt := time.Now()
|
||||
|
||||
cw := newAntigravityClientWriter(c.Writer, flusher, "antigravity claude")
|
||||
|
||||
// 仅发送一次错误事件,避免多次写入导致协议混乱
|
||||
@@ -3947,6 +3992,8 @@ func (s *AntigravityGatewayService) handleClaudeStreamingResponse(c *gin.Context
|
||||
return nil, fmt.Errorf("stream read error: %w", ev.err)
|
||||
}
|
||||
|
||||
lastDataAt = time.Now()
|
||||
|
||||
// 处理 SSE 行,转换为 Claude 格式
|
||||
claudeEvents := processor.ProcessLine(strings.TrimRight(ev.line, "\r\n"))
|
||||
if len(claudeEvents) > 0 {
|
||||
@@ -3969,6 +4016,20 @@ func (s *AntigravityGatewayService) handleClaudeStreamingResponse(c *gin.Context
|
||||
logger.LegacyPrintf("service.antigravity_gateway", "Stream data interval timeout (antigravity)")
|
||||
sendErrorEvent("stream_timeout")
|
||||
return &antigravityStreamResult{usage: convertUsage(nil), firstTokenMs: firstTokenMs}, fmt.Errorf("stream data interval timeout")
|
||||
|
||||
case <-keepaliveCh:
|
||||
if cw.Disconnected() {
|
||||
continue
|
||||
}
|
||||
if time.Since(lastDataAt) < keepaliveInterval {
|
||||
continue
|
||||
}
|
||||
// SSE ping 事件:Anthropic 原生格式,客户端会正确处理,
|
||||
// 同时保持连接活跃防止 Cloudflare Tunnel 等代理断开
|
||||
if !cw.Fprintf("event: ping\ndata: {\"type\": \"ping\"}\n\n") {
|
||||
logger.LegacyPrintf("service.antigravity_gateway", "Client disconnected during keepalive ping (antigravity claude), continuing to drain upstream for billing")
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -4299,6 +4360,22 @@ func (s *AntigravityGatewayService) streamUpstreamResponse(c *gin.Context, resp
|
||||
intervalCh = intervalTicker.C
|
||||
}
|
||||
|
||||
// 下游 keepalive:防止代理/Cloudflare Tunnel 因连接空闲而断开
|
||||
keepaliveInterval := time.Duration(0)
|
||||
if s.settingService.cfg != nil && s.settingService.cfg.Gateway.StreamKeepaliveInterval > 0 {
|
||||
keepaliveInterval = time.Duration(s.settingService.cfg.Gateway.StreamKeepaliveInterval) * time.Second
|
||||
}
|
||||
var keepaliveTicker *time.Ticker
|
||||
if keepaliveInterval > 0 {
|
||||
keepaliveTicker = time.NewTicker(keepaliveInterval)
|
||||
defer keepaliveTicker.Stop()
|
||||
}
|
||||
var keepaliveCh <-chan time.Time
|
||||
if keepaliveTicker != nil {
|
||||
keepaliveCh = keepaliveTicker.C
|
||||
}
|
||||
lastDataAt := time.Now()
|
||||
|
||||
flusher, _ := c.Writer.(http.Flusher)
|
||||
cw := newAntigravityClientWriter(c.Writer, flusher, "antigravity upstream")
|
||||
|
||||
@@ -4316,6 +4393,8 @@ func (s *AntigravityGatewayService) streamUpstreamResponse(c *gin.Context, resp
|
||||
return &antigravityStreamResult{usage: usage, firstTokenMs: firstTokenMs}
|
||||
}
|
||||
|
||||
lastDataAt = time.Now()
|
||||
|
||||
line := ev.line
|
||||
|
||||
// 记录首 token 时间
|
||||
@@ -4341,6 +4420,20 @@ func (s *AntigravityGatewayService) streamUpstreamResponse(c *gin.Context, resp
|
||||
}
|
||||
logger.LegacyPrintf("service.antigravity_gateway", "Stream data interval timeout (antigravity upstream)")
|
||||
return &antigravityStreamResult{usage: usage, firstTokenMs: firstTokenMs}
|
||||
|
||||
case <-keepaliveCh:
|
||||
if cw.Disconnected() {
|
||||
continue
|
||||
}
|
||||
if time.Since(lastDataAt) < keepaliveInterval {
|
||||
continue
|
||||
}
|
||||
// SSE ping 事件:Anthropic 原生格式,客户端会正确处理,
|
||||
// 同时保持连接活跃防止 Cloudflare Tunnel 等代理断开
|
||||
if !cw.Fprintf("event: ping\ndata: {\"type\": \"ping\"}\n\n") {
|
||||
logger.LegacyPrintf("service.antigravity_gateway", "Client disconnected during keepalive ping (antigravity upstream), continuing to drain upstream for billing")
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -260,14 +260,15 @@ func TestHandleSmartRetry_429_LongDelay_SingleAccountRetry_StillSwitches(t *test
|
||||
|
||||
// TestHandleSmartRetry_503_ShortDelay_SingleAccountRetry_NoRateLimit
|
||||
// 503 + retryDelay < 7s + SingleAccountRetry → 智能重试耗尽后直接返回 503,不设限流
|
||||
// 使用 RATE_LIMIT_EXCEEDED(走 1 次智能重试),避免 MODEL_CAPACITY_EXHAUSTED 的 60 次重试导致测试超时
|
||||
func TestHandleSmartRetry_503_ShortDelay_SingleAccountRetry_NoRateLimit(t *testing.T) {
|
||||
// 智能重试也返回 503
|
||||
failRespBody := `{
|
||||
"error": {
|
||||
"code": 503,
|
||||
"status": "UNAVAILABLE",
|
||||
"status": "RESOURCE_EXHAUSTED",
|
||||
"details": [
|
||||
{"@type": "type.googleapis.com/google.rpc.ErrorInfo", "metadata": {"model": "gemini-3-flash"}, "reason": "MODEL_CAPACITY_EXHAUSTED"},
|
||||
{"@type": "type.googleapis.com/google.rpc.ErrorInfo", "metadata": {"model": "gemini-3-flash"}, "reason": "RATE_LIMIT_EXCEEDED"},
|
||||
{"@type": "type.googleapis.com/google.rpc.RetryInfo", "retryDelay": "0.1s"}
|
||||
]
|
||||
}
|
||||
@@ -278,8 +279,9 @@ func TestHandleSmartRetry_503_ShortDelay_SingleAccountRetry_NoRateLimit(t *testi
|
||||
Body: io.NopCloser(strings.NewReader(failRespBody)),
|
||||
}
|
||||
upstream := &mockSmartRetryUpstream{
|
||||
responses: []*http.Response{failResp},
|
||||
errors: []error{nil},
|
||||
responses: []*http.Response{failResp},
|
||||
errors: []error{nil},
|
||||
repeatLast: true,
|
||||
}
|
||||
|
||||
repo := &stubAntigravityAccountRepo{}
|
||||
@@ -294,9 +296,9 @@ func TestHandleSmartRetry_503_ShortDelay_SingleAccountRetry_NoRateLimit(t *testi
|
||||
respBody := []byte(`{
|
||||
"error": {
|
||||
"code": 503,
|
||||
"status": "UNAVAILABLE",
|
||||
"status": "RESOURCE_EXHAUSTED",
|
||||
"details": [
|
||||
{"@type": "type.googleapis.com/google.rpc.ErrorInfo", "metadata": {"model": "gemini-3-flash"}, "reason": "MODEL_CAPACITY_EXHAUSTED"},
|
||||
{"@type": "type.googleapis.com/google.rpc.ErrorInfo", "metadata": {"model": "gemini-3-flash"}, "reason": "RATE_LIMIT_EXCEEDED"},
|
||||
{"@type": "type.googleapis.com/google.rpc.RetryInfo", "retryDelay": "0.1s"}
|
||||
]
|
||||
}
|
||||
@@ -569,8 +571,9 @@ func TestHandleSingleAccountRetryInPlace_WaitDurationClamped(t *testing.T) {
|
||||
|
||||
svc := &AntigravityGatewayService{}
|
||||
|
||||
// 等待时间过大应被 clamp 到 antigravitySingleAccountSmartRetryMaxWait
|
||||
result := svc.handleSingleAccountRetryInPlace(params, resp, nil, "https://ag-1.test", 999*time.Second, "gemini-3-pro")
|
||||
// waitDuration=0 会被 clamp 到 antigravitySmartRetryMinWait=1s。
|
||||
// 首次重试即成功(200),总耗时 ~1s。
|
||||
result := svc.handleSingleAccountRetryInPlace(params, resp, nil, "https://ag-1.test", 0, "gemini-3-pro")
|
||||
require.NotNil(t, result)
|
||||
require.Equal(t, smartRetryActionBreakWithResp, result.action)
|
||||
require.NotNil(t, result.resp)
|
||||
|
||||
@@ -32,11 +32,13 @@ func (c *stubSmartRetryCache) DeleteSessionAccountID(_ context.Context, groupID
|
||||
|
||||
// mockSmartRetryUpstream 用于 handleSmartRetry 测试的 mock upstream
|
||||
type mockSmartRetryUpstream struct {
|
||||
responses []*http.Response
|
||||
errors []error
|
||||
callIdx int
|
||||
calls []string
|
||||
requestBodies [][]byte
|
||||
responses []*http.Response
|
||||
responseBodies [][]byte // 缓存的 response body 字节(用于 repeatLast 重建)
|
||||
errors []error
|
||||
callIdx int
|
||||
calls []string
|
||||
requestBodies [][]byte
|
||||
repeatLast bool // 超出范围时重复最后一个响应
|
||||
}
|
||||
|
||||
func (m *mockSmartRetryUpstream) Do(req *http.Request, proxyURL string, accountID int64, accountConcurrency int) (*http.Response, error) {
|
||||
@@ -50,10 +52,45 @@ func (m *mockSmartRetryUpstream) Do(req *http.Request, proxyURL string, accountI
|
||||
m.requestBodies = append(m.requestBodies, nil)
|
||||
}
|
||||
m.callIdx++
|
||||
if idx < len(m.responses) {
|
||||
return m.responses[idx], m.errors[idx]
|
||||
|
||||
// 确定使用哪个索引
|
||||
respIdx := idx
|
||||
if respIdx >= len(m.responses) {
|
||||
if !m.repeatLast || len(m.responses) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
respIdx = len(m.responses) - 1
|
||||
}
|
||||
return nil, nil
|
||||
|
||||
resp := m.responses[respIdx]
|
||||
respErr := m.errors[respIdx]
|
||||
if resp == nil {
|
||||
return nil, respErr
|
||||
}
|
||||
|
||||
// 首次调用时缓存 body 字节
|
||||
if respIdx >= len(m.responseBodies) {
|
||||
for len(m.responseBodies) <= respIdx {
|
||||
m.responseBodies = append(m.responseBodies, nil)
|
||||
}
|
||||
}
|
||||
if m.responseBodies[respIdx] == nil && resp.Body != nil {
|
||||
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||
_ = resp.Body.Close()
|
||||
m.responseBodies[respIdx] = bodyBytes
|
||||
}
|
||||
|
||||
// 用缓存的 body 字节重建新的 reader
|
||||
var body io.ReadCloser
|
||||
if m.responseBodies[respIdx] != nil {
|
||||
body = io.NopCloser(bytes.NewReader(m.responseBodies[respIdx]))
|
||||
}
|
||||
|
||||
return &http.Response{
|
||||
StatusCode: resp.StatusCode,
|
||||
Header: resp.Header.Clone(),
|
||||
Body: body,
|
||||
}, respErr
|
||||
}
|
||||
|
||||
func (m *mockSmartRetryUpstream) DoWithTLS(req *http.Request, proxyURL string, accountID int64, accountConcurrency int, enableTLSFingerprint bool) (*http.Response, error) {
|
||||
|
||||
@@ -4,11 +4,13 @@ import (
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
@@ -84,17 +86,21 @@ type BackupScheduleConfig struct {
|
||||
|
||||
// BackupRecord 备份记录
|
||||
type BackupRecord struct {
|
||||
ID string `json:"id"`
|
||||
Status string `json:"status"` // pending, running, completed, failed
|
||||
BackupType string `json:"backup_type"` // postgres
|
||||
FileName string `json:"file_name"`
|
||||
S3Key string `json:"s3_key"`
|
||||
SizeBytes int64 `json:"size_bytes"`
|
||||
TriggeredBy string `json:"triggered_by"` // manual, scheduled
|
||||
ErrorMsg string `json:"error_message,omitempty"`
|
||||
StartedAt string `json:"started_at"`
|
||||
FinishedAt string `json:"finished_at,omitempty"`
|
||||
ExpiresAt string `json:"expires_at,omitempty"` // 过期时间
|
||||
ID string `json:"id"`
|
||||
Status string `json:"status"` // pending, running, completed, failed
|
||||
BackupType string `json:"backup_type"` // postgres
|
||||
FileName string `json:"file_name"`
|
||||
S3Key string `json:"s3_key"`
|
||||
SizeBytes int64 `json:"size_bytes"`
|
||||
TriggeredBy string `json:"triggered_by"` // manual, scheduled
|
||||
ErrorMsg string `json:"error_message,omitempty"`
|
||||
StartedAt string `json:"started_at"`
|
||||
FinishedAt string `json:"finished_at,omitempty"`
|
||||
ExpiresAt string `json:"expires_at,omitempty"` // 过期时间
|
||||
Progress string `json:"progress,omitempty"` // "dumping", "uploading", ""
|
||||
RestoreStatus string `json:"restore_status,omitempty"` // "", "running", "completed", "failed"
|
||||
RestoreError string `json:"restore_error,omitempty"`
|
||||
RestoredAt string `json:"restored_at,omitempty"`
|
||||
}
|
||||
|
||||
// BackupService 数据库备份恢复服务
|
||||
@@ -105,17 +111,24 @@ type BackupService struct {
|
||||
storeFactory BackupObjectStoreFactory
|
||||
dumper DBDumper
|
||||
|
||||
mu sync.Mutex
|
||||
store BackupObjectStore
|
||||
s3Cfg *BackupS3Config
|
||||
opMu sync.Mutex // 保护 backingUp/restoring 标志
|
||||
backingUp bool
|
||||
restoring bool
|
||||
|
||||
storeMu sync.Mutex // 保护 store/s3Cfg 缓存
|
||||
store BackupObjectStore
|
||||
s3Cfg *BackupS3Config
|
||||
|
||||
recordsMu sync.Mutex // 保护 records 的 load/save 操作
|
||||
|
||||
cronMu sync.Mutex
|
||||
cronSched *cron.Cron
|
||||
cronEntryID cron.EntryID
|
||||
|
||||
wg sync.WaitGroup // 追踪活跃的备份/恢复 goroutine
|
||||
shuttingDown atomic.Bool // 阻止新备份启动
|
||||
bgCtx context.Context // 所有后台操作的 parent context
|
||||
bgCancel context.CancelFunc // 取消所有活跃后台操作
|
||||
}
|
||||
|
||||
func NewBackupService(
|
||||
@@ -125,20 +138,26 @@ func NewBackupService(
|
||||
storeFactory BackupObjectStoreFactory,
|
||||
dumper DBDumper,
|
||||
) *BackupService {
|
||||
bgCtx, bgCancel := context.WithCancel(context.Background())
|
||||
return &BackupService{
|
||||
settingRepo: settingRepo,
|
||||
dbCfg: &cfg.Database,
|
||||
encryptor: encryptor,
|
||||
storeFactory: storeFactory,
|
||||
dumper: dumper,
|
||||
bgCtx: bgCtx,
|
||||
bgCancel: bgCancel,
|
||||
}
|
||||
}
|
||||
|
||||
// Start 启动定时备份调度器
|
||||
// Start 启动定时备份调度器并清理孤立记录
|
||||
func (s *BackupService) Start() {
|
||||
s.cronSched = cron.New()
|
||||
s.cronSched.Start()
|
||||
|
||||
// 清理重启后孤立的 running 记录
|
||||
s.recoverStaleRecords()
|
||||
|
||||
// 加载已有的定时配置
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
@@ -154,13 +173,65 @@ func (s *BackupService) Start() {
|
||||
}
|
||||
}
|
||||
|
||||
// Stop 停止定时备份
|
||||
// recoverStaleRecords 启动时将孤立的 running 记录标记为 failed
|
||||
func (s *BackupService) recoverStaleRecords() {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
records, err := s.loadRecords(ctx)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for i := range records {
|
||||
if records[i].Status == "running" {
|
||||
records[i].Status = "failed"
|
||||
records[i].ErrorMsg = "interrupted by server restart"
|
||||
records[i].Progress = ""
|
||||
records[i].FinishedAt = time.Now().Format(time.RFC3339)
|
||||
_ = s.saveRecord(ctx, &records[i])
|
||||
logger.LegacyPrintf("service.backup", "[Backup] recovered stale running record: %s", records[i].ID)
|
||||
}
|
||||
if records[i].RestoreStatus == "running" {
|
||||
records[i].RestoreStatus = "failed"
|
||||
records[i].RestoreError = "interrupted by server restart"
|
||||
_ = s.saveRecord(ctx, &records[i])
|
||||
logger.LegacyPrintf("service.backup", "[Backup] recovered stale restoring record: %s", records[i].ID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Stop 停止定时备份并等待活跃操作完成
|
||||
func (s *BackupService) Stop() {
|
||||
s.shuttingDown.Store(true)
|
||||
|
||||
s.cronMu.Lock()
|
||||
defer s.cronMu.Unlock()
|
||||
if s.cronSched != nil {
|
||||
s.cronSched.Stop()
|
||||
}
|
||||
s.cronMu.Unlock()
|
||||
|
||||
// 等待活跃备份/恢复完成(最多 5 分钟)
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
s.wg.Wait()
|
||||
close(done)
|
||||
}()
|
||||
select {
|
||||
case <-done:
|
||||
logger.LegacyPrintf("service.backup", "[Backup] all active operations finished")
|
||||
case <-time.After(5 * time.Minute):
|
||||
logger.LegacyPrintf("service.backup", "[Backup] shutdown timeout after 5min, cancelling active operations")
|
||||
if s.bgCancel != nil {
|
||||
s.bgCancel() // 取消所有后台操作
|
||||
}
|
||||
// 给 goroutine 时间响应取消并完成清理
|
||||
select {
|
||||
case <-done:
|
||||
logger.LegacyPrintf("service.backup", "[Backup] active operations cancelled and cleaned up")
|
||||
case <-time.After(10 * time.Second):
|
||||
logger.LegacyPrintf("service.backup", "[Backup] goroutine cleanup timed out")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ─── S3 配置管理 ───
|
||||
@@ -203,10 +274,10 @@ func (s *BackupService) UpdateS3Config(ctx context.Context, cfg BackupS3Config)
|
||||
}
|
||||
|
||||
// 清除缓存的 S3 客户端
|
||||
s.mu.Lock()
|
||||
s.storeMu.Lock()
|
||||
s.store = nil
|
||||
s.s3Cfg = nil
|
||||
s.mu.Unlock()
|
||||
s.storeMu.Unlock()
|
||||
|
||||
cfg.SecretAccessKey = ""
|
||||
return &cfg, nil
|
||||
@@ -314,7 +385,10 @@ func (s *BackupService) removeCronSchedule() {
|
||||
}
|
||||
|
||||
func (s *BackupService) runScheduledBackup() {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute)
|
||||
s.wg.Add(1)
|
||||
defer s.wg.Done()
|
||||
|
||||
ctx, cancel := context.WithTimeout(s.bgCtx, 30*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
// 读取定时备份配置中的过期天数
|
||||
@@ -327,7 +401,11 @@ func (s *BackupService) runScheduledBackup() {
|
||||
logger.LegacyPrintf("service.backup", "[Backup] 开始执行定时备份, 过期天数: %d", expireDays)
|
||||
record, err := s.CreateBackup(ctx, "scheduled", expireDays)
|
||||
if err != nil {
|
||||
logger.LegacyPrintf("service.backup", "[Backup] 定时备份失败: %v", err)
|
||||
if errors.Is(err, ErrBackupInProgress) {
|
||||
logger.LegacyPrintf("service.backup", "[Backup] 定时备份跳过: 已有备份正在进行中")
|
||||
} else {
|
||||
logger.LegacyPrintf("service.backup", "[Backup] 定时备份失败: %v", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
logger.LegacyPrintf("service.backup", "[Backup] 定时备份完成: id=%s size=%d", record.ID, record.SizeBytes)
|
||||
@@ -346,17 +424,21 @@ func (s *BackupService) runScheduledBackup() {
|
||||
// CreateBackup 创建全量数据库备份并上传到 S3(流式处理)
|
||||
// expireDays: 备份过期天数,0=永不过期,默认14天
|
||||
func (s *BackupService) CreateBackup(ctx context.Context, triggeredBy string, expireDays int) (*BackupRecord, error) {
|
||||
s.mu.Lock()
|
||||
if s.shuttingDown.Load() {
|
||||
return nil, infraerrors.ServiceUnavailable("SERVER_SHUTTING_DOWN", "server is shutting down")
|
||||
}
|
||||
|
||||
s.opMu.Lock()
|
||||
if s.backingUp {
|
||||
s.mu.Unlock()
|
||||
s.opMu.Unlock()
|
||||
return nil, ErrBackupInProgress
|
||||
}
|
||||
s.backingUp = true
|
||||
s.mu.Unlock()
|
||||
s.opMu.Unlock()
|
||||
defer func() {
|
||||
s.mu.Lock()
|
||||
s.opMu.Lock()
|
||||
s.backingUp = false
|
||||
s.mu.Unlock()
|
||||
s.opMu.Unlock()
|
||||
}()
|
||||
|
||||
s3Cfg, err := s.loadS3Config(ctx)
|
||||
@@ -405,36 +487,47 @@ func (s *BackupService) CreateBackup(ctx context.Context, triggeredBy string, ex
|
||||
|
||||
// 使用 io.Pipe 将 gzip 压缩数据流式传递给 S3 上传
|
||||
pr, pw := io.Pipe()
|
||||
var gzipErr error
|
||||
gzipDone := make(chan error, 1)
|
||||
go func() {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
pw.CloseWithError(fmt.Errorf("gzip goroutine panic: %v", r)) //nolint:errcheck
|
||||
gzipDone <- fmt.Errorf("gzip goroutine panic: %v", r)
|
||||
}
|
||||
}()
|
||||
gzWriter := gzip.NewWriter(pw)
|
||||
_, gzipErr = io.Copy(gzWriter, dumpReader)
|
||||
if closeErr := gzWriter.Close(); closeErr != nil && gzipErr == nil {
|
||||
gzipErr = closeErr
|
||||
var gzErr error
|
||||
_, gzErr = io.Copy(gzWriter, dumpReader)
|
||||
if closeErr := gzWriter.Close(); closeErr != nil && gzErr == nil {
|
||||
gzErr = closeErr
|
||||
}
|
||||
if closeErr := dumpReader.Close(); closeErr != nil && gzipErr == nil {
|
||||
gzipErr = closeErr
|
||||
if closeErr := dumpReader.Close(); closeErr != nil && gzErr == nil {
|
||||
gzErr = closeErr
|
||||
}
|
||||
if gzipErr != nil {
|
||||
_ = pw.CloseWithError(gzipErr)
|
||||
if gzErr != nil {
|
||||
_ = pw.CloseWithError(gzErr)
|
||||
} else {
|
||||
_ = pw.Close()
|
||||
}
|
||||
gzipDone <- gzErr
|
||||
}()
|
||||
|
||||
contentType := "application/gzip"
|
||||
sizeBytes, err := objectStore.Upload(ctx, s3Key, pr, contentType)
|
||||
if err != nil {
|
||||
_ = pr.CloseWithError(err) // 确保 gzip goroutine 不会悬挂
|
||||
gzErr := <-gzipDone // 安全等待 gzip goroutine 完成
|
||||
record.Status = "failed"
|
||||
errMsg := fmt.Sprintf("S3 upload failed: %v", err)
|
||||
if gzipErr != nil {
|
||||
errMsg = fmt.Sprintf("gzip/dump failed: %v", gzipErr)
|
||||
if gzErr != nil {
|
||||
errMsg = fmt.Sprintf("gzip/dump failed: %v", gzErr)
|
||||
}
|
||||
record.ErrorMsg = errMsg
|
||||
record.FinishedAt = time.Now().Format(time.RFC3339)
|
||||
_ = s.saveRecord(ctx, record)
|
||||
return record, fmt.Errorf("backup upload: %w", err)
|
||||
}
|
||||
<-gzipDone // 确保 gzip goroutine 已退出
|
||||
|
||||
record.SizeBytes = sizeBytes
|
||||
record.Status = "completed"
|
||||
@@ -446,19 +539,187 @@ func (s *BackupService) CreateBackup(ctx context.Context, triggeredBy string, ex
|
||||
return record, nil
|
||||
}
|
||||
|
||||
// StartBackup 异步创建备份,立即返回 running 状态的记录
|
||||
func (s *BackupService) StartBackup(ctx context.Context, triggeredBy string, expireDays int) (*BackupRecord, error) {
|
||||
if s.shuttingDown.Load() {
|
||||
return nil, infraerrors.ServiceUnavailable("SERVER_SHUTTING_DOWN", "server is shutting down")
|
||||
}
|
||||
|
||||
s.opMu.Lock()
|
||||
if s.backingUp {
|
||||
s.opMu.Unlock()
|
||||
return nil, ErrBackupInProgress
|
||||
}
|
||||
s.backingUp = true
|
||||
s.opMu.Unlock()
|
||||
|
||||
// 初始化阶段出错时自动重置标志
|
||||
launched := false
|
||||
defer func() {
|
||||
if !launched {
|
||||
s.opMu.Lock()
|
||||
s.backingUp = false
|
||||
s.opMu.Unlock()
|
||||
}
|
||||
}()
|
||||
|
||||
// 在返回前加载 S3 配置和创建 store,避免 goroutine 中配置被修改
|
||||
s3Cfg, err := s.loadS3Config(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if s3Cfg == nil || !s3Cfg.IsConfigured() {
|
||||
return nil, ErrBackupS3NotConfigured
|
||||
}
|
||||
|
||||
objectStore, err := s.getOrCreateStore(ctx, s3Cfg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("init object store: %w", err)
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
backupID := uuid.New().String()[:8]
|
||||
fileName := fmt.Sprintf("%s_%s.sql.gz", s.dbCfg.DBName, now.Format("20060102_150405"))
|
||||
s3Key := s.buildS3Key(s3Cfg, fileName)
|
||||
|
||||
var expiresAt string
|
||||
if expireDays > 0 {
|
||||
expiresAt = now.AddDate(0, 0, expireDays).Format(time.RFC3339)
|
||||
}
|
||||
|
||||
record := &BackupRecord{
|
||||
ID: backupID,
|
||||
Status: "running",
|
||||
BackupType: "postgres",
|
||||
FileName: fileName,
|
||||
S3Key: s3Key,
|
||||
TriggeredBy: triggeredBy,
|
||||
StartedAt: now.Format(time.RFC3339),
|
||||
ExpiresAt: expiresAt,
|
||||
Progress: "pending",
|
||||
}
|
||||
|
||||
if err := s.saveRecord(ctx, record); err != nil {
|
||||
return nil, fmt.Errorf("save initial record: %w", err)
|
||||
}
|
||||
|
||||
launched = true
|
||||
// 在启动 goroutine 前完成拷贝,避免数据竞争
|
||||
result := *record
|
||||
|
||||
s.wg.Add(1)
|
||||
go func() {
|
||||
defer s.wg.Done()
|
||||
defer func() {
|
||||
s.opMu.Lock()
|
||||
s.backingUp = false
|
||||
s.opMu.Unlock()
|
||||
}()
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
logger.LegacyPrintf("service.backup", "[Backup] panic recovered: %v", r)
|
||||
record.Status = "failed"
|
||||
record.ErrorMsg = fmt.Sprintf("internal panic: %v", r)
|
||||
record.Progress = ""
|
||||
record.FinishedAt = time.Now().Format(time.RFC3339)
|
||||
_ = s.saveRecord(context.Background(), record)
|
||||
}
|
||||
}()
|
||||
s.executeBackup(record, objectStore)
|
||||
}()
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// executeBackup 后台执行备份(独立于 HTTP context)
|
||||
func (s *BackupService) executeBackup(record *BackupRecord, objectStore BackupObjectStore) {
|
||||
ctx, cancel := context.WithTimeout(s.bgCtx, 30*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
// 阶段1: pg_dump
|
||||
record.Progress = "dumping"
|
||||
_ = s.saveRecord(ctx, record)
|
||||
|
||||
dumpReader, err := s.dumper.Dump(ctx)
|
||||
if err != nil {
|
||||
record.Status = "failed"
|
||||
record.ErrorMsg = fmt.Sprintf("pg_dump failed: %v", err)
|
||||
record.Progress = ""
|
||||
record.FinishedAt = time.Now().Format(time.RFC3339)
|
||||
_ = s.saveRecord(context.Background(), record)
|
||||
return
|
||||
}
|
||||
|
||||
// 阶段2: gzip + upload
|
||||
record.Progress = "uploading"
|
||||
_ = s.saveRecord(ctx, record)
|
||||
|
||||
pr, pw := io.Pipe()
|
||||
gzipDone := make(chan error, 1)
|
||||
go func() {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
pw.CloseWithError(fmt.Errorf("gzip goroutine panic: %v", r)) //nolint:errcheck
|
||||
gzipDone <- fmt.Errorf("gzip goroutine panic: %v", r)
|
||||
}
|
||||
}()
|
||||
gzWriter := gzip.NewWriter(pw)
|
||||
var gzErr error
|
||||
_, gzErr = io.Copy(gzWriter, dumpReader)
|
||||
if closeErr := gzWriter.Close(); closeErr != nil && gzErr == nil {
|
||||
gzErr = closeErr
|
||||
}
|
||||
if closeErr := dumpReader.Close(); closeErr != nil && gzErr == nil {
|
||||
gzErr = closeErr
|
||||
}
|
||||
if gzErr != nil {
|
||||
_ = pw.CloseWithError(gzErr)
|
||||
} else {
|
||||
_ = pw.Close()
|
||||
}
|
||||
gzipDone <- gzErr
|
||||
}()
|
||||
|
||||
contentType := "application/gzip"
|
||||
sizeBytes, err := objectStore.Upload(ctx, record.S3Key, pr, contentType)
|
||||
if err != nil {
|
||||
_ = pr.CloseWithError(err) // 确保 gzip goroutine 不会悬挂
|
||||
gzErr := <-gzipDone // 安全等待 gzip goroutine 完成
|
||||
record.Status = "failed"
|
||||
errMsg := fmt.Sprintf("S3 upload failed: %v", err)
|
||||
if gzErr != nil {
|
||||
errMsg = fmt.Sprintf("gzip/dump failed: %v", gzErr)
|
||||
}
|
||||
record.ErrorMsg = errMsg
|
||||
record.Progress = ""
|
||||
record.FinishedAt = time.Now().Format(time.RFC3339)
|
||||
_ = s.saveRecord(context.Background(), record)
|
||||
return
|
||||
}
|
||||
<-gzipDone // 确保 gzip goroutine 已退出
|
||||
|
||||
record.SizeBytes = sizeBytes
|
||||
record.Status = "completed"
|
||||
record.Progress = ""
|
||||
record.FinishedAt = time.Now().Format(time.RFC3339)
|
||||
if err := s.saveRecord(context.Background(), record); err != nil {
|
||||
logger.LegacyPrintf("service.backup", "[Backup] 保存备份记录失败: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// RestoreBackup 从 S3 下载备份并流式恢复到数据库
|
||||
func (s *BackupService) RestoreBackup(ctx context.Context, backupID string) error {
|
||||
s.mu.Lock()
|
||||
s.opMu.Lock()
|
||||
if s.restoring {
|
||||
s.mu.Unlock()
|
||||
s.opMu.Unlock()
|
||||
return ErrRestoreInProgress
|
||||
}
|
||||
s.restoring = true
|
||||
s.mu.Unlock()
|
||||
s.opMu.Unlock()
|
||||
defer func() {
|
||||
s.mu.Lock()
|
||||
s.opMu.Lock()
|
||||
s.restoring = false
|
||||
s.mu.Unlock()
|
||||
s.opMu.Unlock()
|
||||
}()
|
||||
|
||||
record, err := s.GetBackupRecord(ctx, backupID)
|
||||
@@ -500,6 +761,112 @@ func (s *BackupService) RestoreBackup(ctx context.Context, backupID string) erro
|
||||
return nil
|
||||
}
|
||||
|
||||
// StartRestore 异步恢复备份,立即返回
|
||||
func (s *BackupService) StartRestore(ctx context.Context, backupID string) (*BackupRecord, error) {
|
||||
if s.shuttingDown.Load() {
|
||||
return nil, infraerrors.ServiceUnavailable("SERVER_SHUTTING_DOWN", "server is shutting down")
|
||||
}
|
||||
|
||||
s.opMu.Lock()
|
||||
if s.restoring {
|
||||
s.opMu.Unlock()
|
||||
return nil, ErrRestoreInProgress
|
||||
}
|
||||
s.restoring = true
|
||||
s.opMu.Unlock()
|
||||
|
||||
// 初始化阶段出错时自动重置标志
|
||||
launched := false
|
||||
defer func() {
|
||||
if !launched {
|
||||
s.opMu.Lock()
|
||||
s.restoring = false
|
||||
s.opMu.Unlock()
|
||||
}
|
||||
}()
|
||||
|
||||
record, err := s.GetBackupRecord(ctx, backupID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if record.Status != "completed" {
|
||||
return nil, infraerrors.BadRequest("BACKUP_NOT_COMPLETED", "can only restore from a completed backup")
|
||||
}
|
||||
|
||||
s3Cfg, err := s.loadS3Config(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
objectStore, err := s.getOrCreateStore(ctx, s3Cfg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("init object store: %w", err)
|
||||
}
|
||||
|
||||
record.RestoreStatus = "running"
|
||||
_ = s.saveRecord(ctx, record)
|
||||
|
||||
launched = true
|
||||
result := *record
|
||||
|
||||
s.wg.Add(1)
|
||||
go func() {
|
||||
defer s.wg.Done()
|
||||
defer func() {
|
||||
s.opMu.Lock()
|
||||
s.restoring = false
|
||||
s.opMu.Unlock()
|
||||
}()
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
logger.LegacyPrintf("service.backup", "[Backup] restore panic recovered: %v", r)
|
||||
record.RestoreStatus = "failed"
|
||||
record.RestoreError = fmt.Sprintf("internal panic: %v", r)
|
||||
_ = s.saveRecord(context.Background(), record)
|
||||
}
|
||||
}()
|
||||
s.executeRestore(record, objectStore)
|
||||
}()
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// executeRestore 后台执行恢复
|
||||
func (s *BackupService) executeRestore(record *BackupRecord, objectStore BackupObjectStore) {
|
||||
ctx, cancel := context.WithTimeout(s.bgCtx, 30*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
body, err := objectStore.Download(ctx, record.S3Key)
|
||||
if err != nil {
|
||||
record.RestoreStatus = "failed"
|
||||
record.RestoreError = fmt.Sprintf("S3 download failed: %v", err)
|
||||
_ = s.saveRecord(context.Background(), record)
|
||||
return
|
||||
}
|
||||
defer func() { _ = body.Close() }()
|
||||
|
||||
gzReader, err := gzip.NewReader(body)
|
||||
if err != nil {
|
||||
record.RestoreStatus = "failed"
|
||||
record.RestoreError = fmt.Sprintf("gzip reader: %v", err)
|
||||
_ = s.saveRecord(context.Background(), record)
|
||||
return
|
||||
}
|
||||
defer func() { _ = gzReader.Close() }()
|
||||
|
||||
if err := s.dumper.Restore(ctx, gzReader); err != nil {
|
||||
record.RestoreStatus = "failed"
|
||||
record.RestoreError = fmt.Sprintf("pg restore: %v", err)
|
||||
_ = s.saveRecord(context.Background(), record)
|
||||
return
|
||||
}
|
||||
|
||||
record.RestoreStatus = "completed"
|
||||
record.RestoredAt = time.Now().Format(time.RFC3339)
|
||||
if err := s.saveRecord(context.Background(), record); err != nil {
|
||||
logger.LegacyPrintf("service.backup", "[Backup] 保存恢复记录失败: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// ─── 备份记录管理 ───
|
||||
|
||||
func (s *BackupService) ListBackups(ctx context.Context) ([]BackupRecord, error) {
|
||||
@@ -614,8 +981,8 @@ func (s *BackupService) loadS3Config(ctx context.Context) (*BackupS3Config, erro
|
||||
}
|
||||
|
||||
func (s *BackupService) getOrCreateStore(ctx context.Context, cfg *BackupS3Config) (BackupObjectStore, error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.storeMu.Lock()
|
||||
defer s.storeMu.Unlock()
|
||||
|
||||
if s.store != nil && s.s3Cfg != nil {
|
||||
return s.store, nil
|
||||
|
||||
@@ -134,6 +134,30 @@ func (m *mockDumper) Restore(_ context.Context, data io.Reader) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// blockingDumper 可控延迟的 dumper,用于测试异步行为
|
||||
type blockingDumper struct {
|
||||
blockCh chan struct{}
|
||||
data []byte
|
||||
restErr error
|
||||
}
|
||||
|
||||
func (d *blockingDumper) Dump(ctx context.Context) (io.ReadCloser, error) {
|
||||
select {
|
||||
case <-d.blockCh:
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
return io.NopCloser(bytes.NewReader(d.data)), nil
|
||||
}
|
||||
|
||||
func (d *blockingDumper) Restore(_ context.Context, data io.Reader) error {
|
||||
if d.restErr != nil {
|
||||
return d.restErr
|
||||
}
|
||||
_, _ = io.ReadAll(data)
|
||||
return nil
|
||||
}
|
||||
|
||||
type mockObjectStore struct {
|
||||
objects map[string][]byte
|
||||
mu sync.Mutex
|
||||
@@ -179,7 +203,7 @@ func (m *mockObjectStore) HeadBucket(_ context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func newTestBackupService(repo *mockSettingRepo, dumper *mockDumper, store *mockObjectStore) *BackupService {
|
||||
func newTestBackupService(repo *mockSettingRepo, dumper DBDumper, store *mockObjectStore) *BackupService {
|
||||
cfg := &config.Config{
|
||||
Database: config.DatabaseConfig{
|
||||
Host: "localhost",
|
||||
@@ -361,9 +385,9 @@ func TestBackupService_CreateBackup_ConcurrentBlocked(t *testing.T) {
|
||||
svc := newTestBackupService(repo, dumper, store)
|
||||
|
||||
// 手动设置 backingUp 标志
|
||||
svc.mu.Lock()
|
||||
svc.opMu.Lock()
|
||||
svc.backingUp = true
|
||||
svc.mu.Unlock()
|
||||
svc.opMu.Unlock()
|
||||
|
||||
_, err := svc.CreateBackup(context.Background(), "manual", 14)
|
||||
require.ErrorIs(t, err, ErrBackupInProgress)
|
||||
@@ -526,3 +550,154 @@ func TestBackupService_LoadS3Config_Corrupted(t *testing.T) {
|
||||
require.Error(t, err)
|
||||
require.Nil(t, cfg)
|
||||
}
|
||||
|
||||
// ─── Async Backup Tests ───
|
||||
|
||||
func TestStartBackup_ReturnsImmediately(t *testing.T) {
|
||||
repo := newMockSettingRepo()
|
||||
seedS3Config(t, repo)
|
||||
|
||||
dumper := &blockingDumper{blockCh: make(chan struct{}), data: []byte("data")}
|
||||
store := newMockObjectStore()
|
||||
svc := newTestBackupService(repo, dumper, store)
|
||||
|
||||
record, err := svc.StartBackup(context.Background(), "manual", 14)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "running", record.Status)
|
||||
require.NotEmpty(t, record.ID)
|
||||
|
||||
// 释放 dumper 让后台完成
|
||||
close(dumper.blockCh)
|
||||
svc.wg.Wait()
|
||||
|
||||
// 验证最终状态
|
||||
final, err := svc.GetBackupRecord(context.Background(), record.ID)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "completed", final.Status)
|
||||
require.Greater(t, final.SizeBytes, int64(0))
|
||||
}
|
||||
|
||||
func TestStartBackup_ConcurrentBlocked(t *testing.T) {
|
||||
repo := newMockSettingRepo()
|
||||
seedS3Config(t, repo)
|
||||
|
||||
dumper := &blockingDumper{blockCh: make(chan struct{}), data: []byte("data")}
|
||||
store := newMockObjectStore()
|
||||
svc := newTestBackupService(repo, dumper, store)
|
||||
|
||||
// 第一次启动
|
||||
_, err := svc.StartBackup(context.Background(), "manual", 14)
|
||||
require.NoError(t, err)
|
||||
|
||||
// 第二次应被阻塞
|
||||
_, err = svc.StartBackup(context.Background(), "manual", 14)
|
||||
require.ErrorIs(t, err, ErrBackupInProgress)
|
||||
|
||||
close(dumper.blockCh)
|
||||
svc.wg.Wait()
|
||||
}
|
||||
|
||||
func TestStartBackup_ShuttingDown(t *testing.T) {
|
||||
repo := newMockSettingRepo()
|
||||
seedS3Config(t, repo)
|
||||
svc := newTestBackupService(repo, &mockDumper{dumpData: []byte("data")}, newMockObjectStore())
|
||||
|
||||
svc.shuttingDown.Store(true)
|
||||
|
||||
_, err := svc.StartBackup(context.Background(), "manual", 14)
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "shutting down")
|
||||
}
|
||||
|
||||
func TestRecoverStaleRecords(t *testing.T) {
|
||||
repo := newMockSettingRepo()
|
||||
svc := newTestBackupService(repo, &mockDumper{}, newMockObjectStore())
|
||||
|
||||
// 模拟一条孤立的 running 记录
|
||||
_ = svc.saveRecord(context.Background(), &BackupRecord{
|
||||
ID: "stale-1",
|
||||
Status: "running",
|
||||
StartedAt: time.Now().Add(-1 * time.Hour).Format(time.RFC3339),
|
||||
})
|
||||
// 模拟一条孤立的恢复中记录
|
||||
_ = svc.saveRecord(context.Background(), &BackupRecord{
|
||||
ID: "stale-2",
|
||||
Status: "completed",
|
||||
RestoreStatus: "running",
|
||||
StartedAt: time.Now().Add(-1 * time.Hour).Format(time.RFC3339),
|
||||
})
|
||||
|
||||
svc.recoverStaleRecords()
|
||||
|
||||
r1, _ := svc.GetBackupRecord(context.Background(), "stale-1")
|
||||
require.Equal(t, "failed", r1.Status)
|
||||
require.Contains(t, r1.ErrorMsg, "server restart")
|
||||
|
||||
r2, _ := svc.GetBackupRecord(context.Background(), "stale-2")
|
||||
require.Equal(t, "failed", r2.RestoreStatus)
|
||||
require.Contains(t, r2.RestoreError, "server restart")
|
||||
}
|
||||
|
||||
func TestGracefulShutdown(t *testing.T) {
|
||||
repo := newMockSettingRepo()
|
||||
seedS3Config(t, repo)
|
||||
|
||||
dumper := &blockingDumper{blockCh: make(chan struct{}), data: []byte("data")}
|
||||
store := newMockObjectStore()
|
||||
svc := newTestBackupService(repo, dumper, store)
|
||||
|
||||
_, err := svc.StartBackup(context.Background(), "manual", 14)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Stop 应该等待备份完成
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
svc.Stop()
|
||||
close(done)
|
||||
}()
|
||||
|
||||
// 短暂等待确认 Stop 还在等待
|
||||
select {
|
||||
case <-done:
|
||||
t.Fatal("Stop returned before backup finished")
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
// 预期:Stop 还在等待
|
||||
}
|
||||
|
||||
// 释放备份
|
||||
close(dumper.blockCh)
|
||||
|
||||
// 现在 Stop 应该完成
|
||||
select {
|
||||
case <-done:
|
||||
// 预期
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Fatal("Stop did not return after backup finished")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStartRestore_Async(t *testing.T) {
|
||||
repo := newMockSettingRepo()
|
||||
seedS3Config(t, repo)
|
||||
|
||||
dumpContent := "-- PostgreSQL dump\nCREATE TABLE test (id int);\n"
|
||||
dumper := &mockDumper{dumpData: []byte(dumpContent)}
|
||||
store := newMockObjectStore()
|
||||
svc := newTestBackupService(repo, dumper, store)
|
||||
|
||||
// 先创建一个备份(同步方式)
|
||||
record, err := svc.CreateBackup(context.Background(), "manual", 14)
|
||||
require.NoError(t, err)
|
||||
|
||||
// 异步恢复
|
||||
restored, err := svc.StartRestore(context.Background(), record.ID)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "running", restored.RestoreStatus)
|
||||
|
||||
svc.wg.Wait()
|
||||
|
||||
// 验证最终状态
|
||||
final, err := svc.GetBackupRecord(context.Background(), record.ID)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "completed", final.RestoreStatus)
|
||||
}
|
||||
|
||||
@@ -21,9 +21,6 @@ var (
|
||||
// 带捕获组的版本提取正则
|
||||
claudeCodeUAVersionPattern = regexp.MustCompile(`(?i)^claude-cli/(\d+\.\d+\.\d+)`)
|
||||
|
||||
// metadata.user_id 格式: user_{64位hex}_account__session_{uuid}
|
||||
userIDPattern = regexp.MustCompile(`^user_[a-fA-F0-9]{64}_account__session_[\w-]+$`)
|
||||
|
||||
// System prompt 相似度阈值(默认 0.5,和 claude-relay-service 一致)
|
||||
systemPromptThreshold = 0.5
|
||||
)
|
||||
@@ -124,7 +121,7 @@ func (v *ClaudeCodeValidator) Validate(r *http.Request, body map[string]any) boo
|
||||
return false
|
||||
}
|
||||
|
||||
if !userIDPattern.MatchString(userID) {
|
||||
if ParseMetadataUserID(userID) == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -278,11 +275,7 @@ func SetClaudeCodeClient(ctx context.Context, isClaudeCode bool) context.Context
|
||||
// ExtractVersion 从 User-Agent 中提取 Claude Code 版本号
|
||||
// 返回 "2.1.22" 形式的版本号,如果不匹配返回空字符串
|
||||
func (v *ClaudeCodeValidator) ExtractVersion(ua string) string {
|
||||
matches := claudeCodeUAVersionPattern.FindStringSubmatch(ua)
|
||||
if len(matches) >= 2 {
|
||||
return matches[1]
|
||||
}
|
||||
return ""
|
||||
return ExtractCLIVersion(ua)
|
||||
}
|
||||
|
||||
// SetClaudeCodeVersion 将 Claude Code 版本号设置到 context 中
|
||||
|
||||
@@ -148,6 +148,15 @@ func (s *DashboardService) GetGroupStatsWithFilters(ctx context.Context, startTi
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
// GetGroupUsageSummary returns today's and cumulative cost for all groups.
|
||||
func (s *DashboardService) GetGroupUsageSummary(ctx context.Context, todayStart time.Time) ([]usagestats.GroupUsageSummary, error) {
|
||||
results, err := s.usageRepo.GetAllGroupUsageSummary(ctx, todayStart)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get group usage summary: %w", err)
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
func (s *DashboardService) getCachedDashboardStats(ctx context.Context) (*usagestats.DashboardStats, bool, error) {
|
||||
data, err := s.cache.GetDashboardStats(ctx)
|
||||
if err != nil {
|
||||
@@ -335,6 +344,14 @@ func (s *DashboardService) GetUserSpendingRanking(ctx context.Context, startTime
|
||||
return ranking, nil
|
||||
}
|
||||
|
||||
func (s *DashboardService) GetUserBreakdownStats(ctx context.Context, startTime, endTime time.Time, dim usagestats.UserBreakdownDimension, limit int) ([]usagestats.UserBreakdownItem, error) {
|
||||
stats, err := s.usageRepo.GetUserBreakdownStats(ctx, startTime, endTime, dim, limit)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get user breakdown stats: %w", err)
|
||||
}
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
func (s *DashboardService) GetBatchUserUsageStats(ctx context.Context, userIDs []int64, startTime, endTime time.Time) (map[int64]*usagestats.BatchUserUsageStats, error) {
|
||||
stats, err := s.usageRepo.GetBatchUserUsageStats(ctx, userIDs, startTime, endTime)
|
||||
if err != nil {
|
||||
|
||||
@@ -278,8 +278,8 @@ func (m *mockGroupRepoForGateway) ListActiveByPlatform(ctx context.Context, plat
|
||||
func (m *mockGroupRepoForGateway) ExistsByName(ctx context.Context, name string) (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
func (m *mockGroupRepoForGateway) GetAccountCount(ctx context.Context, groupID int64) (int64, error) {
|
||||
return 0, nil
|
||||
func (m *mockGroupRepoForGateway) GetAccountCount(ctx context.Context, groupID int64) (int64, int64, error) {
|
||||
return 0, 0, nil
|
||||
}
|
||||
func (m *mockGroupRepoForGateway) DeleteAccountGroupsByGroupID(ctx context.Context, groupID int64) (int64, error) {
|
||||
return 0, nil
|
||||
|
||||
@@ -326,7 +326,6 @@ func isClaudeCodeCredentialScopeError(msg string) bool {
|
||||
// Some upstream APIs return non-standard "data:" without space (should be "data: ").
|
||||
var (
|
||||
sseDataRe = regexp.MustCompile(`^data:\s*`)
|
||||
sessionIDRegex = regexp.MustCompile(`session_([a-f0-9-]{36})`)
|
||||
claudeCliUserAgentRe = regexp.MustCompile(`^claude-cli/\d+\.\d+\.\d+`)
|
||||
|
||||
// claudeCodePromptPrefixes 用于检测 Claude Code 系统提示词的前缀列表
|
||||
@@ -644,8 +643,8 @@ func (s *GatewayService) GenerateSessionHash(parsed *ParsedRequest) string {
|
||||
|
||||
// 1. 最高优先级:从 metadata.user_id 提取 session_xxx
|
||||
if parsed.MetadataUserID != "" {
|
||||
if match := sessionIDRegex.FindStringSubmatch(parsed.MetadataUserID); len(match) > 1 {
|
||||
return match[1]
|
||||
if uid := ParseMetadataUserID(parsed.MetadataUserID); uid != nil && uid.SessionID != "" {
|
||||
return uid.SessionID
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1026,13 +1025,13 @@ func (s *GatewayService) buildOAuthMetadataUserID(parsed *ParsedRequest, account
|
||||
sessionID = generateSessionUUID(seed)
|
||||
}
|
||||
|
||||
// Prefer the newer format that includes account_uuid (if present),
|
||||
// otherwise fall back to the legacy Claude Code format.
|
||||
accountUUID := strings.TrimSpace(account.GetExtraString("account_uuid"))
|
||||
if accountUUID != "" {
|
||||
return fmt.Sprintf("user_%s_account_%s_session_%s", userID, accountUUID, sessionID)
|
||||
// 根据指纹 UA 版本选择输出格式
|
||||
var uaVersion string
|
||||
if fp != nil {
|
||||
uaVersion = ExtractCLIVersion(fp.UserAgent)
|
||||
}
|
||||
return fmt.Sprintf("user_%s_account__session_%s", userID, sessionID)
|
||||
accountUUID := strings.TrimSpace(account.GetExtraString("account_uuid"))
|
||||
return FormatMetadataUserID(userID, accountUUID, sessionID, uaVersion)
|
||||
}
|
||||
|
||||
// GenerateSessionUUID creates a deterministic UUID4 from a seed string.
|
||||
@@ -5533,7 +5532,7 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex
|
||||
// 如果启用了会话ID伪装,会在重写后替换 session 部分为固定值
|
||||
accountUUID := account.GetExtraString("account_uuid")
|
||||
if accountUUID != "" && fp.ClientID != "" {
|
||||
if newBody, err := s.identityService.RewriteUserIDWithMasking(ctx, body, account, accountUUID, fp.ClientID); err == nil && len(newBody) > 0 {
|
||||
if newBody, err := s.identityService.RewriteUserIDWithMasking(ctx, body, account, accountUUID, fp.ClientID, fp.UserAgent); err == nil && len(newBody) > 0 {
|
||||
body = newBody
|
||||
}
|
||||
}
|
||||
@@ -8161,7 +8160,7 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con
|
||||
if err == nil {
|
||||
accountUUID := account.GetExtraString("account_uuid")
|
||||
if accountUUID != "" && fp.ClientID != "" {
|
||||
if newBody, err := s.identityService.RewriteUserIDWithMasking(ctx, body, account, accountUUID, fp.ClientID); err == nil && len(newBody) > 0 {
|
||||
if newBody, err := s.identityService.RewriteUserIDWithMasking(ctx, body, account, accountUUID, fp.ClientID, fp.UserAgent); err == nil && len(newBody) > 0 {
|
||||
body = newBody
|
||||
}
|
||||
}
|
||||
|
||||
@@ -230,8 +230,8 @@ func (m *mockGroupRepoForGemini) ListActiveByPlatform(ctx context.Context, platf
|
||||
func (m *mockGroupRepoForGemini) ExistsByName(ctx context.Context, name string) (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
func (m *mockGroupRepoForGemini) GetAccountCount(ctx context.Context, groupID int64) (int64, error) {
|
||||
return 0, nil
|
||||
func (m *mockGroupRepoForGemini) GetAccountCount(ctx context.Context, groupID int64) (int64, int64, error) {
|
||||
return 0, 0, nil
|
||||
}
|
||||
func (m *mockGroupRepoForGemini) DeleteAccountGroupsByGroupID(ctx context.Context, groupID int64) (int64, error) {
|
||||
return 0, nil
|
||||
|
||||
@@ -24,7 +24,7 @@ func TestGenerateSessionHash_MetadataHasHighestPriority(t *testing.T) {
|
||||
svc := &GatewayService{}
|
||||
|
||||
parsed := &ParsedRequest{
|
||||
MetadataUserID: "session_123e4567-e89b-12d3-a456-426614174000",
|
||||
MetadataUserID: "user_a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2_account__session_123e4567-e89b-12d3-a456-426614174000",
|
||||
System: "You are a helpful assistant.",
|
||||
HasSystem: true,
|
||||
Messages: []any{
|
||||
@@ -196,7 +196,7 @@ func TestGenerateSessionHash_MetadataOverridesSessionContext(t *testing.T) {
|
||||
svc := &GatewayService{}
|
||||
|
||||
parsed := &ParsedRequest{
|
||||
MetadataUserID: "session_123e4567-e89b-12d3-a456-426614174000",
|
||||
MetadataUserID: "user_a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2_account__session_123e4567-e89b-12d3-a456-426614174000",
|
||||
Messages: []any{
|
||||
map[string]any{"role": "user", "content": "hello"},
|
||||
},
|
||||
@@ -212,6 +212,22 @@ func TestGenerateSessionHash_MetadataOverridesSessionContext(t *testing.T) {
|
||||
"metadata session_id should take priority over SessionContext")
|
||||
}
|
||||
|
||||
func TestGenerateSessionHash_MetadataJSON_HasHighestPriority(t *testing.T) {
|
||||
svc := &GatewayService{}
|
||||
|
||||
parsed := &ParsedRequest{
|
||||
MetadataUserID: `{"device_id":"a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2","account_uuid":"","session_id":"c72554f2-1234-5678-abcd-123456789abc"}`,
|
||||
System: "You are a helpful assistant.",
|
||||
HasSystem: true,
|
||||
Messages: []any{
|
||||
map[string]any{"role": "user", "content": "hello"},
|
||||
},
|
||||
}
|
||||
|
||||
hash := svc.GenerateSessionHash(parsed)
|
||||
require.Equal(t, "c72554f2-1234-5678-abcd-123456789abc", hash, "JSON format metadata session_id should have highest priority")
|
||||
}
|
||||
|
||||
func TestGenerateSessionHash_NilSessionContextBackwardCompatible(t *testing.T) {
|
||||
svc := &GatewayService{}
|
||||
|
||||
|
||||
@@ -64,8 +64,10 @@ type Group struct {
|
||||
CreatedAt time.Time
|
||||
UpdatedAt time.Time
|
||||
|
||||
AccountGroups []AccountGroup
|
||||
AccountCount int64
|
||||
AccountGroups []AccountGroup
|
||||
AccountCount int64
|
||||
ActiveAccountCount int64
|
||||
RateLimitedAccountCount int64
|
||||
}
|
||||
|
||||
func (g *Group) IsActive() bool {
|
||||
|
||||
131
backend/internal/service/group_capacity_service.go
Normal file
131
backend/internal/service/group_capacity_service.go
Normal file
@@ -0,0 +1,131 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
)
|
||||
|
||||
// GroupCapacitySummary holds aggregated capacity for a single group.
// "Max" values are summed from per-account configuration; "Used" values
// come from Redis-backed runtime counters (see getGroupCapacity).
type GroupCapacitySummary struct {
	GroupID int64 `json:"group_id"`
	// In-flight requests vs. the sum of per-account Concurrency limits.
	ConcurrencyUsed int `json:"concurrency_used"`
	ConcurrencyMax  int `json:"concurrency_max"`
	// Active sessions vs. the sum of per-account GetMaxSessions() limits.
	SessionsUsed int `json:"sessions_used"`
	SessionsMax  int `json:"sessions_max"`
	// Requests-per-minute vs. the sum of per-account GetBaseRPM() limits.
	RPMUsed int `json:"rpm_used"`
	RPMMax  int `json:"rpm_max"`
}
|
||||
|
||||
// GroupCapacityService aggregates per-group capacity from runtime data.
// Static limits come from account configuration (via accountRepo); live
// usage comes from the concurrency/session/RPM runtime caches.
type GroupCapacityService struct {
	accountRepo        AccountRepository
	groupRepo          GroupRepository
	concurrencyService *ConcurrencyService
	sessionLimitCache  SessionLimitCache // may be nil; session usage is then skipped
	rpmCache           RPMCache          // may be nil; RPM usage is then skipped
}
|
||||
|
||||
// NewGroupCapacityService creates a new GroupCapacityService.
|
||||
func NewGroupCapacityService(
|
||||
accountRepo AccountRepository,
|
||||
groupRepo GroupRepository,
|
||||
concurrencyService *ConcurrencyService,
|
||||
sessionLimitCache SessionLimitCache,
|
||||
rpmCache RPMCache,
|
||||
) *GroupCapacityService {
|
||||
return &GroupCapacityService{
|
||||
accountRepo: accountRepo,
|
||||
groupRepo: groupRepo,
|
||||
concurrencyService: concurrencyService,
|
||||
sessionLimitCache: sessionLimitCache,
|
||||
rpmCache: rpmCache,
|
||||
}
|
||||
}
|
||||
|
||||
// GetAllGroupCapacity returns capacity summary for all active groups.
|
||||
func (s *GroupCapacityService) GetAllGroupCapacity(ctx context.Context) ([]GroupCapacitySummary, error) {
|
||||
groups, err := s.groupRepo.ListActive(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
results := make([]GroupCapacitySummary, 0, len(groups))
|
||||
for i := range groups {
|
||||
cap, err := s.getGroupCapacity(ctx, groups[i].ID)
|
||||
if err != nil {
|
||||
// Skip groups with errors, return partial results
|
||||
continue
|
||||
}
|
||||
cap.GroupID = groups[i].ID
|
||||
results = append(results, cap)
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// getGroupCapacity aggregates static limits (from account config) and live
// usage (from Redis-backed caches) across every schedulable account in one
// group. The GroupID field of the result is left zero; the caller fills it.
func (s *GroupCapacityService) getGroupCapacity(ctx context.Context, groupID int64) (GroupCapacitySummary, error) {
	accounts, err := s.accountRepo.ListSchedulableByGroupID(ctx, groupID)
	if err != nil {
		return GroupCapacitySummary{}, err
	}
	if len(accounts) == 0 {
		// No schedulable accounts: an all-zero summary, not an error.
		return GroupCapacitySummary{}, nil
	}

	// Collect account IDs and config values
	accountIDs := make([]int64, 0, len(accounts))
	sessionTimeouts := make(map[int64]time.Duration)
	var concurrencyMax, sessionsMax, rpmMax int

	for i := range accounts {
		// Index into the slice (not range-by-value) to avoid copying accounts.
		acc := &accounts[i]
		accountIDs = append(accountIDs, acc.ID)
		concurrencyMax += acc.Concurrency

		// Session limits only count for accounts that actually set one.
		if ms := acc.GetMaxSessions(); ms > 0 {
			sessionsMax += ms
			timeout := time.Duration(acc.GetSessionIdleTimeoutMinutes()) * time.Minute
			if timeout <= 0 {
				// Fall back to a 5-minute idle timeout when unset/invalid.
				timeout = 5 * time.Minute
			}
			sessionTimeouts[acc.ID] = timeout
		}

		if rpm := acc.GetBaseRPM(); rpm > 0 {
			rpmMax += rpm
		}
	}

	// Batch query runtime data from Redis.
	// Errors are deliberately discarded: a failed lookup reads as zero usage.
	concurrencyMap, _ := s.concurrencyService.GetAccountConcurrencyBatch(ctx, accountIDs)

	// Only hit the session cache when at least one account has a session
	// limit and the cache is wired in.
	var sessionsMap map[int64]int
	if sessionsMax > 0 && s.sessionLimitCache != nil {
		sessionsMap, _ = s.sessionLimitCache.GetActiveSessionCountBatch(ctx, accountIDs, sessionTimeouts)
	}

	var rpmMap map[int64]int
	if rpmMax > 0 && s.rpmCache != nil {
		rpmMap, _ = s.rpmCache.GetRPMBatch(ctx, accountIDs)
	}

	// Aggregate
	var concurrencyUsed, sessionsUsed, rpmUsed int
	for _, id := range accountIDs {
		// Missing keys read as 0 from a non-nil map; nil maps are skipped.
		concurrencyUsed += concurrencyMap[id]
		if sessionsMap != nil {
			sessionsUsed += sessionsMap[id]
		}
		if rpmMap != nil {
			rpmUsed += rpmMap[id]
		}
	}

	return GroupCapacitySummary{
		ConcurrencyUsed: concurrencyUsed,
		ConcurrencyMax:  concurrencyMax,
		SessionsUsed:    sessionsUsed,
		SessionsMax:     sessionsMax,
		RPMUsed:         rpmUsed,
		RPMMax:          rpmMax,
	}, nil
}
|
||||
@@ -27,7 +27,7 @@ type GroupRepository interface {
|
||||
ListActiveByPlatform(ctx context.Context, platform string) ([]Group, error)
|
||||
|
||||
ExistsByName(ctx context.Context, name string) (bool, error)
|
||||
GetAccountCount(ctx context.Context, groupID int64) (int64, error)
|
||||
GetAccountCount(ctx context.Context, groupID int64) (total int64, active int64, err error)
|
||||
DeleteAccountGroupsByGroupID(ctx context.Context, groupID int64) (int64, error)
|
||||
// GetAccountIDsByGroupIDs 获取多个分组的所有账号 ID(去重)
|
||||
GetAccountIDsByGroupIDs(ctx context.Context, groupIDs []int64) ([]int64, error)
|
||||
@@ -202,7 +202,7 @@ func (s *GroupService) GetStats(ctx context.Context, id int64) (map[string]any,
|
||||
}
|
||||
|
||||
// 获取账号数量
|
||||
accountCount, err := s.groupRepo.GetAccountCount(ctx, id)
|
||||
accountCount, _, err := s.groupRepo.GetAccountCount(ctx, id)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get account count: %w", err)
|
||||
}
|
||||
|
||||
@@ -19,10 +19,6 @@ import (
|
||||
|
||||
// 预编译正则表达式(避免每次调用重新编译)
|
||||
var (
|
||||
// 匹配 user_id 格式:
|
||||
// 旧格式: user_{64位hex}_account__session_{uuid} (account 后无 UUID)
|
||||
// 新格式: user_{64位hex}_account_{uuid}_session_{uuid} (account 后有 UUID)
|
||||
userIDRegex = regexp.MustCompile(`^user_[a-f0-9]{64}_account_([a-f0-9-]*)_session_([a-f0-9-]{36})$`)
|
||||
// 匹配 User-Agent 版本号: xxx/x.y.z
|
||||
userAgentVersionRegex = regexp.MustCompile(`/(\d+)\.(\d+)\.(\d+)`)
|
||||
)
|
||||
@@ -209,12 +205,12 @@ func (s *IdentityService) ApplyFingerprint(req *http.Request, fp *Fingerprint) {
|
||||
}
|
||||
|
||||
// RewriteUserID 重写body中的metadata.user_id
|
||||
// 输入格式:user_{clientId}_account__session_{sessionUUID}
|
||||
// 输出格式:user_{cachedClientID}_account_{accountUUID}_session_{newHash}
|
||||
// 支持旧拼接格式和新 JSON 格式的 user_id 解析,
|
||||
// 根据 fingerprintUA 版本选择输出格式。
|
||||
//
|
||||
// 重要:此函数使用 json.RawMessage 保留其他字段的原始字节,
|
||||
// 避免重新序列化导致 thinking 块等内容被修改。
|
||||
func (s *IdentityService) RewriteUserID(body []byte, accountID int64, accountUUID, cachedClientID string) ([]byte, error) {
|
||||
func (s *IdentityService) RewriteUserID(body []byte, accountID int64, accountUUID, cachedClientID, fingerprintUA string) ([]byte, error) {
|
||||
if len(body) == 0 || accountUUID == "" || cachedClientID == "" {
|
||||
return body, nil
|
||||
}
|
||||
@@ -241,24 +237,21 @@ func (s *IdentityService) RewriteUserID(body []byte, accountID int64, accountUUI
|
||||
return body, nil
|
||||
}
|
||||
|
||||
// 匹配格式:
|
||||
// 旧格式: user_{64位hex}_account__session_{uuid}
|
||||
// 新格式: user_{64位hex}_account_{uuid}_session_{uuid}
|
||||
matches := userIDRegex.FindStringSubmatch(userID)
|
||||
if matches == nil {
|
||||
// 解析 user_id(兼容旧拼接格式和新 JSON 格式)
|
||||
parsed := ParseMetadataUserID(userID)
|
||||
if parsed == nil {
|
||||
return body, nil
|
||||
}
|
||||
|
||||
// matches[1] = account UUID (可能为空), matches[2] = session UUID
|
||||
sessionTail := matches[2] // 原始session UUID
|
||||
sessionTail := parsed.SessionID // 原始session UUID
|
||||
|
||||
// 生成新的session hash: SHA256(accountID::sessionTail) -> UUID格式
|
||||
seed := fmt.Sprintf("%d::%s", accountID, sessionTail)
|
||||
newSessionHash := generateUUIDFromSeed(seed)
|
||||
|
||||
// 构建新的user_id
|
||||
// 格式: user_{cachedClientID}_account_{account_uuid}_session_{newSessionHash}
|
||||
newUserID := fmt.Sprintf("user_%s_account_%s_session_%s", cachedClientID, accountUUID, newSessionHash)
|
||||
// 根据客户端版本选择输出格式
|
||||
version := ExtractCLIVersion(fingerprintUA)
|
||||
newUserID := FormatMetadataUserID(cachedClientID, accountUUID, newSessionHash, version)
|
||||
|
||||
metadata["user_id"] = newUserID
|
||||
|
||||
@@ -278,9 +271,9 @@ func (s *IdentityService) RewriteUserID(body []byte, accountID int64, accountUUI
|
||||
//
|
||||
// 重要:此函数使用 json.RawMessage 保留其他字段的原始字节,
|
||||
// 避免重新序列化导致 thinking 块等内容被修改。
|
||||
func (s *IdentityService) RewriteUserIDWithMasking(ctx context.Context, body []byte, account *Account, accountUUID, cachedClientID string) ([]byte, error) {
|
||||
func (s *IdentityService) RewriteUserIDWithMasking(ctx context.Context, body []byte, account *Account, accountUUID, cachedClientID, fingerprintUA string) ([]byte, error) {
|
||||
// 先执行常规的 RewriteUserID 逻辑
|
||||
newBody, err := s.RewriteUserID(body, account.ID, accountUUID, cachedClientID)
|
||||
newBody, err := s.RewriteUserID(body, account.ID, accountUUID, cachedClientID, fingerprintUA)
|
||||
if err != nil {
|
||||
return newBody, err
|
||||
}
|
||||
@@ -312,10 +305,9 @@ func (s *IdentityService) RewriteUserIDWithMasking(ctx context.Context, body []b
|
||||
return newBody, nil
|
||||
}
|
||||
|
||||
// 查找 _session_ 的位置,替换其后的内容
|
||||
const sessionMarker = "_session_"
|
||||
idx := strings.LastIndex(userID, sessionMarker)
|
||||
if idx == -1 {
|
||||
// 解析已重写的 user_id
|
||||
uidParsed := ParseMetadataUserID(userID)
|
||||
if uidParsed == nil {
|
||||
return newBody, nil
|
||||
}
|
||||
|
||||
@@ -337,8 +329,9 @@ func (s *IdentityService) RewriteUserIDWithMasking(ctx context.Context, body []b
|
||||
logger.LegacyPrintf("service.identity", "Warning: failed to set masked session ID for account %d: %v", account.ID, err)
|
||||
}
|
||||
|
||||
// 替换 session 部分:保留 _session_ 之前的内容,替换之后的内容
|
||||
newUserID := userID[:idx+len(sessionMarker)] + maskedSessionID
|
||||
// 用 FormatMetadataUserID 重建(保持与 RewriteUserID 相同的格式)
|
||||
version := ExtractCLIVersion(fingerprintUA)
|
||||
newUserID := FormatMetadataUserID(uidParsed.DeviceID, uidParsed.AccountUUID, maskedSessionID, version)
|
||||
|
||||
slog.Debug("session_id_masking_applied",
|
||||
"account_id", account.ID,
|
||||
|
||||
104
backend/internal/service/metadata_userid.go
Normal file
104
backend/internal/service/metadata_userid.go
Normal file
@@ -0,0 +1,104 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// NewMetadataFormatMinVersion is the minimum Claude Code version that uses
// JSON-formatted metadata.user_id instead of the legacy concatenated string.
// Compared semantically via CompareVersions in IsNewMetadataFormatVersion.
const NewMetadataFormatMinVersion = "2.1.78"
|
||||
|
||||
// ParsedUserID represents the components extracted from a metadata.user_id value.
type ParsedUserID struct {
	DeviceID    string // 64-char hex (or arbitrary client id)
	AccountUUID string // may be empty
	SessionID   string // UUID
	IsNewFormat bool   // true if the original was JSON format
}

// legacyUserIDRegex matches the legacy user_id format:
//
//	user_{64hex}_account_{optional_uuid}_session_{uuid}
var legacyUserIDRegex = regexp.MustCompile(`^user_([a-fA-F0-9]{64})_account_([a-fA-F0-9-]*)_session_([a-fA-F0-9-]{36})$`)

// jsonUserID is the JSON structure for the new metadata.user_id format.
type jsonUserID struct {
	DeviceID    string `json:"device_id"`
	AccountUUID string `json:"account_uuid"`
	SessionID   string `json:"session_id"`
}

// ParseMetadataUserID parses a metadata.user_id string in either the legacy
// concatenated format or the new JSON format. It returns nil when the input
// matches neither (including blank input or JSON missing required fields).
func ParseMetadataUserID(raw string) *ParsedUserID {
	trimmed := strings.TrimSpace(raw)
	if trimmed == "" {
		return nil
	}

	// New JSON format is recognized by a leading '{'.
	if strings.HasPrefix(trimmed, "{") {
		var payload jsonUserID
		if json.Unmarshal([]byte(trimmed), &payload) != nil {
			return nil
		}
		// device_id and session_id are mandatory; account_uuid may be empty.
		if payload.DeviceID == "" || payload.SessionID == "" {
			return nil
		}
		return &ParsedUserID{
			DeviceID:    payload.DeviceID,
			AccountUUID: payload.AccountUUID,
			SessionID:   payload.SessionID,
			IsNewFormat: true,
		}
	}

	// Otherwise fall back to the legacy concatenated format.
	groups := legacyUserIDRegex.FindStringSubmatch(trimmed)
	if groups == nil {
		return nil
	}
	return &ParsedUserID{
		DeviceID:    groups[1],
		AccountUUID: groups[2],
		SessionID:   groups[3],
		IsNewFormat: false,
	}
}
|
||||
|
||||
// FormatMetadataUserID builds a metadata.user_id string in the format
|
||||
// appropriate for the given CLI version. Components are the rewritten values
|
||||
// (not necessarily the originals).
|
||||
func FormatMetadataUserID(deviceID, accountUUID, sessionID, uaVersion string) string {
|
||||
if IsNewMetadataFormatVersion(uaVersion) {
|
||||
b, _ := json.Marshal(jsonUserID{
|
||||
DeviceID: deviceID,
|
||||
AccountUUID: accountUUID,
|
||||
SessionID: sessionID,
|
||||
})
|
||||
return string(b)
|
||||
}
|
||||
// Legacy format
|
||||
return "user_" + deviceID + "_account_" + accountUUID + "_session_" + sessionID
|
||||
}
|
||||
|
||||
// IsNewMetadataFormatVersion returns true if the given CLI version uses the
|
||||
// new JSON metadata.user_id format (>= 2.1.78).
|
||||
func IsNewMetadataFormatVersion(version string) bool {
|
||||
if version == "" {
|
||||
return false
|
||||
}
|
||||
return CompareVersions(version, NewMetadataFormatMinVersion) >= 0
|
||||
}
|
||||
|
||||
// ExtractCLIVersion extracts the Claude Code version from a User-Agent string.
// Returns "" if the UA doesn't match the expected pattern.
func ExtractCLIVersion(ua string) string {
	// claudeCodeUAVersionPattern is defined elsewhere in this package;
	// presumably it captures the version as its first submatch — confirm there.
	matches := claudeCodeUAVersionPattern.FindStringSubmatch(ua)
	if len(matches) >= 2 {
		return matches[1]
	}
	return ""
}
|
||||
183
backend/internal/service/metadata_userid_test.go
Normal file
183
backend/internal/service/metadata_userid_test.go
Normal file
@@ -0,0 +1,183 @@
|
||||
//go:build unit
|
||||
|
||||
package service
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// ============ ParseMetadataUserID Tests ============
|
||||
|
||||
func TestParseMetadataUserID_LegacyFormat_WithoutAccountUUID(t *testing.T) {
|
||||
raw := "user_a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2_account__session_123e4567-e89b-12d3-a456-426614174000"
|
||||
parsed := ParseMetadataUserID(raw)
|
||||
require.NotNil(t, parsed)
|
||||
require.Equal(t, "a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2", parsed.DeviceID)
|
||||
require.Equal(t, "", parsed.AccountUUID)
|
||||
require.Equal(t, "123e4567-e89b-12d3-a456-426614174000", parsed.SessionID)
|
||||
require.False(t, parsed.IsNewFormat)
|
||||
}
|
||||
|
||||
func TestParseMetadataUserID_LegacyFormat_WithAccountUUID(t *testing.T) {
|
||||
raw := "user_a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2_account_550e8400-e29b-41d4-a716-446655440000_session_123e4567-e89b-12d3-a456-426614174000"
|
||||
parsed := ParseMetadataUserID(raw)
|
||||
require.NotNil(t, parsed)
|
||||
require.Equal(t, "a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2", parsed.DeviceID)
|
||||
require.Equal(t, "550e8400-e29b-41d4-a716-446655440000", parsed.AccountUUID)
|
||||
require.Equal(t, "123e4567-e89b-12d3-a456-426614174000", parsed.SessionID)
|
||||
require.False(t, parsed.IsNewFormat)
|
||||
}
|
||||
|
||||
func TestParseMetadataUserID_JSONFormat_WithoutAccountUUID(t *testing.T) {
|
||||
raw := `{"device_id":"d61f76d0aabbccdd00112233445566778899aabbccddeeff0011223344556677","account_uuid":"","session_id":"c72554f2-1234-5678-abcd-123456789abc"}`
|
||||
parsed := ParseMetadataUserID(raw)
|
||||
require.NotNil(t, parsed)
|
||||
require.Equal(t, "d61f76d0aabbccdd00112233445566778899aabbccddeeff0011223344556677", parsed.DeviceID)
|
||||
require.Equal(t, "", parsed.AccountUUID)
|
||||
require.Equal(t, "c72554f2-1234-5678-abcd-123456789abc", parsed.SessionID)
|
||||
require.True(t, parsed.IsNewFormat)
|
||||
}
|
||||
|
||||
func TestParseMetadataUserID_JSONFormat_WithAccountUUID(t *testing.T) {
|
||||
raw := `{"device_id":"d61f76d0aabbccdd00112233445566778899aabbccddeeff0011223344556677","account_uuid":"550e8400-e29b-41d4-a716-446655440000","session_id":"c72554f2-1234-5678-abcd-123456789abc"}`
|
||||
parsed := ParseMetadataUserID(raw)
|
||||
require.NotNil(t, parsed)
|
||||
require.Equal(t, "d61f76d0aabbccdd00112233445566778899aabbccddeeff0011223344556677", parsed.DeviceID)
|
||||
require.Equal(t, "550e8400-e29b-41d4-a716-446655440000", parsed.AccountUUID)
|
||||
require.Equal(t, "c72554f2-1234-5678-abcd-123456789abc", parsed.SessionID)
|
||||
require.True(t, parsed.IsNewFormat)
|
||||
}
|
||||
|
||||
func TestParseMetadataUserID_InvalidInputs(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
raw string
|
||||
}{
|
||||
{"empty string", ""},
|
||||
{"whitespace only", " "},
|
||||
{"random text", "not-a-valid-user-id"},
|
||||
{"partial legacy format", "session_123e4567-e89b-12d3-a456-426614174000"},
|
||||
{"invalid JSON", `{"device_id":}`},
|
||||
{"JSON missing device_id", `{"account_uuid":"","session_id":"c72554f2-1234-5678-abcd-123456789abc"}`},
|
||||
{"JSON missing session_id", `{"device_id":"d61f76d0aabbccdd00112233445566778899aabbccddeeff0011223344556677","account_uuid":""}`},
|
||||
{"JSON empty device_id", `{"device_id":"","account_uuid":"","session_id":"c72554f2-1234-5678-abcd-123456789abc"}`},
|
||||
{"JSON empty session_id", `{"device_id":"d61f76d0aabbccdd00112233445566778899aabbccddeeff0011223344556677","account_uuid":"","session_id":""}`},
|
||||
{"legacy format short hex", "user_a1b2c3d4_account__session_123e4567-e89b-12d3-a456-426614174000"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
require.Nil(t, ParseMetadataUserID(tt.raw), "should return nil for: %s", tt.raw)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseMetadataUserID_HexCaseInsensitive(t *testing.T) {
|
||||
// Legacy format should accept both upper and lower case hex
|
||||
rawUpper := "user_A1B2C3D4E5F6A1B2C3D4E5F6A1B2C3D4E5F6A1B2C3D4E5F6A1B2C3D4E5F6A1B2_account__session_123e4567-e89b-12d3-a456-426614174000"
|
||||
parsed := ParseMetadataUserID(rawUpper)
|
||||
require.NotNil(t, parsed, "legacy format should accept uppercase hex")
|
||||
require.Equal(t, "A1B2C3D4E5F6A1B2C3D4E5F6A1B2C3D4E5F6A1B2C3D4E5F6A1B2C3D4E5F6A1B2", parsed.DeviceID)
|
||||
}
|
||||
|
||||
// ============ FormatMetadataUserID Tests ============
|
||||
|
||||
func TestFormatMetadataUserID_LegacyVersion(t *testing.T) {
|
||||
result := FormatMetadataUserID("deadbeef"+"00112233445566778899aabbccddeeff0011223344556677", "acc-uuid", "sess-uuid", "2.1.77")
|
||||
require.Equal(t, "user_deadbeef00112233445566778899aabbccddeeff0011223344556677_account_acc-uuid_session_sess-uuid", result)
|
||||
}
|
||||
|
||||
func TestFormatMetadataUserID_NewVersion(t *testing.T) {
|
||||
result := FormatMetadataUserID("deadbeef"+"00112233445566778899aabbccddeeff0011223344556677", "acc-uuid", "sess-uuid", "2.1.78")
|
||||
require.Equal(t, `{"device_id":"deadbeef00112233445566778899aabbccddeeff0011223344556677","account_uuid":"acc-uuid","session_id":"sess-uuid"}`, result)
|
||||
}
|
||||
|
||||
func TestFormatMetadataUserID_EmptyVersion_Legacy(t *testing.T) {
|
||||
result := FormatMetadataUserID("deadbeef"+"00112233445566778899aabbccddeeff0011223344556677", "", "sess-uuid", "")
|
||||
require.Equal(t, "user_deadbeef00112233445566778899aabbccddeeff0011223344556677_account__session_sess-uuid", result)
|
||||
}
|
||||
|
||||
func TestFormatMetadataUserID_EmptyAccountUUID(t *testing.T) {
|
||||
// Legacy format with empty account UUID → double underscore
|
||||
result := FormatMetadataUserID("deadbeef"+"00112233445566778899aabbccddeeff0011223344556677", "", "sess-uuid", "2.1.22")
|
||||
require.Contains(t, result, "_account__session_")
|
||||
|
||||
// New format with empty account UUID → empty string in JSON
|
||||
result = FormatMetadataUserID("deadbeef"+"00112233445566778899aabbccddeeff0011223344556677", "", "sess-uuid", "2.1.78")
|
||||
require.Contains(t, result, `"account_uuid":""`)
|
||||
}
|
||||
|
||||
// ============ IsNewMetadataFormatVersion Tests ============
|
||||
|
||||
func TestIsNewMetadataFormatVersion(t *testing.T) {
|
||||
tests := []struct {
|
||||
version string
|
||||
want bool
|
||||
}{
|
||||
{"", false},
|
||||
{"2.1.77", false},
|
||||
{"2.1.78", true},
|
||||
{"2.1.79", true},
|
||||
{"2.2.0", true},
|
||||
{"3.0.0", true},
|
||||
{"2.0.100", false},
|
||||
{"1.9.99", false},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.version, func(t *testing.T) {
|
||||
require.Equal(t, tt.want, IsNewMetadataFormatVersion(tt.version))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// ============ Round-trip Tests ============
|
||||
|
||||
func TestParseFormat_RoundTrip_Legacy(t *testing.T) {
|
||||
deviceID := "a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2"
|
||||
accountUUID := "550e8400-e29b-41d4-a716-446655440000"
|
||||
sessionID := "123e4567-e89b-12d3-a456-426614174000"
|
||||
|
||||
formatted := FormatMetadataUserID(deviceID, accountUUID, sessionID, "2.1.22")
|
||||
parsed := ParseMetadataUserID(formatted)
|
||||
require.NotNil(t, parsed)
|
||||
require.Equal(t, deviceID, parsed.DeviceID)
|
||||
require.Equal(t, accountUUID, parsed.AccountUUID)
|
||||
require.Equal(t, sessionID, parsed.SessionID)
|
||||
require.False(t, parsed.IsNewFormat)
|
||||
}
|
||||
|
||||
func TestParseFormat_RoundTrip_JSON(t *testing.T) {
|
||||
deviceID := "a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2"
|
||||
accountUUID := "550e8400-e29b-41d4-a716-446655440000"
|
||||
sessionID := "123e4567-e89b-12d3-a456-426614174000"
|
||||
|
||||
formatted := FormatMetadataUserID(deviceID, accountUUID, sessionID, "2.1.78")
|
||||
parsed := ParseMetadataUserID(formatted)
|
||||
require.NotNil(t, parsed)
|
||||
require.Equal(t, deviceID, parsed.DeviceID)
|
||||
require.Equal(t, accountUUID, parsed.AccountUUID)
|
||||
require.Equal(t, sessionID, parsed.SessionID)
|
||||
require.True(t, parsed.IsNewFormat)
|
||||
}
|
||||
|
||||
func TestParseFormat_RoundTrip_EmptyAccountUUID(t *testing.T) {
|
||||
deviceID := "a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2"
|
||||
sessionID := "123e4567-e89b-12d3-a456-426614174000"
|
||||
|
||||
// Legacy round-trip with empty account UUID
|
||||
formatted := FormatMetadataUserID(deviceID, "", sessionID, "2.1.22")
|
||||
parsed := ParseMetadataUserID(formatted)
|
||||
require.NotNil(t, parsed)
|
||||
require.Equal(t, deviceID, parsed.DeviceID)
|
||||
require.Equal(t, "", parsed.AccountUUID)
|
||||
require.Equal(t, sessionID, parsed.SessionID)
|
||||
|
||||
// JSON round-trip with empty account UUID
|
||||
formatted = FormatMetadataUserID(deviceID, "", sessionID, "2.1.78")
|
||||
parsed = ParseMetadataUserID(formatted)
|
||||
require.NotNil(t, parsed)
|
||||
require.Equal(t, deviceID, parsed.DeviceID)
|
||||
require.Equal(t, "", parsed.AccountUUID)
|
||||
require.Equal(t, sessionID, parsed.SessionID)
|
||||
}
|
||||
@@ -172,6 +172,11 @@ func applyCodexOAuthTransform(reqBody map[string]any, isCodexCLI bool, isCompact
|
||||
result.PromptCacheKey = strings.TrimSpace(v)
|
||||
}
|
||||
|
||||
// 提取 input 中 role:"system" 消息至 instructions(OAuth 上游不支持 system role)。
|
||||
if extractSystemMessagesFromInput(reqBody) {
|
||||
result.Modified = true
|
||||
}
|
||||
|
||||
// instructions 处理逻辑:根据是否是 Codex CLI 分别调用不同方法
|
||||
if applyInstructions(reqBody, isCodexCLI) {
|
||||
result.Modified = true
|
||||
@@ -301,6 +306,73 @@ func getNormalizedCodexModel(modelID string) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// extractTextFromContent extracts plain text from a content value that is either
// a Go string or a []any of content-part maps with type:"text". Any other
// shape yields "".
func extractTextFromContent(content any) string {
	if text, ok := content.(string); ok {
		return text
	}
	items, ok := content.([]any)
	if !ok {
		return ""
	}
	var texts []string
	for _, raw := range items {
		part, isMap := raw.(map[string]any)
		if !isMap {
			continue
		}
		kind, _ := part["type"].(string)
		if kind != "text" {
			continue
		}
		if text, ok := part["text"].(string); ok {
			texts = append(texts, text)
		}
	}
	return strings.Join(texts, "")
}

// extractSystemMessagesFromInput scans the input array for items with role=="system",
// removes them, and merges their content into reqBody["instructions"].
// If instructions is already non-empty, extracted content is prepended with "\n\n".
// Returns true if any system messages were extracted; when none carry text,
// the request body is left untouched.
func extractSystemMessagesFromInput(reqBody map[string]any) bool {
	items, ok := reqBody["input"].([]any)
	if !ok || len(items) == 0 {
		return false
	}

	kept := make([]any, 0, len(items))
	var extractedTexts []string
	for _, raw := range items {
		msg, isMap := raw.(map[string]any)
		if !isMap {
			kept = append(kept, raw)
			continue
		}
		role, _ := msg["role"].(string)
		if role != "system" {
			kept = append(kept, raw)
			continue
		}
		if text := extractTextFromContent(msg["content"]); text != "" {
			extractedTexts = append(extractedTexts, text)
		}
	}

	if len(extractedTexts) == 0 {
		return false
	}

	merged := strings.Join(extractedTexts, "\n\n")
	if prior, ok := reqBody["instructions"].(string); ok && strings.TrimSpace(prior) != "" {
		merged = merged + "\n\n" + prior
	}
	reqBody["instructions"] = merged
	reqBody["input"] = kept
	return true
}
|
||||
|
||||
// applyInstructions 处理 instructions 字段:仅在 instructions 为空时填充默认值。
|
||||
func applyInstructions(reqBody map[string]any, isCodexCLI bool) bool {
|
||||
if !isInstructionsEmpty(reqBody) {
|
||||
|
||||
@@ -344,6 +344,135 @@ func TestApplyCodexOAuthTransform_StringInputWithToolsField(t *testing.T) {
|
||||
require.Len(t, input, 1)
|
||||
}
|
||||
|
||||
func TestExtractSystemMessagesFromInput(t *testing.T) {
|
||||
t.Run("no system messages", func(t *testing.T) {
|
||||
reqBody := map[string]any{
|
||||
"input": []any{
|
||||
map[string]any{"role": "user", "content": "hello"},
|
||||
},
|
||||
}
|
||||
result := extractSystemMessagesFromInput(reqBody)
|
||||
require.False(t, result)
|
||||
input, ok := reqBody["input"].([]any)
|
||||
require.True(t, ok)
|
||||
require.Len(t, input, 1)
|
||||
_, hasInstructions := reqBody["instructions"]
|
||||
require.False(t, hasInstructions)
|
||||
})
|
||||
|
||||
t.Run("string content system message", func(t *testing.T) {
|
||||
reqBody := map[string]any{
|
||||
"input": []any{
|
||||
map[string]any{"role": "system", "content": "You are an assistant."},
|
||||
map[string]any{"role": "user", "content": "hello"},
|
||||
},
|
||||
}
|
||||
result := extractSystemMessagesFromInput(reqBody)
|
||||
require.True(t, result)
|
||||
input, ok := reqBody["input"].([]any)
|
||||
require.True(t, ok)
|
||||
require.Len(t, input, 1)
|
||||
msg, ok := input[0].(map[string]any)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, "user", msg["role"])
|
||||
require.Equal(t, "You are an assistant.", reqBody["instructions"])
|
||||
})
|
||||
|
||||
t.Run("array content system message", func(t *testing.T) {
|
||||
reqBody := map[string]any{
|
||||
"input": []any{
|
||||
map[string]any{
|
||||
"role": "system",
|
||||
"content": []any{
|
||||
map[string]any{"type": "text", "text": "Be helpful."},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
result := extractSystemMessagesFromInput(reqBody)
|
||||
require.True(t, result)
|
||||
require.Equal(t, "Be helpful.", reqBody["instructions"])
|
||||
input, ok := reqBody["input"].([]any)
|
||||
require.True(t, ok)
|
||||
require.Len(t, input, 0)
|
||||
})
|
||||
|
||||
t.Run("multiple system messages concatenated", func(t *testing.T) {
|
||||
reqBody := map[string]any{
|
||||
"input": []any{
|
||||
map[string]any{"role": "system", "content": "First."},
|
||||
map[string]any{"role": "system", "content": "Second."},
|
||||
map[string]any{"role": "user", "content": "hi"},
|
||||
},
|
||||
}
|
||||
result := extractSystemMessagesFromInput(reqBody)
|
||||
require.True(t, result)
|
||||
require.Equal(t, "First.\n\nSecond.", reqBody["instructions"])
|
||||
input, ok := reqBody["input"].([]any)
|
||||
require.True(t, ok)
|
||||
require.Len(t, input, 1)
|
||||
})
|
||||
|
||||
t.Run("mixed system and non-system preserves non-system", func(t *testing.T) {
|
||||
reqBody := map[string]any{
|
||||
"input": []any{
|
||||
map[string]any{"role": "user", "content": "hello"},
|
||||
map[string]any{"role": "system", "content": "Sys prompt."},
|
||||
map[string]any{"role": "assistant", "content": "Hi there"},
|
||||
},
|
||||
}
|
||||
result := extractSystemMessagesFromInput(reqBody)
|
||||
require.True(t, result)
|
||||
input, ok := reqBody["input"].([]any)
|
||||
require.True(t, ok)
|
||||
require.Len(t, input, 2)
|
||||
first, ok := input[0].(map[string]any)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, "user", first["role"])
|
||||
second, ok := input[1].(map[string]any)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, "assistant", second["role"])
|
||||
})
|
||||
|
||||
t.Run("existing instructions prepended", func(t *testing.T) {
|
||||
reqBody := map[string]any{
|
||||
"input": []any{
|
||||
map[string]any{"role": "system", "content": "Extracted."},
|
||||
map[string]any{"role": "user", "content": "hi"},
|
||||
},
|
||||
"instructions": "Existing instructions.",
|
||||
}
|
||||
result := extractSystemMessagesFromInput(reqBody)
|
||||
require.True(t, result)
|
||||
require.Equal(t, "Extracted.\n\nExisting instructions.", reqBody["instructions"])
|
||||
})
|
||||
}
|
||||
|
||||
func TestApplyCodexOAuthTransform_ExtractsSystemMessages(t *testing.T) {
|
||||
reqBody := map[string]any{
|
||||
"model": "gpt-5.1",
|
||||
"input": []any{
|
||||
map[string]any{"role": "system", "content": "You are a coding assistant."},
|
||||
map[string]any{"role": "user", "content": "Write a function."},
|
||||
},
|
||||
}
|
||||
|
||||
result := applyCodexOAuthTransform(reqBody, false, false)
|
||||
|
||||
require.True(t, result.Modified)
|
||||
|
||||
input, ok := reqBody["input"].([]any)
|
||||
require.True(t, ok)
|
||||
require.Len(t, input, 1)
|
||||
msg, ok := input[0].(map[string]any)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, "user", msg["role"])
|
||||
|
||||
instructions, ok := reqBody["instructions"].(string)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, "You are a coding assistant.", instructions)
|
||||
}
|
||||
|
||||
func TestIsInstructionsEmpty(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
|
||||
@@ -107,10 +107,11 @@ func (s *OpenAIGatewayService) ForwardAsAnthropic(
|
||||
return nil, fmt.Errorf("build upstream request: %w", err)
|
||||
}
|
||||
|
||||
// Override session_id with a deterministic UUID derived from the sticky
|
||||
// session key (buildUpstreamRequest may have set it to the raw value).
|
||||
// Override session_id with a deterministic UUID derived from the isolated
|
||||
// session key, ensuring different API keys produce different upstream sessions.
|
||||
if promptCacheKey != "" {
|
||||
upstreamReq.Header.Set("session_id", generateSessionUUID(promptCacheKey))
|
||||
apiKeyID := getAPIKeyIDFromContext(c)
|
||||
upstreamReq.Header.Set("session_id", generateSessionUUID(isolateOpenAISessionID(apiKeyID, promptCacheKey)))
|
||||
}
|
||||
|
||||
// 7. Send request
|
||||
|
||||
@@ -24,6 +24,7 @@ import (
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/openai"
|
||||
"github.com/Wei-Shaw/sub2api/internal/util/responseheaders"
|
||||
"github.com/Wei-Shaw/sub2api/internal/util/urlvalidator"
|
||||
"github.com/cespare/xxhash/v2"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/google/uuid"
|
||||
"github.com/tidwall/gjson"
|
||||
@@ -787,6 +788,20 @@ func getAPIKeyIDFromContext(c *gin.Context) int64 {
|
||||
return apiKey.ID
|
||||
}
|
||||
|
||||
// isolateOpenAISessionID 将 apiKeyID 混入 session 标识符,
|
||||
// 确保不同 API Key 的用户即使使用相同的原始 session_id/conversation_id,
|
||||
// 到达上游的标识符也不同,防止跨用户会话碰撞。
|
||||
func isolateOpenAISessionID(apiKeyID int64, raw string) string {
|
||||
raw = strings.TrimSpace(raw)
|
||||
if raw == "" {
|
||||
return ""
|
||||
}
|
||||
h := xxhash.New()
|
||||
_, _ = fmt.Fprintf(h, "k%d:", apiKeyID)
|
||||
_, _ = h.WriteString(raw)
|
||||
return fmt.Sprintf("%016x", h.Sum64())
|
||||
}
|
||||
|
||||
func logCodexCLIOnlyDetection(ctx context.Context, c *gin.Context, account *Account, apiKeyID int64, result CodexClientRestrictionDetectionResult, body []byte) {
|
||||
if !result.Enabled {
|
||||
return
|
||||
@@ -2501,13 +2516,17 @@ func (s *OpenAIGatewayService) buildUpstreamRequestOpenAIPassthrough(
|
||||
if chatgptAccountID := account.GetChatGPTAccountID(); chatgptAccountID != "" {
|
||||
req.Header.Set("chatgpt-account-id", chatgptAccountID)
|
||||
}
|
||||
apiKeyID := getAPIKeyIDFromContext(c)
|
||||
// 先保存客户端原始值,再做 compact 补充,避免后续统一隔离时读到已处理的值。
|
||||
clientSessionID := strings.TrimSpace(req.Header.Get("session_id"))
|
||||
clientConversationID := strings.TrimSpace(req.Header.Get("conversation_id"))
|
||||
if isOpenAIResponsesCompactPath(c) {
|
||||
req.Header.Set("accept", "application/json")
|
||||
if req.Header.Get("version") == "" {
|
||||
req.Header.Set("version", codexCLIVersion)
|
||||
}
|
||||
if req.Header.Get("session_id") == "" {
|
||||
req.Header.Set("session_id", resolveOpenAICompactSessionID(c))
|
||||
if clientSessionID == "" {
|
||||
clientSessionID = resolveOpenAICompactSessionID(c)
|
||||
}
|
||||
} else if req.Header.Get("accept") == "" {
|
||||
req.Header.Set("accept", "text/event-stream")
|
||||
@@ -2518,13 +2537,18 @@ func (s *OpenAIGatewayService) buildUpstreamRequestOpenAIPassthrough(
|
||||
if req.Header.Get("originator") == "" {
|
||||
req.Header.Set("originator", "codex_cli_rs")
|
||||
}
|
||||
if promptCacheKey != "" {
|
||||
if req.Header.Get("conversation_id") == "" {
|
||||
req.Header.Set("conversation_id", promptCacheKey)
|
||||
}
|
||||
if req.Header.Get("session_id") == "" {
|
||||
req.Header.Set("session_id", promptCacheKey)
|
||||
}
|
||||
// 用隔离后的 session 标识符覆盖客户端透传值,防止跨用户会话碰撞。
|
||||
if clientSessionID == "" {
|
||||
clientSessionID = promptCacheKey
|
||||
}
|
||||
if clientConversationID == "" {
|
||||
clientConversationID = promptCacheKey
|
||||
}
|
||||
if clientSessionID != "" {
|
||||
req.Header.Set("session_id", isolateOpenAISessionID(apiKeyID, clientSessionID))
|
||||
}
|
||||
if clientConversationID != "" {
|
||||
req.Header.Set("conversation_id", isolateOpenAISessionID(apiKeyID, clientConversationID))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2887,22 +2911,27 @@ func (s *OpenAIGatewayService) buildUpstreamRequest(ctx context.Context, c *gin.
|
||||
}
|
||||
}
|
||||
if account.Type == AccountTypeOAuth {
|
||||
// 清除客户端透传的 session 头,后续用隔离后的值重新设置,防止跨用户会话碰撞。
|
||||
req.Header.Del("conversation_id")
|
||||
req.Header.Del("session_id")
|
||||
|
||||
req.Header.Set("OpenAI-Beta", "responses=experimental")
|
||||
req.Header.Set("originator", resolveOpenAIUpstreamOriginator(c, isCodexCLI))
|
||||
apiKeyID := getAPIKeyIDFromContext(c)
|
||||
if isOpenAIResponsesCompactPath(c) {
|
||||
req.Header.Set("accept", "application/json")
|
||||
if req.Header.Get("version") == "" {
|
||||
req.Header.Set("version", codexCLIVersion)
|
||||
}
|
||||
if req.Header.Get("session_id") == "" {
|
||||
req.Header.Set("session_id", resolveOpenAICompactSessionID(c))
|
||||
}
|
||||
compactSession := resolveOpenAICompactSessionID(c)
|
||||
req.Header.Set("session_id", isolateOpenAISessionID(apiKeyID, compactSession))
|
||||
} else {
|
||||
req.Header.Set("accept", "text/event-stream")
|
||||
}
|
||||
if promptCacheKey != "" {
|
||||
req.Header.Set("conversation_id", promptCacheKey)
|
||||
req.Header.Set("session_id", promptCacheKey)
|
||||
isolated := isolateOpenAISessionID(apiKeyID, promptCacheKey)
|
||||
req.Header.Set("conversation_id", isolated)
|
||||
req.Header.Set("session_id", isolated)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -0,0 +1,50 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestIsolateOpenAISessionID(t *testing.T) {
|
||||
t.Run("empty_raw_returns_empty", func(t *testing.T) {
|
||||
assert.Equal(t, "", isolateOpenAISessionID(1, ""))
|
||||
assert.Equal(t, "", isolateOpenAISessionID(1, " "))
|
||||
})
|
||||
|
||||
t.Run("deterministic", func(t *testing.T) {
|
||||
a := isolateOpenAISessionID(42, "sess_abc123")
|
||||
b := isolateOpenAISessionID(42, "sess_abc123")
|
||||
assert.Equal(t, a, b)
|
||||
})
|
||||
|
||||
t.Run("different_apiKeyID_different_result", func(t *testing.T) {
|
||||
a := isolateOpenAISessionID(1, "same_session")
|
||||
b := isolateOpenAISessionID(2, "same_session")
|
||||
require.NotEqual(t, a, b, "不同 API Key 使用相同 session_id 应产生不同隔离值")
|
||||
})
|
||||
|
||||
t.Run("different_raw_different_result", func(t *testing.T) {
|
||||
a := isolateOpenAISessionID(1, "session_a")
|
||||
b := isolateOpenAISessionID(1, "session_b")
|
||||
require.NotEqual(t, a, b)
|
||||
})
|
||||
|
||||
t.Run("format_is_16_hex_chars", func(t *testing.T) {
|
||||
result := isolateOpenAISessionID(99, "test_session")
|
||||
assert.Len(t, result, 16, "应为 16 字符的 hex 字符串")
|
||||
for _, ch := range result {
|
||||
assert.True(t, (ch >= '0' && ch <= '9') || (ch >= 'a' && ch <= 'f'),
|
||||
"应仅包含 hex 字符: %c", ch)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("zero_apiKeyID_still_works", func(t *testing.T) {
|
||||
result := isolateOpenAISessionID(0, "session")
|
||||
assert.NotEmpty(t, result)
|
||||
// apiKeyID=0 与 apiKeyID=1 应产生不同结果
|
||||
other := isolateOpenAISessionID(1, "session")
|
||||
assert.NotEqual(t, result, other)
|
||||
})
|
||||
}
|
||||
@@ -1124,11 +1124,22 @@ func (s *OpenAIGatewayService) buildOpenAIWSHeaders(
|
||||
headers.Set("accept-language", v)
|
||||
}
|
||||
}
|
||||
if sessionResolution.SessionID != "" {
|
||||
headers.Set("session_id", sessionResolution.SessionID)
|
||||
}
|
||||
if sessionResolution.ConversationID != "" {
|
||||
headers.Set("conversation_id", sessionResolution.ConversationID)
|
||||
// OAuth 账号:将 apiKeyID 混入 session 标识符,防止跨用户会话碰撞。
|
||||
if account != nil && account.Type == AccountTypeOAuth {
|
||||
apiKeyID := getAPIKeyIDFromContext(c)
|
||||
if sessionResolution.SessionID != "" {
|
||||
headers.Set("session_id", isolateOpenAISessionID(apiKeyID, sessionResolution.SessionID))
|
||||
}
|
||||
if sessionResolution.ConversationID != "" {
|
||||
headers.Set("conversation_id", isolateOpenAISessionID(apiKeyID, sessionResolution.ConversationID))
|
||||
}
|
||||
} else {
|
||||
if sessionResolution.SessionID != "" {
|
||||
headers.Set("session_id", sessionResolution.SessionID)
|
||||
}
|
||||
if sessionResolution.ConversationID != "" {
|
||||
headers.Set("conversation_id", sessionResolution.ConversationID)
|
||||
}
|
||||
}
|
||||
if state := strings.TrimSpace(turnState); state != "" {
|
||||
headers.Set(openAIWSTurnStateHeader, state)
|
||||
@@ -1859,7 +1870,16 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2(
|
||||
}
|
||||
return nil, wrapOpenAIWSFallback(classifyOpenAIWSAcquireError(err), err)
|
||||
}
|
||||
defer lease.Release()
|
||||
// cleanExit 标记正常终端事件退出,此时上游不会再发送帧,连接可安全归还复用。
|
||||
// 所有异常路径(读写错误、error 事件等)已在各自分支中提前调用 MarkBroken,
|
||||
// 因此 defer 中只需处理正常退出时不 MarkBroken 即可。
|
||||
cleanExit := false
|
||||
defer func() {
|
||||
if !cleanExit {
|
||||
lease.MarkBroken()
|
||||
}
|
||||
lease.Release()
|
||||
}()
|
||||
connID := strings.TrimSpace(lease.ConnID())
|
||||
logOpenAIWSModeDebug(
|
||||
"connected account_id=%d account_type=%s transport=%s conn_id=%s conn_reused=%v conn_pick_ms=%d queue_wait_ms=%d has_previous_response_id=%v",
|
||||
@@ -2237,6 +2257,7 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2(
|
||||
}
|
||||
|
||||
if isTerminalEvent {
|
||||
cleanExit = true
|
||||
break
|
||||
}
|
||||
}
|
||||
@@ -2972,12 +2993,15 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient(
|
||||
pinnedSessionConnID = connID
|
||||
}
|
||||
}
|
||||
// lastTurnClean 标记最后一轮 sendAndRelay 是否正常完成(收到终端事件且客户端未断连)。
|
||||
// 所有异常路径(读写错误、error 事件、客户端断连)已在各自分支或上层(L3403)中 MarkBroken,
|
||||
// 因此 releaseSessionLease 中只需在非正常结束时 MarkBroken。
|
||||
lastTurnClean := false
|
||||
releaseSessionLease := func() {
|
||||
if sessionLease == nil {
|
||||
return
|
||||
}
|
||||
if dedicatedMode {
|
||||
// dedicated 会话结束后主动标记损坏,确保连接不会跨会话复用。
|
||||
if !lastTurnClean {
|
||||
sessionLease.MarkBroken()
|
||||
}
|
||||
unpinSessionConn(sessionConnID)
|
||||
@@ -3372,6 +3396,7 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient(
|
||||
|
||||
result, relayErr := sendAndRelay(turn, sessionLease, currentPayload, currentPayloadBytes, currentOriginalModel)
|
||||
if relayErr != nil {
|
||||
lastTurnClean = false
|
||||
if recoverIngressPrevResponseNotFound(relayErr, turn, connID) {
|
||||
continue
|
||||
}
|
||||
@@ -3391,6 +3416,7 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient(
|
||||
turnRetry = 0
|
||||
turnPrevRecoveryTried = false
|
||||
lastTurnFinishedAt = time.Now()
|
||||
lastTurnClean = true
|
||||
if hooks != nil && hooks.AfterTurn != nil {
|
||||
hooks.AfterTurn(turn, result, nil)
|
||||
}
|
||||
|
||||
@@ -380,7 +380,8 @@ func TestOpenAIGatewayService_Forward_WSv2_PoolReuseNotOneToOne(t *testing.T) {
|
||||
require.True(t, strings.HasPrefix(result.RequestID, "resp_reuse_"))
|
||||
}
|
||||
|
||||
require.Equal(t, int64(1), upgradeCount.Load(), "多个客户端请求应复用账号连接池而不是 1:1 对等建链")
|
||||
// 条件式 MarkBroken:正常终端事件退出后连接归还复用,不再无条件销毁。
|
||||
require.Equal(t, int64(1), upgradeCount.Load(), "正常完成后连接应归还复用,不应每次新建")
|
||||
metrics := svc.SnapshotOpenAIWSPoolMetrics()
|
||||
require.GreaterOrEqual(t, metrics.AcquireReuseTotal, int64(1))
|
||||
require.GreaterOrEqual(t, metrics.ConnPickTotal, int64(1))
|
||||
@@ -454,8 +455,10 @@ func TestOpenAIGatewayService_Forward_WSv2_OAuthStoreFalseByDefault(t *testing.T
|
||||
require.True(t, gjson.Get(requestJSON, "stream").Exists(), "WSv2 payload 应保留 stream 字段")
|
||||
require.True(t, gjson.Get(requestJSON, "stream").Bool(), "OAuth Codex 规范化后应强制 stream=true")
|
||||
require.Equal(t, openAIWSBetaV2Value, captureDialer.lastHeaders.Get("OpenAI-Beta"))
|
||||
require.Equal(t, "sess-oauth-1", captureDialer.lastHeaders.Get("session_id"))
|
||||
require.Equal(t, "conv-oauth-1", captureDialer.lastHeaders.Get("conversation_id"))
|
||||
// OAuth 账号的 session_id/conversation_id 应被 isolateOpenAISessionID 隔离,
|
||||
// 测试中未设置 api_key 到 context,apiKeyID=0。
|
||||
require.Equal(t, isolateOpenAISessionID(0, "sess-oauth-1"), captureDialer.lastHeaders.Get("session_id"))
|
||||
require.Equal(t, isolateOpenAISessionID(0, "conv-oauth-1"), captureDialer.lastHeaders.Get("conversation_id"))
|
||||
}
|
||||
|
||||
func TestOpenAIGatewayService_Forward_WSv2_OAuthOriginatorCompatibility(t *testing.T) {
|
||||
@@ -596,7 +599,8 @@ func TestOpenAIGatewayService_Forward_WSv2_HeaderSessionFallbackFromPromptCacheK
|
||||
require.NotNil(t, result)
|
||||
require.Equal(t, "resp_prompt_cache_key", result.RequestID)
|
||||
|
||||
require.Equal(t, "pcache_123", captureDialer.lastHeaders.Get("session_id"))
|
||||
// OAuth 账号的 session_id 应被 isolateOpenAISessionID 隔离(apiKeyID=0,未在 context 设置)。
|
||||
require.Equal(t, isolateOpenAISessionID(0, "pcache_123"), captureDialer.lastHeaders.Get("session_id"))
|
||||
require.Empty(t, captureDialer.lastHeaders.Get("conversation_id"))
|
||||
require.NotNil(t, captureConn.lastWrite)
|
||||
require.True(t, gjson.Get(requestToJSONString(captureConn.lastWrite), "stream").Exists())
|
||||
@@ -961,6 +965,10 @@ func TestOpenAIGatewayService_Forward_WSv2_TurnMetadataInPayloadOnConnReuse(t *t
|
||||
require.NotNil(t, result1)
|
||||
require.Equal(t, "resp_meta_1", result1.RequestID)
|
||||
|
||||
require.Len(t, captureConn.writes, 1)
|
||||
firstWrite := requestToJSONString(captureConn.writes[0])
|
||||
require.Equal(t, "turn_meta_payload_1", gjson.Get(firstWrite, "client_metadata.x-codex-turn-metadata").String())
|
||||
|
||||
rec2 := httptest.NewRecorder()
|
||||
c2, _ := gin.CreateTestContext(rec2)
|
||||
c2.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil)
|
||||
@@ -974,7 +982,7 @@ func TestOpenAIGatewayService_Forward_WSv2_TurnMetadataInPayloadOnConnReuse(t *t
|
||||
require.Equal(t, 1, captureDialer.DialCount(), "同一账号两轮请求应复用同一 WS 连接")
|
||||
require.Len(t, captureConn.writes, 2)
|
||||
|
||||
firstWrite := requestToJSONString(captureConn.writes[0])
|
||||
firstWrite = requestToJSONString(captureConn.writes[0])
|
||||
secondWrite := requestToJSONString(captureConn.writes[1])
|
||||
require.Equal(t, "turn_meta_payload_1", gjson.Get(firstWrite, "client_metadata.x-codex-turn-metadata").String())
|
||||
require.Equal(t, "turn_meta_payload_2", gjson.Get(secondWrite, "client_metadata.x-codex-turn-metadata").String())
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
@@ -23,10 +25,14 @@ func (p *Proxy) IsActive() bool {
|
||||
}
|
||||
|
||||
func (p *Proxy) URL() string {
|
||||
if p.Username != "" && p.Password != "" {
|
||||
return fmt.Sprintf("%s://%s:%s@%s:%d", p.Protocol, p.Username, p.Password, p.Host, p.Port)
|
||||
u := &url.URL{
|
||||
Scheme: p.Protocol,
|
||||
Host: net.JoinHostPort(p.Host, strconv.Itoa(p.Port)),
|
||||
}
|
||||
return fmt.Sprintf("%s://%s:%d", p.Protocol, p.Host, p.Port)
|
||||
if p.Username != "" && p.Password != "" {
|
||||
u.User = url.UserPassword(p.Username, p.Password)
|
||||
}
|
||||
return u.String()
|
||||
}
|
||||
|
||||
type ProxyWithAccountCount struct {
|
||||
|
||||
95
backend/internal/service/proxy_test.go
Normal file
95
backend/internal/service/proxy_test.go
Normal file
@@ -0,0 +1,95 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestProxyURL(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
proxy Proxy
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "without auth",
|
||||
proxy: Proxy{
|
||||
Protocol: "http",
|
||||
Host: "proxy.example.com",
|
||||
Port: 8080,
|
||||
},
|
||||
want: "http://proxy.example.com:8080",
|
||||
},
|
||||
{
|
||||
name: "with auth",
|
||||
proxy: Proxy{
|
||||
Protocol: "socks5",
|
||||
Host: "socks.example.com",
|
||||
Port: 1080,
|
||||
Username: "user",
|
||||
Password: "pass",
|
||||
},
|
||||
want: "socks5://user:pass@socks.example.com:1080",
|
||||
},
|
||||
{
|
||||
name: "username only keeps no auth for compatibility",
|
||||
proxy: Proxy{
|
||||
Protocol: "http",
|
||||
Host: "proxy.example.com",
|
||||
Port: 8080,
|
||||
Username: "user-only",
|
||||
},
|
||||
want: "http://proxy.example.com:8080",
|
||||
},
|
||||
{
|
||||
name: "with special characters in credentials",
|
||||
proxy: Proxy{
|
||||
Protocol: "http",
|
||||
Host: "proxy.example.com",
|
||||
Port: 3128,
|
||||
Username: "first last@corp",
|
||||
Password: "p@ ss:#word",
|
||||
},
|
||||
want: "http://first%20last%40corp:p%40%20ss%3A%23word@proxy.example.com:3128",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
if got := tc.proxy.URL(); got != tc.want {
|
||||
t.Fatalf("Proxy.URL() mismatch: got=%q want=%q", got, tc.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestProxyURL_SpecialCharactersRoundTrip(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
proxy := Proxy{
|
||||
Protocol: "http",
|
||||
Host: "proxy.example.com",
|
||||
Port: 3128,
|
||||
Username: "first last@corp",
|
||||
Password: "p@ ss:#word",
|
||||
}
|
||||
|
||||
parsed, err := url.Parse(proxy.URL())
|
||||
if err != nil {
|
||||
t.Fatalf("parse proxy URL failed: %v", err)
|
||||
}
|
||||
if got := parsed.User.Username(); got != proxy.Username {
|
||||
t.Fatalf("username mismatch after parse: got=%q want=%q", got, proxy.Username)
|
||||
}
|
||||
pass, ok := parsed.User.Password()
|
||||
if !ok {
|
||||
t.Fatal("password missing after parse")
|
||||
}
|
||||
if pass != proxy.Password {
|
||||
t.Fatalf("password mismatch after parse: got=%q want=%q", pass, proxy.Password)
|
||||
}
|
||||
}
|
||||
@@ -1051,16 +1051,44 @@ func (s *RateLimitService) UpdateSessionWindow(ctx context.Context, account *Acc
|
||||
var windowStart, windowEnd *time.Time
|
||||
needInitWindow := account.SessionWindowEnd == nil || time.Now().After(*account.SessionWindowEnd)
|
||||
|
||||
if needInitWindow && (status == "allowed" || status == "allowed_warning") {
|
||||
// 预测时间窗口:从当前时间的整点开始,+5小时为结束
|
||||
// 例如:现在是 14:30,窗口为 14:00 ~ 19:00
|
||||
// 优先使用响应头中的真实重置时间(比预测更准确)
|
||||
if resetStr := headers.Get("anthropic-ratelimit-unified-5h-reset"); resetStr != "" {
|
||||
if ts, err := strconv.ParseInt(resetStr, 10, 64); err == nil {
|
||||
// 检测可能的毫秒时间戳(秒级约为 1e9,毫秒约为 1e12)
|
||||
if ts > 1e11 {
|
||||
slog.Warn("account_session_window_header_millis_detected", "account_id", account.ID, "raw_reset", resetStr)
|
||||
ts = ts / 1000
|
||||
}
|
||||
end := time.Unix(ts, 0)
|
||||
// 校验时间戳是否在合理范围内(不早于 5h 前,不晚于 7 天后)
|
||||
minAllowed := time.Now().Add(-5 * time.Hour)
|
||||
maxAllowed := time.Now().Add(7 * 24 * time.Hour)
|
||||
if end.Before(minAllowed) || end.After(maxAllowed) {
|
||||
slog.Warn("account_session_window_header_out_of_range", "account_id", account.ID, "raw_reset", resetStr, "parsed_end", end)
|
||||
} else if needInitWindow || account.SessionWindowEnd == nil || !end.Equal(*account.SessionWindowEnd) {
|
||||
// 窗口需要初始化,或者真实重置时间与已存储的不同,则更新
|
||||
start := end.Add(-5 * time.Hour)
|
||||
windowStart = &start
|
||||
windowEnd = &end
|
||||
slog.Info("account_session_window_from_header", "account_id", account.ID, "window_start", start, "window_end", end, "status", status)
|
||||
}
|
||||
} else {
|
||||
slog.Warn("account_session_window_header_parse_failed", "account_id", account.ID, "raw_reset", resetStr, "error", err)
|
||||
}
|
||||
}
|
||||
|
||||
// 回退:如果没有真实重置时间且需要初始化窗口,使用预测
|
||||
if windowEnd == nil && needInitWindow && (status == "allowed" || status == "allowed_warning") {
|
||||
now := time.Now()
|
||||
start := time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
|
||||
end := start.Add(5 * time.Hour)
|
||||
windowStart = &start
|
||||
windowEnd = &end
|
||||
slog.Info("account_session_window_initialized", "account_id", account.ID, "window_start", start, "window_end", end, "status", status)
|
||||
// 窗口重置时清除旧的 utilization,避免残留上个窗口的数据
|
||||
}
|
||||
|
||||
// 窗口重置时清除旧的 utilization,避免残留上个窗口的数据
|
||||
if windowEnd != nil && needInitWindow {
|
||||
_ = s.accountRepo.UpdateExtra(ctx, account.ID, map[string]any{
|
||||
"session_window_utilization": nil,
|
||||
})
|
||||
|
||||
370
backend/internal/service/ratelimit_session_window_test.go
Normal file
370
backend/internal/service/ratelimit_session_window_test.go
Normal file
@@ -0,0 +1,370 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/pagination"
|
||||
)
|
||||
|
||||
// sessionWindowMockRepo is a minimal AccountRepository mock that records calls
|
||||
// made by UpdateSessionWindow. Unrelated methods panic if invoked.
|
||||
type sessionWindowMockRepo struct {
|
||||
// captured calls
|
||||
sessionWindowCalls []swCall
|
||||
updateExtraCalls []ueCall
|
||||
clearRateLimitIDs []int64
|
||||
}
|
||||
|
||||
var _ AccountRepository = (*sessionWindowMockRepo)(nil)
|
||||
|
||||
type swCall struct {
|
||||
ID int64
|
||||
Start *time.Time
|
||||
End *time.Time
|
||||
Status string
|
||||
}
|
||||
|
||||
type ueCall struct {
|
||||
ID int64
|
||||
Updates map[string]any
|
||||
}
|
||||
|
||||
func (m *sessionWindowMockRepo) UpdateSessionWindow(_ context.Context, id int64, start, end *time.Time, status string) error {
|
||||
m.sessionWindowCalls = append(m.sessionWindowCalls, swCall{ID: id, Start: start, End: end, Status: status})
|
||||
return nil
|
||||
}
|
||||
func (m *sessionWindowMockRepo) UpdateExtra(_ context.Context, id int64, updates map[string]any) error {
|
||||
m.updateExtraCalls = append(m.updateExtraCalls, ueCall{ID: id, Updates: updates})
|
||||
return nil
|
||||
}
|
||||
func (m *sessionWindowMockRepo) ClearRateLimit(_ context.Context, id int64) error {
|
||||
m.clearRateLimitIDs = append(m.clearRateLimitIDs, id)
|
||||
return nil
|
||||
}
|
||||
func (m *sessionWindowMockRepo) ClearAntigravityQuotaScopes(_ context.Context, _ int64) error {
|
||||
return nil
|
||||
}
|
||||
func (m *sessionWindowMockRepo) ClearModelRateLimits(_ context.Context, _ int64) error {
|
||||
return nil
|
||||
}
|
||||
func (m *sessionWindowMockRepo) ClearTempUnschedulable(_ context.Context, _ int64) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// --- Unused interface methods (panic on unexpected call) ---
|
||||
|
||||
func (m *sessionWindowMockRepo) Create(context.Context, *Account) error { panic("unexpected") }
|
||||
func (m *sessionWindowMockRepo) GetByID(context.Context, int64) (*Account, error) {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (m *sessionWindowMockRepo) GetByIDs(context.Context, []int64) ([]*Account, error) {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (m *sessionWindowMockRepo) ExistsByID(context.Context, int64) (bool, error) {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (m *sessionWindowMockRepo) GetByCRSAccountID(context.Context, string) (*Account, error) {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (m *sessionWindowMockRepo) FindByExtraField(context.Context, string, any) ([]Account, error) {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (m *sessionWindowMockRepo) ListCRSAccountIDs(context.Context) (map[string]int64, error) {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (m *sessionWindowMockRepo) Update(context.Context, *Account) error { panic("unexpected") }
|
||||
func (m *sessionWindowMockRepo) Delete(context.Context, int64) error { panic("unexpected") }
|
||||
func (m *sessionWindowMockRepo) List(context.Context, pagination.PaginationParams) ([]Account, *pagination.PaginationResult, error) {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (m *sessionWindowMockRepo) ListWithFilters(context.Context, pagination.PaginationParams, string, string, string, string, int64) ([]Account, *pagination.PaginationResult, error) {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (m *sessionWindowMockRepo) ListByGroup(context.Context, int64) ([]Account, error) {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (m *sessionWindowMockRepo) ListActive(context.Context) ([]Account, error) {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (m *sessionWindowMockRepo) ListByPlatform(context.Context, string) ([]Account, error) {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (m *sessionWindowMockRepo) UpdateLastUsed(context.Context, int64) error { panic("unexpected") }
|
||||
func (m *sessionWindowMockRepo) BatchUpdateLastUsed(context.Context, map[int64]time.Time) error {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (m *sessionWindowMockRepo) SetError(context.Context, int64, string) error {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (m *sessionWindowMockRepo) ClearError(context.Context, int64) error { panic("unexpected") }
|
||||
func (m *sessionWindowMockRepo) SetSchedulable(context.Context, int64, bool) error {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (m *sessionWindowMockRepo) AutoPauseExpiredAccounts(context.Context, time.Time) (int64, error) {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (m *sessionWindowMockRepo) BindGroups(context.Context, int64, []int64) error {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (m *sessionWindowMockRepo) ListSchedulable(context.Context) ([]Account, error) {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (m *sessionWindowMockRepo) ListSchedulableByGroupID(context.Context, int64) ([]Account, error) {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (m *sessionWindowMockRepo) ListSchedulableByPlatform(context.Context, string) ([]Account, error) {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (m *sessionWindowMockRepo) ListSchedulableByGroupIDAndPlatform(context.Context, int64, string) ([]Account, error) {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (m *sessionWindowMockRepo) ListSchedulableByPlatforms(context.Context, []string) ([]Account, error) {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (m *sessionWindowMockRepo) ListSchedulableByGroupIDAndPlatforms(context.Context, int64, []string) ([]Account, error) {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (m *sessionWindowMockRepo) ListSchedulableUngroupedByPlatform(context.Context, string) ([]Account, error) {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (m *sessionWindowMockRepo) ListSchedulableUngroupedByPlatforms(context.Context, []string) ([]Account, error) {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (m *sessionWindowMockRepo) SetRateLimited(context.Context, int64, time.Time) error {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (m *sessionWindowMockRepo) SetModelRateLimit(context.Context, int64, string, time.Time) error {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (m *sessionWindowMockRepo) SetOverloaded(context.Context, int64, time.Time) error {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (m *sessionWindowMockRepo) SetTempUnschedulable(context.Context, int64, time.Time, string) error {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (m *sessionWindowMockRepo) BulkUpdate(context.Context, []int64, AccountBulkUpdate) (int64, error) {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (m *sessionWindowMockRepo) IncrementQuotaUsed(context.Context, int64, float64) error {
|
||||
panic("unexpected")
|
||||
}
|
||||
func (m *sessionWindowMockRepo) ResetQuotaUsed(context.Context, int64) error { panic("unexpected") }
|
||||
|
||||
// newRateLimitServiceForTest creates a RateLimitService with the given mock repo.
|
||||
func newRateLimitServiceForTest(repo AccountRepository) *RateLimitService {
|
||||
return &RateLimitService{accountRepo: repo}
|
||||
}
|
||||
|
||||
func TestUpdateSessionWindow_UsesResetHeader(t *testing.T) {
|
||||
// The reset header provides the real window end as a Unix timestamp.
|
||||
// UpdateSessionWindow should use it instead of the hour-truncated prediction.
|
||||
resetUnix := time.Now().Add(3 * time.Hour).Unix()
|
||||
wantEnd := time.Unix(resetUnix, 0)
|
||||
wantStart := wantEnd.Add(-5 * time.Hour)
|
||||
|
||||
repo := &sessionWindowMockRepo{}
|
||||
svc := newRateLimitServiceForTest(repo)
|
||||
|
||||
account := &Account{ID: 42} // no existing window → needInitWindow=true
|
||||
headers := http.Header{}
|
||||
headers.Set("anthropic-ratelimit-unified-5h-status", "allowed")
|
||||
headers.Set("anthropic-ratelimit-unified-5h-reset", fmt.Sprintf("%d", resetUnix))
|
||||
|
||||
svc.UpdateSessionWindow(context.Background(), account, headers)
|
||||
|
||||
if len(repo.sessionWindowCalls) != 1 {
|
||||
t.Fatalf("expected 1 UpdateSessionWindow call, got %d", len(repo.sessionWindowCalls))
|
||||
}
|
||||
|
||||
call := repo.sessionWindowCalls[0]
|
||||
if call.ID != 42 {
|
||||
t.Errorf("expected account ID 42, got %d", call.ID)
|
||||
}
|
||||
if call.End == nil || !call.End.Equal(wantEnd) {
|
||||
t.Errorf("expected window end %v, got %v", wantEnd, call.End)
|
||||
}
|
||||
if call.Start == nil || !call.Start.Equal(wantStart) {
|
||||
t.Errorf("expected window start %v, got %v", wantStart, call.Start)
|
||||
}
|
||||
if call.Status != "allowed" {
|
||||
t.Errorf("expected status 'allowed', got %q", call.Status)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateSessionWindow_FallbackPredictionWhenNoResetHeader(t *testing.T) {
|
||||
// When the reset header is absent, should fall back to hour-truncated prediction.
|
||||
repo := &sessionWindowMockRepo{}
|
||||
svc := newRateLimitServiceForTest(repo)
|
||||
|
||||
account := &Account{ID: 10} // no existing window
|
||||
headers := http.Header{}
|
||||
headers.Set("anthropic-ratelimit-unified-5h-status", "allowed_warning")
|
||||
// No anthropic-ratelimit-unified-5h-reset header
|
||||
|
||||
// Capture now before the call to avoid hour-boundary races
|
||||
now := time.Now()
|
||||
expectedStart := time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
|
||||
expectedEnd := expectedStart.Add(5 * time.Hour)
|
||||
|
||||
svc.UpdateSessionWindow(context.Background(), account, headers)
|
||||
|
||||
if len(repo.sessionWindowCalls) != 1 {
|
||||
t.Fatalf("expected 1 UpdateSessionWindow call, got %d", len(repo.sessionWindowCalls))
|
||||
}
|
||||
|
||||
call := repo.sessionWindowCalls[0]
|
||||
if call.End == nil {
|
||||
t.Fatal("expected window end to be set (fallback prediction)")
|
||||
}
|
||||
// Fallback: start = current hour truncated, end = start + 5h
|
||||
|
||||
if !call.End.Equal(expectedEnd) {
|
||||
t.Errorf("expected fallback end %v, got %v", expectedEnd, *call.End)
|
||||
}
|
||||
if call.Start == nil || !call.Start.Equal(expectedStart) {
|
||||
t.Errorf("expected fallback start %v, got %v", expectedStart, call.Start)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateSessionWindow_CorrectsStalePrediction(t *testing.T) {
|
||||
// When the stored SessionWindowEnd is wrong (from a previous prediction),
|
||||
// and the reset header provides the real time, it should update the window.
|
||||
staleEnd := time.Now().Add(2 * time.Hour) // existing prediction: 2h from now
|
||||
realResetUnix := time.Now().Add(4 * time.Hour).Unix() // real reset: 4h from now
|
||||
wantEnd := time.Unix(realResetUnix, 0)
|
||||
|
||||
repo := &sessionWindowMockRepo{}
|
||||
svc := newRateLimitServiceForTest(repo)
|
||||
|
||||
account := &Account{
|
||||
ID: 55,
|
||||
SessionWindowEnd: &staleEnd,
|
||||
}
|
||||
headers := http.Header{}
|
||||
headers.Set("anthropic-ratelimit-unified-5h-status", "allowed")
|
||||
headers.Set("anthropic-ratelimit-unified-5h-reset", fmt.Sprintf("%d", realResetUnix))
|
||||
|
||||
svc.UpdateSessionWindow(context.Background(), account, headers)
|
||||
|
||||
if len(repo.sessionWindowCalls) != 1 {
|
||||
t.Fatalf("expected 1 UpdateSessionWindow call, got %d", len(repo.sessionWindowCalls))
|
||||
}
|
||||
|
||||
call := repo.sessionWindowCalls[0]
|
||||
if call.End == nil || !call.End.Equal(wantEnd) {
|
||||
t.Errorf("expected corrected end %v, got %v", wantEnd, call.End)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateSessionWindow_NoUpdateWhenHeaderMatchesStored(t *testing.T) {
|
||||
// If the reset header matches the stored SessionWindowEnd, no window update needed.
|
||||
futureUnix := time.Now().Add(3 * time.Hour).Unix()
|
||||
existingEnd := time.Unix(futureUnix, 0)
|
||||
|
||||
repo := &sessionWindowMockRepo{}
|
||||
svc := newRateLimitServiceForTest(repo)
|
||||
|
||||
account := &Account{
|
||||
ID: 77,
|
||||
SessionWindowEnd: &existingEnd,
|
||||
}
|
||||
headers := http.Header{}
|
||||
headers.Set("anthropic-ratelimit-unified-5h-status", "allowed")
|
||||
headers.Set("anthropic-ratelimit-unified-5h-reset", fmt.Sprintf("%d", futureUnix)) // same as stored
|
||||
|
||||
svc.UpdateSessionWindow(context.Background(), account, headers)
|
||||
|
||||
if len(repo.sessionWindowCalls) != 1 {
|
||||
t.Fatalf("expected 1 UpdateSessionWindow call, got %d", len(repo.sessionWindowCalls))
|
||||
}
|
||||
|
||||
call := repo.sessionWindowCalls[0]
|
||||
// windowStart and windowEnd should be nil (no update needed)
|
||||
if call.Start != nil || call.End != nil {
|
||||
t.Errorf("expected nil start/end (no window change needed), got start=%v end=%v", call.Start, call.End)
|
||||
}
|
||||
// Status is still updated
|
||||
if call.Status != "allowed" {
|
||||
t.Errorf("expected status 'allowed', got %q", call.Status)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateSessionWindow_ClearsUtilizationOnWindowReset(t *testing.T) {
|
||||
// When needInitWindow=true and window is set, utilization should be cleared.
|
||||
resetUnix := time.Now().Add(3 * time.Hour).Unix()
|
||||
|
||||
repo := &sessionWindowMockRepo{}
|
||||
svc := newRateLimitServiceForTest(repo)
|
||||
|
||||
account := &Account{ID: 33} // no existing window → needInitWindow=true
|
||||
headers := http.Header{}
|
||||
headers.Set("anthropic-ratelimit-unified-5h-status", "allowed")
|
||||
headers.Set("anthropic-ratelimit-unified-5h-reset", fmt.Sprintf("%d", resetUnix))
|
||||
headers.Set("anthropic-ratelimit-unified-5h-utilization", "0.15")
|
||||
|
||||
svc.UpdateSessionWindow(context.Background(), account, headers)
|
||||
|
||||
// Should have 2 UpdateExtra calls: one to clear utilization, one to store new utilization
|
||||
if len(repo.updateExtraCalls) != 2 {
|
||||
t.Fatalf("expected 2 UpdateExtra calls, got %d", len(repo.updateExtraCalls))
|
||||
}
|
||||
|
||||
// First call: clear utilization (nil value)
|
||||
clearCall := repo.updateExtraCalls[0]
|
||||
if clearCall.Updates["session_window_utilization"] != nil {
|
||||
t.Errorf("expected utilization cleared to nil, got %v", clearCall.Updates["session_window_utilization"])
|
||||
}
|
||||
|
||||
// Second call: store new utilization
|
||||
storeCall := repo.updateExtraCalls[1]
|
||||
if val, ok := storeCall.Updates["session_window_utilization"].(float64); !ok || val != 0.15 {
|
||||
t.Errorf("expected utilization stored as 0.15, got %v", storeCall.Updates["session_window_utilization"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateSessionWindow_NoClearUtilizationOnCorrection(t *testing.T) {
|
||||
// When correcting a stale prediction (needInitWindow=false), utilization should NOT be cleared.
|
||||
staleEnd := time.Now().Add(2 * time.Hour)
|
||||
realResetUnix := time.Now().Add(4 * time.Hour).Unix()
|
||||
|
||||
repo := &sessionWindowMockRepo{}
|
||||
svc := newRateLimitServiceForTest(repo)
|
||||
|
||||
account := &Account{
|
||||
ID: 66,
|
||||
SessionWindowEnd: &staleEnd,
|
||||
}
|
||||
headers := http.Header{}
|
||||
headers.Set("anthropic-ratelimit-unified-5h-status", "allowed")
|
||||
headers.Set("anthropic-ratelimit-unified-5h-reset", fmt.Sprintf("%d", realResetUnix))
|
||||
headers.Set("anthropic-ratelimit-unified-5h-utilization", "0.30")
|
||||
|
||||
svc.UpdateSessionWindow(context.Background(), account, headers)
|
||||
|
||||
// Only 1 UpdateExtra call (store utilization), no clear call
|
||||
if len(repo.updateExtraCalls) != 1 {
|
||||
t.Fatalf("expected 1 UpdateExtra call (no clear), got %d", len(repo.updateExtraCalls))
|
||||
}
|
||||
|
||||
if val, ok := repo.updateExtraCalls[0].Updates["session_window_utilization"].(float64); !ok || val != 0.30 {
|
||||
t.Errorf("expected utilization 0.30, got %v", repo.updateExtraCalls[0].Updates["session_window_utilization"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateSessionWindow_NoStatusHeader(t *testing.T) {
|
||||
// Should return immediately if no status header.
|
||||
repo := &sessionWindowMockRepo{}
|
||||
svc := newRateLimitServiceForTest(repo)
|
||||
|
||||
account := &Account{ID: 1}
|
||||
|
||||
svc.UpdateSessionWindow(context.Background(), account, http.Header{})
|
||||
|
||||
if len(repo.sessionWindowCalls) != 0 {
|
||||
t.Errorf("expected no calls when status header absent, got %d", len(repo.sessionWindowCalls))
|
||||
}
|
||||
}
|
||||
@@ -52,8 +52,8 @@ func (r *stubGroupRepoForQuota) ListActiveByPlatform(context.Context, string) ([
|
||||
func (r *stubGroupRepoForQuota) ExistsByName(context.Context, string) (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
func (r *stubGroupRepoForQuota) GetAccountCount(context.Context, int64) (int64, error) {
|
||||
return 0, nil
|
||||
func (r *stubGroupRepoForQuota) GetAccountCount(context.Context, int64) (int64, int64, error) {
|
||||
return 0, 0, nil
|
||||
}
|
||||
func (r *stubGroupRepoForQuota) DeleteAccountGroupsByGroupID(context.Context, int64) (int64, error) {
|
||||
return 0, nil
|
||||
|
||||
@@ -40,7 +40,7 @@ func (groupRepoNoop) ListActiveByPlatform(context.Context, string) ([]Group, err
|
||||
func (groupRepoNoop) ExistsByName(context.Context, string) (bool, error) {
|
||||
panic("unexpected ExistsByName call")
|
||||
}
|
||||
func (groupRepoNoop) GetAccountCount(context.Context, int64) (int64, error) {
|
||||
func (groupRepoNoop) GetAccountCount(context.Context, int64) (int64, int64, error) {
|
||||
panic("unexpected GetAccountCount call")
|
||||
}
|
||||
func (groupRepoNoop) DeleteAccountGroupsByGroupID(context.Context, int64) (int64, error) {
|
||||
@@ -92,7 +92,7 @@ func (userSubRepoNoop) ListActiveByUserID(context.Context, int64) ([]UserSubscri
|
||||
func (userSubRepoNoop) ListByGroupID(context.Context, int64, pagination.PaginationParams) ([]UserSubscription, *pagination.PaginationResult, error) {
|
||||
panic("unexpected ListByGroupID call")
|
||||
}
|
||||
func (userSubRepoNoop) List(context.Context, pagination.PaginationParams, *int64, *int64, string, string, string) ([]UserSubscription, *pagination.PaginationResult, error) {
|
||||
func (userSubRepoNoop) List(context.Context, pagination.PaginationParams, *int64, *int64, string, string, string, string) ([]UserSubscription, *pagination.PaginationResult, error) {
|
||||
panic("unexpected List call")
|
||||
}
|
||||
func (userSubRepoNoop) ExistsByUserIDAndGroupID(context.Context, int64, int64) (bool, error) {
|
||||
|
||||
@@ -634,9 +634,9 @@ func (s *SubscriptionService) ListGroupSubscriptions(ctx context.Context, groupI
|
||||
}
|
||||
|
||||
// List 获取所有订阅(分页,支持筛选和排序)
|
||||
func (s *SubscriptionService) List(ctx context.Context, page, pageSize int, userID, groupID *int64, status, sortBy, sortOrder string) ([]UserSubscription, *pagination.PaginationResult, error) {
|
||||
func (s *SubscriptionService) List(ctx context.Context, page, pageSize int, userID, groupID *int64, status, platform, sortBy, sortOrder string) ([]UserSubscription, *pagination.PaginationResult, error) {
|
||||
params := pagination.PaginationParams{Page: page, PageSize: pageSize}
|
||||
subs, pag, err := s.userSubRepo.List(ctx, params, userID, groupID, status, sortBy, sortOrder)
|
||||
subs, pag, err := s.userSubRepo.List(ctx, params, userID, groupID, status, platform, sortBy, sortOrder)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
@@ -18,7 +18,7 @@ type UserSubscriptionRepository interface {
|
||||
ListByUserID(ctx context.Context, userID int64) ([]UserSubscription, error)
|
||||
ListActiveByUserID(ctx context.Context, userID int64) ([]UserSubscription, error)
|
||||
ListByGroupID(ctx context.Context, groupID int64, params pagination.PaginationParams) ([]UserSubscription, *pagination.PaginationResult, error)
|
||||
List(ctx context.Context, params pagination.PaginationParams, userID, groupID *int64, status, sortBy, sortOrder string) ([]UserSubscription, *pagination.PaginationResult, error)
|
||||
List(ctx context.Context, params pagination.PaginationParams, userID, groupID *int64, status, platform, sortBy, sortOrder string) ([]UserSubscription, *pagination.PaginationResult, error)
|
||||
|
||||
ExistsByUserIDAndGroupID(ctx context.Context, userID, groupID int64) (bool, error)
|
||||
ExtendExpiry(ctx context.Context, subscriptionID int64, newExpiresAt time.Time) error
|
||||
|
||||
@@ -486,4 +486,5 @@ var ProviderSet = wire.NewSet(
|
||||
ProvideIdempotencyCleanupService,
|
||||
ProvideScheduledTestService,
|
||||
ProvideScheduledTestRunnerService,
|
||||
NewGroupCapacityService,
|
||||
)
|
||||
|
||||
@@ -164,8 +164,8 @@ func NeedsSetup() bool {
|
||||
func TestDatabaseConnection(cfg *DatabaseConfig) error {
|
||||
// First, connect to the default 'postgres' database to check/create target database
|
||||
defaultDSN := fmt.Sprintf(
|
||||
"host=%s port=%d user=%s password=%s dbname=postgres sslmode=%s",
|
||||
cfg.Host, cfg.Port, cfg.User, cfg.Password, cfg.SSLMode,
|
||||
"host=%s port=%d user=%s password=%s dbname=%s sslmode=%s",
|
||||
cfg.Host, cfg.Port, cfg.User, cfg.Password, cfg.DBName, cfg.SSLMode,
|
||||
)
|
||||
|
||||
db, err := sql.Open("postgres", defaultDSN)
|
||||
|
||||
@@ -82,6 +82,7 @@ RUN apk add --no-cache \
|
||||
ca-certificates \
|
||||
tzdata \
|
||||
curl \
|
||||
su-exec \
|
||||
&& rm -rf /var/cache/apk/*
|
||||
|
||||
# Create non-root user
|
||||
@@ -97,8 +98,9 @@ COPY --from=backend-builder /app/sub2api /app/sub2api
|
||||
# Create data directory
|
||||
RUN mkdir -p /app/data && chown -R sub2api:sub2api /app
|
||||
|
||||
# Switch to non-root user
|
||||
USER sub2api
|
||||
# Copy entrypoint script (fixes volume permissions then drops to sub2api)
|
||||
COPY deploy/docker-entrypoint.sh /app/docker-entrypoint.sh
|
||||
RUN chmod +x /app/docker-entrypoint.sh
|
||||
|
||||
# Expose port (can be overridden by SERVER_PORT env var)
|
||||
EXPOSE 8080
|
||||
@@ -107,5 +109,6 @@ EXPOSE 8080
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=10s --retries=3 \
|
||||
CMD wget -q -T 5 -O /dev/null http://localhost:${SERVER_PORT:-8080}/health || exit 1
|
||||
|
||||
# Run the application
|
||||
ENTRYPOINT ["/app/sub2api"]
|
||||
# Run the application (entrypoint fixes /app/data ownership then execs as sub2api)
|
||||
ENTRYPOINT ["/app/docker-entrypoint.sh"]
|
||||
CMD ["/app/sub2api"]
|
||||
|
||||
22
deploy/docker-entrypoint.sh
Normal file
22
deploy/docker-entrypoint.sh
Normal file
@@ -0,0 +1,22 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
# Fix data directory permissions when running as root.
|
||||
# Docker named volumes / host bind-mounts may be owned by root,
|
||||
# preventing the non-root sub2api user from writing files.
|
||||
if [ "$(id -u)" = "0" ]; then
|
||||
mkdir -p /app/data
|
||||
chown -R sub2api:sub2api /app/data
|
||||
# Re-invoke this script as sub2api so the flag-detection below
|
||||
# also runs under the correct user.
|
||||
exec su-exec sub2api "$0" "$@"
|
||||
fi
|
||||
|
||||
# Compatibility: if the first arg looks like a flag (e.g. --help),
|
||||
# prepend the default binary so it behaves the same as the old
|
||||
# ENTRYPOINT ["/app/sub2api"] style.
|
||||
if [ "${1#-}" != "$1" ]; then
|
||||
set -- /app/sub2api "$@"
|
||||
fi
|
||||
|
||||
exec "$@"
|
||||
@@ -29,6 +29,10 @@ export interface BackupRecord {
|
||||
started_at: string
|
||||
finished_at?: string
|
||||
expires_at?: string
|
||||
progress?: string
|
||||
restore_status?: string
|
||||
restore_error?: string
|
||||
restored_at?: string
|
||||
}
|
||||
|
||||
export interface CreateBackupRequest {
|
||||
@@ -69,7 +73,7 @@ export async function updateSchedule(config: BackupScheduleConfig): Promise<Back
|
||||
|
||||
// Backup operations
|
||||
export async function createBackup(req?: CreateBackupRequest): Promise<BackupRecord> {
|
||||
const { data } = await apiClient.post<BackupRecord>('/admin/backups', req || {}, { timeout: 600000 })
|
||||
const { data } = await apiClient.post<BackupRecord>('/admin/backups', req || {})
|
||||
return data
|
||||
}
|
||||
|
||||
@@ -93,8 +97,9 @@ export async function getDownloadURL(id: string): Promise<{ url: string }> {
|
||||
}
|
||||
|
||||
// Restore
|
||||
export async function restoreBackup(id: string, password: string): Promise<void> {
|
||||
await apiClient.post(`/admin/backups/${id}/restore`, { password }, { timeout: 600000 })
|
||||
export async function restoreBackup(id: string, password: string): Promise<BackupRecord> {
|
||||
const { data } = await apiClient.post<BackupRecord>(`/admin/backups/${id}/restore`, { password })
|
||||
return data
|
||||
}
|
||||
|
||||
export const backupAPI = {
|
||||
|
||||
@@ -12,6 +12,7 @@ import type {
|
||||
ApiKeyUsageTrendPoint,
|
||||
UserUsageTrendPoint,
|
||||
UserSpendingRankingResponse,
|
||||
UserBreakdownItem,
|
||||
UsageRequestType
|
||||
} from '@/types'
|
||||
|
||||
@@ -156,6 +157,29 @@ export async function getGroupStats(params?: GroupStatsParams): Promise<GroupSta
|
||||
return data
|
||||
}
|
||||
|
||||
export interface UserBreakdownParams {
|
||||
start_date?: string
|
||||
end_date?: string
|
||||
group_id?: number
|
||||
model?: string
|
||||
endpoint?: string
|
||||
endpoint_type?: 'inbound' | 'upstream' | 'path'
|
||||
limit?: number
|
||||
}
|
||||
|
||||
export interface UserBreakdownResponse {
|
||||
users: UserBreakdownItem[]
|
||||
start_date: string
|
||||
end_date: string
|
||||
}
|
||||
|
||||
export async function getUserBreakdown(params: UserBreakdownParams): Promise<UserBreakdownResponse> {
|
||||
const { data } = await apiClient.get<UserBreakdownResponse>('/admin/dashboard/user-breakdown', {
|
||||
params
|
||||
})
|
||||
return data
|
||||
}
|
||||
|
||||
/**
|
||||
* Get dashboard snapshot v2 (aggregated response for heavy admin pages).
|
||||
*/
|
||||
|
||||
@@ -218,6 +218,34 @@ export async function batchSetGroupRateMultipliers(
|
||||
return data
|
||||
}
|
||||
|
||||
/**
|
||||
* Get usage summary (today + cumulative cost) for all groups
|
||||
* @param timezone - IANA timezone string (e.g. "Asia/Shanghai")
|
||||
* @returns Array of group usage summaries
|
||||
*/
|
||||
export async function getUsageSummary(
|
||||
timezone?: string
|
||||
): Promise<{ group_id: number; today_cost: number; total_cost: number }[]> {
|
||||
const { data } = await apiClient.get<
|
||||
{ group_id: number; today_cost: number; total_cost: number }[]
|
||||
>('/admin/groups/usage-summary', {
|
||||
params: timezone ? { timezone } : undefined
|
||||
})
|
||||
return data
|
||||
}
|
||||
|
||||
/**
|
||||
* Get capacity summary (concurrency/sessions/RPM) for all active groups
|
||||
*/
|
||||
export async function getCapacitySummary(): Promise<
|
||||
{ group_id: number; concurrency_used: number; concurrency_max: number; sessions_used: number; sessions_max: number; rpm_used: number; rpm_max: number }[]
|
||||
> {
|
||||
const { data } = await apiClient.get<
|
||||
{ group_id: number; concurrency_used: number; concurrency_max: number; sessions_used: number; sessions_max: number; rpm_used: number; rpm_max: number }[]
|
||||
>('/admin/groups/capacity-summary')
|
||||
return data
|
||||
}
|
||||
|
||||
export const groupsAPI = {
|
||||
list,
|
||||
getAll,
|
||||
@@ -232,7 +260,9 @@ export const groupsAPI = {
|
||||
getGroupRateMultipliers,
|
||||
clearGroupRateMultipliers,
|
||||
batchSetGroupRateMultipliers,
|
||||
updateSortOrder
|
||||
updateSortOrder,
|
||||
getUsageSummary,
|
||||
getCapacitySummary
|
||||
}
|
||||
|
||||
export default groupsAPI
|
||||
|
||||
@@ -27,6 +27,7 @@ export async function list(
|
||||
status?: 'active' | 'expired' | 'revoked'
|
||||
user_id?: number
|
||||
group_id?: number
|
||||
platform?: string
|
||||
sort_by?: string
|
||||
sort_order?: 'asc' | 'desc'
|
||||
},
|
||||
|
||||
@@ -73,15 +73,16 @@
|
||||
<div v-else class="text-xs text-gray-400">-</div>
|
||||
</template>
|
||||
|
||||
<!-- OpenAI OAuth accounts: prefer fresh usage query for active rate-limited rows -->
|
||||
<!-- OpenAI OAuth accounts: single source from /usage API -->
|
||||
<template v-else-if="account.platform === 'openai' && account.type === 'oauth'">
|
||||
<div v-if="preferFetchedOpenAIUsage" class="space-y-1">
|
||||
<div v-if="hasOpenAIUsageFallback" class="space-y-1">
|
||||
<UsageProgressBar
|
||||
v-if="usageInfo?.five_hour"
|
||||
label="5h"
|
||||
:utilization="usageInfo.five_hour.utilization"
|
||||
:resets-at="usageInfo.five_hour.resets_at"
|
||||
:window-stats="usageInfo.five_hour.window_stats"
|
||||
:show-now-when-idle="true"
|
||||
color="indigo"
|
||||
/>
|
||||
<UsageProgressBar
|
||||
@@ -90,37 +91,7 @@
|
||||
:utilization="usageInfo.seven_day.utilization"
|
||||
:resets-at="usageInfo.seven_day.resets_at"
|
||||
:window-stats="usageInfo.seven_day.window_stats"
|
||||
color="emerald"
|
||||
/>
|
||||
</div>
|
||||
<div v-else-if="isActiveOpenAIRateLimited && loading" class="space-y-1.5">
|
||||
<div class="flex items-center gap-1">
|
||||
<div class="h-3 w-[32px] animate-pulse rounded bg-gray-200 dark:bg-gray-700"></div>
|
||||
<div class="h-1.5 w-8 animate-pulse rounded-full bg-gray-200 dark:bg-gray-700"></div>
|
||||
<div class="h-3 w-[32px] animate-pulse rounded bg-gray-200 dark:bg-gray-700"></div>
|
||||
</div>
|
||||
<div class="flex items-center gap-1">
|
||||
<div class="h-3 w-[32px] animate-pulse rounded bg-gray-200 dark:bg-gray-700"></div>
|
||||
<div class="h-1.5 w-8 animate-pulse rounded-full bg-gray-200 dark:bg-gray-700"></div>
|
||||
<div class="h-3 w-[32px] animate-pulse rounded bg-gray-200 dark:bg-gray-700"></div>
|
||||
</div>
|
||||
</div>
|
||||
<div v-else-if="hasCodexUsage" class="space-y-1">
|
||||
<!-- 5h Window -->
|
||||
<UsageProgressBar
|
||||
v-if="codex5hUsedPercent !== null"
|
||||
label="5h"
|
||||
:utilization="codex5hUsedPercent"
|
||||
:resets-at="codex5hResetAt"
|
||||
color="indigo"
|
||||
/>
|
||||
|
||||
<!-- 7d Window -->
|
||||
<UsageProgressBar
|
||||
v-if="codex7dUsedPercent !== null"
|
||||
label="7d"
|
||||
:utilization="codex7dUsedPercent"
|
||||
:resets-at="codex7dResetAt"
|
||||
:show-now-when-idle="true"
|
||||
color="emerald"
|
||||
/>
|
||||
</div>
|
||||
@@ -136,24 +107,6 @@
|
||||
<div class="h-3 w-[32px] animate-pulse rounded bg-gray-200 dark:bg-gray-700"></div>
|
||||
</div>
|
||||
</div>
|
||||
<div v-else-if="hasOpenAIUsageFallback" class="space-y-1">
|
||||
<UsageProgressBar
|
||||
v-if="usageInfo?.five_hour"
|
||||
label="5h"
|
||||
:utilization="usageInfo.five_hour.utilization"
|
||||
:resets-at="usageInfo.five_hour.resets_at"
|
||||
:window-stats="usageInfo.five_hour.window_stats"
|
||||
color="indigo"
|
||||
/>
|
||||
<UsageProgressBar
|
||||
v-if="usageInfo?.seven_day"
|
||||
label="7d"
|
||||
:utilization="usageInfo.seven_day.utilization"
|
||||
:resets-at="usageInfo.seven_day.resets_at"
|
||||
:window-stats="usageInfo.seven_day.window_stats"
|
||||
color="emerald"
|
||||
/>
|
||||
</div>
|
||||
<div v-else class="text-xs text-gray-400">-</div>
|
||||
</template>
|
||||
|
||||
@@ -389,8 +342,43 @@
|
||||
<div v-else>
|
||||
<!-- Gemini API Key accounts: show quota info -->
|
||||
<AccountQuotaInfo v-if="account.platform === 'gemini'" :account="account" />
|
||||
<!-- API Key accounts with quota limits: show progress bars -->
|
||||
<div v-else-if="hasApiKeyQuota" class="space-y-1">
|
||||
<!-- Key/Bedrock accounts: show today stats + optional quota bars -->
|
||||
<div v-else class="space-y-1">
|
||||
<!-- Today stats row (requests, tokens, cost, user_cost) -->
|
||||
<div
|
||||
v-if="todayStats"
|
||||
class="mb-0.5 flex items-center"
|
||||
>
|
||||
<div class="flex items-center gap-1.5 text-[9px] text-gray-500 dark:text-gray-400">
|
||||
<span class="rounded bg-gray-100 px-1.5 py-0.5 dark:bg-gray-800">
|
||||
{{ formatKeyRequests }} req
|
||||
</span>
|
||||
<span class="rounded bg-gray-100 px-1.5 py-0.5 dark:bg-gray-800">
|
||||
{{ formatKeyTokens }}
|
||||
</span>
|
||||
<span class="rounded bg-gray-100 px-1.5 py-0.5 dark:bg-gray-800" :title="t('usage.accountBilled')">
|
||||
A ${{ formatKeyCost }}
|
||||
</span>
|
||||
<span
|
||||
v-if="todayStats.user_cost != null"
|
||||
class="rounded bg-gray-100 px-1.5 py-0.5 dark:bg-gray-800"
|
||||
:title="t('usage.userBilled')"
|
||||
>
|
||||
U ${{ formatKeyUserCost }}
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
<!-- Loading skeleton for today stats -->
|
||||
<div
|
||||
v-else-if="todayStatsLoading"
|
||||
class="mb-0.5 flex items-center gap-1"
|
||||
>
|
||||
<div class="h-3 w-10 animate-pulse rounded bg-gray-200 dark:bg-gray-700"></div>
|
||||
<div class="h-3 w-8 animate-pulse rounded bg-gray-200 dark:bg-gray-700"></div>
|
||||
<div class="h-3 w-12 animate-pulse rounded bg-gray-200 dark:bg-gray-700"></div>
|
||||
</div>
|
||||
|
||||
<!-- API Key accounts with quota limits: show progress bars -->
|
||||
<UsageProgressBar
|
||||
v-if="quotaDailyBar"
|
||||
label="1d"
|
||||
@@ -411,8 +399,10 @@
|
||||
:utilization="quotaTotalBar.utilization"
|
||||
color="purple"
|
||||
/>
|
||||
|
||||
<!-- No data at all -->
|
||||
<div v-if="!todayStats && !todayStatsLoading && !hasApiKeyQuota" class="text-xs text-gray-400">-</div>
|
||||
</div>
|
||||
<div v-else class="text-xs text-gray-400">-</div>
|
||||
</div>
|
||||
</template>
|
||||
|
||||
@@ -422,13 +412,23 @@ import { useI18n } from 'vue-i18n'
|
||||
import { adminAPI } from '@/api/admin'
|
||||
import type { Account, AccountUsageInfo, GeminiCredentials, WindowStats } from '@/types'
|
||||
import { buildOpenAIUsageRefreshKey } from '@/utils/accountUsageRefresh'
|
||||
import { resolveCodexUsageWindow } from '@/utils/codexUsage'
|
||||
import { formatCompactNumber } from '@/utils/format'
|
||||
import UsageProgressBar from './UsageProgressBar.vue'
|
||||
import AccountQuotaInfo from './AccountQuotaInfo.vue'
|
||||
|
||||
const props = defineProps<{
|
||||
account: Account
|
||||
}>()
|
||||
const props = withDefaults(
|
||||
defineProps<{
|
||||
account: Account
|
||||
todayStats?: WindowStats | null
|
||||
todayStatsLoading?: boolean
|
||||
manualRefreshToken?: number
|
||||
}>(),
|
||||
{
|
||||
todayStats: null,
|
||||
todayStatsLoading: false,
|
||||
manualRefreshToken: 0
|
||||
}
|
||||
)
|
||||
|
||||
const { t } = useI18n()
|
||||
|
||||
@@ -470,54 +470,17 @@ const geminiUsageAvailable = computed(() => {
|
||||
)
|
||||
})
|
||||
|
||||
const codex5hWindow = computed(() => resolveCodexUsageWindow(props.account.extra, '5h'))
|
||||
const codex7dWindow = computed(() => resolveCodexUsageWindow(props.account.extra, '7d'))
|
||||
|
||||
// OpenAI Codex usage computed properties
|
||||
const hasCodexUsage = computed(() => {
|
||||
return codex5hWindow.value.usedPercent !== null || codex7dWindow.value.usedPercent !== null
|
||||
})
|
||||
|
||||
const hasOpenAIUsageFallback = computed(() => {
|
||||
if (props.account.platform !== 'openai' || props.account.type !== 'oauth') return false
|
||||
return !!usageInfo.value?.five_hour || !!usageInfo.value?.seven_day
|
||||
})
|
||||
|
||||
const isActiveOpenAIRateLimited = computed(() => {
|
||||
if (props.account.platform !== 'openai' || props.account.type !== 'oauth') return false
|
||||
if (!props.account.rate_limit_reset_at) return false
|
||||
const resetAt = Date.parse(props.account.rate_limit_reset_at)
|
||||
return !Number.isNaN(resetAt) && resetAt > Date.now()
|
||||
})
|
||||
|
||||
const preferFetchedOpenAIUsage = computed(() => {
|
||||
return (isActiveOpenAIRateLimited.value || isOpenAICodexSnapshotStale.value) && hasOpenAIUsageFallback.value
|
||||
})
|
||||
|
||||
const openAIUsageRefreshKey = computed(() => buildOpenAIUsageRefreshKey(props.account))
|
||||
|
||||
const isOpenAICodexSnapshotStale = computed(() => {
|
||||
if (props.account.platform !== 'openai' || props.account.type !== 'oauth') return false
|
||||
const extra = props.account.extra as Record<string, unknown> | undefined
|
||||
const updatedAtRaw = extra?.codex_usage_updated_at
|
||||
if (!updatedAtRaw) return true
|
||||
const updatedAt = Date.parse(String(updatedAtRaw))
|
||||
if (Number.isNaN(updatedAt)) return true
|
||||
return Date.now() - updatedAt >= 10 * 60 * 1000
|
||||
})
|
||||
|
||||
const shouldAutoLoadUsageOnMount = computed(() => {
|
||||
if (props.account.platform === 'openai' && props.account.type === 'oauth') {
|
||||
return isActiveOpenAIRateLimited.value || !hasCodexUsage.value || isOpenAICodexSnapshotStale.value
|
||||
}
|
||||
return shouldFetchUsage.value
|
||||
})
|
||||
|
||||
const codex5hUsedPercent = computed(() => codex5hWindow.value.usedPercent)
|
||||
const codex5hResetAt = computed(() => codex5hWindow.value.resetAt)
|
||||
const codex7dUsedPercent = computed(() => codex7dWindow.value.usedPercent)
|
||||
const codex7dResetAt = computed(() => codex7dWindow.value.resetAt)
|
||||
|
||||
// Antigravity quota types (用于 API 返回的数据)
|
||||
interface AntigravityUsageResult {
|
||||
utilization: number
|
||||
@@ -1006,6 +969,28 @@ const quotaTotalBar = computed((): QuotaBarInfo | null => {
|
||||
return makeQuotaBar(props.account.quota_used ?? 0, limit)
|
||||
})
|
||||
|
||||
// ===== Key account today stats formatters =====
|
||||
|
||||
const formatKeyRequests = computed(() => {
|
||||
if (!props.todayStats) return ''
|
||||
return formatCompactNumber(props.todayStats.requests, { allowBillions: false })
|
||||
})
|
||||
|
||||
const formatKeyTokens = computed(() => {
|
||||
if (!props.todayStats) return ''
|
||||
return formatCompactNumber(props.todayStats.tokens)
|
||||
})
|
||||
|
||||
const formatKeyCost = computed(() => {
|
||||
if (!props.todayStats) return '0.00'
|
||||
return props.todayStats.cost.toFixed(2)
|
||||
})
|
||||
|
||||
const formatKeyUserCost = computed(() => {
|
||||
if (!props.todayStats || props.todayStats.user_cost == null) return '0.00'
|
||||
return props.todayStats.user_cost.toFixed(2)
|
||||
})
|
||||
|
||||
onMounted(() => {
|
||||
if (!shouldAutoLoadUsageOnMount.value) return
|
||||
loadUsage()
|
||||
@@ -1014,10 +999,21 @@ onMounted(() => {
|
||||
watch(openAIUsageRefreshKey, (nextKey, prevKey) => {
|
||||
if (!prevKey || nextKey === prevKey) return
|
||||
if (props.account.platform !== 'openai' || props.account.type !== 'oauth') return
|
||||
if (!isActiveOpenAIRateLimited.value && hasCodexUsage.value && !isOpenAICodexSnapshotStale.value) return
|
||||
|
||||
loadUsage().catch((e) => {
|
||||
console.error('Failed to refresh OpenAI usage:', e)
|
||||
})
|
||||
})
|
||||
|
||||
watch(
|
||||
() => props.manualRefreshToken,
|
||||
(nextToken, prevToken) => {
|
||||
if (nextToken === prevToken) return
|
||||
if (!shouldFetchUsage.value) return
|
||||
|
||||
loadUsage().catch((e) => {
|
||||
console.error('Failed to refresh usage after manual refresh:', e)
|
||||
})
|
||||
}
|
||||
)
|
||||
</script>
|
||||
|
||||
@@ -1980,271 +1980,281 @@ const normalizePoolModeRetryCount = (value: number) => {
|
||||
return normalized
|
||||
}
|
||||
|
||||
watch(
|
||||
() => props.account,
|
||||
(newAccount) => {
|
||||
if (newAccount) {
|
||||
antigravityMixedChannelConfirmed.value = false
|
||||
showMixedChannelWarning.value = false
|
||||
mixedChannelWarningDetails.value = null
|
||||
mixedChannelWarningRawMessage.value = ''
|
||||
mixedChannelWarningAction.value = null
|
||||
form.name = newAccount.name
|
||||
form.notes = newAccount.notes || ''
|
||||
form.proxy_id = newAccount.proxy_id
|
||||
form.concurrency = newAccount.concurrency
|
||||
form.load_factor = newAccount.load_factor ?? null
|
||||
form.priority = newAccount.priority
|
||||
form.rate_multiplier = newAccount.rate_multiplier ?? 1
|
||||
form.status = (newAccount.status === 'active' || newAccount.status === 'inactive' || newAccount.status === 'error')
|
||||
? newAccount.status
|
||||
: 'active'
|
||||
form.group_ids = newAccount.group_ids || []
|
||||
form.expires_at = newAccount.expires_at ?? null
|
||||
const syncFormFromAccount = (newAccount: Account | null) => {
|
||||
if (!newAccount) {
|
||||
return
|
||||
}
|
||||
antigravityMixedChannelConfirmed.value = false
|
||||
showMixedChannelWarning.value = false
|
||||
mixedChannelWarningDetails.value = null
|
||||
mixedChannelWarningRawMessage.value = ''
|
||||
mixedChannelWarningAction.value = null
|
||||
form.name = newAccount.name
|
||||
form.notes = newAccount.notes || ''
|
||||
form.proxy_id = newAccount.proxy_id
|
||||
form.concurrency = newAccount.concurrency
|
||||
form.load_factor = newAccount.load_factor ?? null
|
||||
form.priority = newAccount.priority
|
||||
form.rate_multiplier = newAccount.rate_multiplier ?? 1
|
||||
form.status = (newAccount.status === 'active' || newAccount.status === 'inactive' || newAccount.status === 'error')
|
||||
? newAccount.status
|
||||
: 'active'
|
||||
form.group_ids = newAccount.group_ids || []
|
||||
form.expires_at = newAccount.expires_at ?? null
|
||||
|
||||
// Load intercept warmup requests setting (applies to all account types)
|
||||
const credentials = newAccount.credentials as Record<string, unknown> | undefined
|
||||
interceptWarmupRequests.value = credentials?.intercept_warmup_requests === true
|
||||
autoPauseOnExpired.value = newAccount.auto_pause_on_expired === true
|
||||
// Load intercept warmup requests setting (applies to all account types)
|
||||
const credentials = newAccount.credentials as Record<string, unknown> | undefined
|
||||
interceptWarmupRequests.value = credentials?.intercept_warmup_requests === true
|
||||
autoPauseOnExpired.value = newAccount.auto_pause_on_expired === true
|
||||
|
||||
// Load mixed scheduling setting (only for antigravity accounts)
|
||||
mixedScheduling.value = false
|
||||
allowOverages.value = false
|
||||
const extra = newAccount.extra as Record<string, unknown> | undefined
|
||||
mixedScheduling.value = extra?.mixed_scheduling === true
|
||||
allowOverages.value = extra?.allow_overages === true
|
||||
// Load mixed scheduling setting (only for antigravity accounts)
|
||||
mixedScheduling.value = false
|
||||
allowOverages.value = false
|
||||
const extra = newAccount.extra as Record<string, unknown> | undefined
|
||||
mixedScheduling.value = extra?.mixed_scheduling === true
|
||||
allowOverages.value = extra?.allow_overages === true
|
||||
|
||||
// Load OpenAI passthrough toggle (OpenAI OAuth/API Key)
|
||||
openaiPassthroughEnabled.value = false
|
||||
openaiOAuthResponsesWebSocketV2Mode.value = OPENAI_WS_MODE_OFF
|
||||
openaiAPIKeyResponsesWebSocketV2Mode.value = OPENAI_WS_MODE_OFF
|
||||
codexCLIOnlyEnabled.value = false
|
||||
anthropicPassthroughEnabled.value = false
|
||||
if (newAccount.platform === 'openai' && (newAccount.type === 'oauth' || newAccount.type === 'apikey')) {
|
||||
openaiPassthroughEnabled.value = extra?.openai_passthrough === true || extra?.openai_oauth_passthrough === true
|
||||
openaiOAuthResponsesWebSocketV2Mode.value = resolveOpenAIWSModeFromExtra(extra, {
|
||||
modeKey: 'openai_oauth_responses_websockets_v2_mode',
|
||||
enabledKey: 'openai_oauth_responses_websockets_v2_enabled',
|
||||
fallbackEnabledKeys: ['responses_websockets_v2_enabled', 'openai_ws_enabled'],
|
||||
defaultMode: OPENAI_WS_MODE_OFF
|
||||
})
|
||||
openaiAPIKeyResponsesWebSocketV2Mode.value = resolveOpenAIWSModeFromExtra(extra, {
|
||||
modeKey: 'openai_apikey_responses_websockets_v2_mode',
|
||||
enabledKey: 'openai_apikey_responses_websockets_v2_enabled',
|
||||
fallbackEnabledKeys: ['responses_websockets_v2_enabled', 'openai_ws_enabled'],
|
||||
defaultMode: OPENAI_WS_MODE_OFF
|
||||
})
|
||||
if (newAccount.type === 'oauth') {
|
||||
codexCLIOnlyEnabled.value = extra?.codex_cli_only === true
|
||||
}
|
||||
}
|
||||
if (newAccount.platform === 'anthropic' && newAccount.type === 'apikey') {
|
||||
anthropicPassthroughEnabled.value = extra?.anthropic_passthrough === true
|
||||
}
|
||||
// Load OpenAI passthrough toggle (OpenAI OAuth/API Key)
|
||||
openaiPassthroughEnabled.value = false
|
||||
openaiOAuthResponsesWebSocketV2Mode.value = OPENAI_WS_MODE_OFF
|
||||
openaiAPIKeyResponsesWebSocketV2Mode.value = OPENAI_WS_MODE_OFF
|
||||
codexCLIOnlyEnabled.value = false
|
||||
anthropicPassthroughEnabled.value = false
|
||||
if (newAccount.platform === 'openai' && (newAccount.type === 'oauth' || newAccount.type === 'apikey')) {
|
||||
openaiPassthroughEnabled.value = extra?.openai_passthrough === true || extra?.openai_oauth_passthrough === true
|
||||
openaiOAuthResponsesWebSocketV2Mode.value = resolveOpenAIWSModeFromExtra(extra, {
|
||||
modeKey: 'openai_oauth_responses_websockets_v2_mode',
|
||||
enabledKey: 'openai_oauth_responses_websockets_v2_enabled',
|
||||
fallbackEnabledKeys: ['responses_websockets_v2_enabled', 'openai_ws_enabled'],
|
||||
defaultMode: OPENAI_WS_MODE_OFF
|
||||
})
|
||||
openaiAPIKeyResponsesWebSocketV2Mode.value = resolveOpenAIWSModeFromExtra(extra, {
|
||||
modeKey: 'openai_apikey_responses_websockets_v2_mode',
|
||||
enabledKey: 'openai_apikey_responses_websockets_v2_enabled',
|
||||
fallbackEnabledKeys: ['responses_websockets_v2_enabled', 'openai_ws_enabled'],
|
||||
defaultMode: OPENAI_WS_MODE_OFF
|
||||
})
|
||||
if (newAccount.type === 'oauth') {
|
||||
codexCLIOnlyEnabled.value = extra?.codex_cli_only === true
|
||||
}
|
||||
}
|
||||
if (newAccount.platform === 'anthropic' && newAccount.type === 'apikey') {
|
||||
anthropicPassthroughEnabled.value = extra?.anthropic_passthrough === true
|
||||
}
|
||||
|
||||
// Load quota limit for apikey/bedrock accounts (bedrock quota is also loaded in its own branch above)
|
||||
if (newAccount.type === 'apikey' || newAccount.type === 'bedrock') {
|
||||
const quotaVal = extra?.quota_limit as number | undefined
|
||||
editQuotaLimit.value = (quotaVal && quotaVal > 0) ? quotaVal : null
|
||||
const dailyVal = extra?.quota_daily_limit as number | undefined
|
||||
editQuotaDailyLimit.value = (dailyVal && dailyVal > 0) ? dailyVal : null
|
||||
const weeklyVal = extra?.quota_weekly_limit as number | undefined
|
||||
editQuotaWeeklyLimit.value = (weeklyVal && weeklyVal > 0) ? weeklyVal : null
|
||||
// Load quota reset mode config
|
||||
editDailyResetMode.value = (extra?.quota_daily_reset_mode as 'rolling' | 'fixed') || null
|
||||
editDailyResetHour.value = (extra?.quota_daily_reset_hour as number) ?? null
|
||||
editWeeklyResetMode.value = (extra?.quota_weekly_reset_mode as 'rolling' | 'fixed') || null
|
||||
editWeeklyResetDay.value = (extra?.quota_weekly_reset_day as number) ?? null
|
||||
editWeeklyResetHour.value = (extra?.quota_weekly_reset_hour as number) ?? null
|
||||
editResetTimezone.value = (extra?.quota_reset_timezone as string) || null
|
||||
// Load quota limit for apikey/bedrock accounts (bedrock quota is also loaded in its own branch above)
|
||||
if (newAccount.type === 'apikey' || newAccount.type === 'bedrock') {
|
||||
const quotaVal = extra?.quota_limit as number | undefined
|
||||
editQuotaLimit.value = (quotaVal && quotaVal > 0) ? quotaVal : null
|
||||
const dailyVal = extra?.quota_daily_limit as number | undefined
|
||||
editQuotaDailyLimit.value = (dailyVal && dailyVal > 0) ? dailyVal : null
|
||||
const weeklyVal = extra?.quota_weekly_limit as number | undefined
|
||||
editQuotaWeeklyLimit.value = (weeklyVal && weeklyVal > 0) ? weeklyVal : null
|
||||
// Load quota reset mode config
|
||||
editDailyResetMode.value = (extra?.quota_daily_reset_mode as 'rolling' | 'fixed') || null
|
||||
editDailyResetHour.value = (extra?.quota_daily_reset_hour as number) ?? null
|
||||
editWeeklyResetMode.value = (extra?.quota_weekly_reset_mode as 'rolling' | 'fixed') || null
|
||||
editWeeklyResetDay.value = (extra?.quota_weekly_reset_day as number) ?? null
|
||||
editWeeklyResetHour.value = (extra?.quota_weekly_reset_hour as number) ?? null
|
||||
editResetTimezone.value = (extra?.quota_reset_timezone as string) || null
|
||||
} else {
|
||||
editQuotaLimit.value = null
|
||||
editQuotaDailyLimit.value = null
|
||||
editQuotaWeeklyLimit.value = null
|
||||
editDailyResetMode.value = null
|
||||
editDailyResetHour.value = null
|
||||
editWeeklyResetMode.value = null
|
||||
editWeeklyResetDay.value = null
|
||||
editWeeklyResetHour.value = null
|
||||
editResetTimezone.value = null
|
||||
}
|
||||
|
||||
// Load antigravity model mapping (Antigravity 只支持映射模式)
|
||||
if (newAccount.platform === 'antigravity') {
|
||||
const credentials = newAccount.credentials as Record<string, unknown> | undefined
|
||||
|
||||
// Antigravity 始终使用映射模式
|
||||
antigravityModelRestrictionMode.value = 'mapping'
|
||||
antigravityWhitelistModels.value = []
|
||||
|
||||
// 从 model_mapping 读取映射配置
|
||||
const rawAgMapping = credentials?.model_mapping as Record<string, string> | undefined
|
||||
if (rawAgMapping && typeof rawAgMapping === 'object') {
|
||||
const entries = Object.entries(rawAgMapping)
|
||||
// 无论是白名单样式(key===value)还是真正的映射,都统一转换为映射列表
|
||||
antigravityModelMappings.value = entries.map(([from, to]) => ({ from, to }))
|
||||
} else {
|
||||
// 兼容旧数据:从 model_whitelist 读取,转换为映射格式
|
||||
const rawWhitelist = credentials?.model_whitelist
|
||||
if (Array.isArray(rawWhitelist) && rawWhitelist.length > 0) {
|
||||
antigravityModelMappings.value = rawWhitelist
|
||||
.map((v) => String(v).trim())
|
||||
.filter((v) => v.length > 0)
|
||||
.map((m) => ({ from: m, to: m }))
|
||||
} else {
|
||||
editQuotaLimit.value = null
|
||||
editQuotaDailyLimit.value = null
|
||||
editQuotaWeeklyLimit.value = null
|
||||
editDailyResetMode.value = null
|
||||
editDailyResetHour.value = null
|
||||
editWeeklyResetMode.value = null
|
||||
editWeeklyResetDay.value = null
|
||||
editWeeklyResetHour.value = null
|
||||
editResetTimezone.value = null
|
||||
}
|
||||
|
||||
// Load antigravity model mapping (Antigravity 只支持映射模式)
|
||||
if (newAccount.platform === 'antigravity') {
|
||||
const credentials = newAccount.credentials as Record<string, unknown> | undefined
|
||||
|
||||
// Antigravity 始终使用映射模式
|
||||
antigravityModelRestrictionMode.value = 'mapping'
|
||||
antigravityWhitelistModels.value = []
|
||||
|
||||
// 从 model_mapping 读取映射配置
|
||||
const rawAgMapping = credentials?.model_mapping as Record<string, string> | undefined
|
||||
if (rawAgMapping && typeof rawAgMapping === 'object') {
|
||||
const entries = Object.entries(rawAgMapping)
|
||||
// 无论是白名单样式(key===value)还是真正的映射,都统一转换为映射列表
|
||||
antigravityModelMappings.value = entries.map(([from, to]) => ({ from, to }))
|
||||
} else {
|
||||
// 兼容旧数据:从 model_whitelist 读取,转换为映射格式
|
||||
const rawWhitelist = credentials?.model_whitelist
|
||||
if (Array.isArray(rawWhitelist) && rawWhitelist.length > 0) {
|
||||
antigravityModelMappings.value = rawWhitelist
|
||||
.map((v) => String(v).trim())
|
||||
.filter((v) => v.length > 0)
|
||||
.map((m) => ({ from: m, to: m }))
|
||||
} else {
|
||||
antigravityModelMappings.value = []
|
||||
}
|
||||
}
|
||||
} else {
|
||||
antigravityModelRestrictionMode.value = 'mapping'
|
||||
antigravityWhitelistModels.value = []
|
||||
antigravityModelMappings.value = []
|
||||
}
|
||||
}
|
||||
} else {
|
||||
antigravityModelRestrictionMode.value = 'mapping'
|
||||
antigravityWhitelistModels.value = []
|
||||
antigravityModelMappings.value = []
|
||||
}
|
||||
|
||||
// Load quota control settings (Anthropic OAuth/SetupToken only)
|
||||
loadQuotaControlSettings(newAccount)
|
||||
// Load quota control settings (Anthropic OAuth/SetupToken only)
|
||||
loadQuotaControlSettings(newAccount)
|
||||
|
||||
loadTempUnschedRules(credentials)
|
||||
loadTempUnschedRules(credentials)
|
||||
|
||||
// Initialize API Key fields for apikey type
|
||||
if (newAccount.type === 'apikey' && newAccount.credentials) {
|
||||
const credentials = newAccount.credentials as Record<string, unknown>
|
||||
const platformDefaultUrl =
|
||||
newAccount.platform === 'openai' || newAccount.platform === 'sora'
|
||||
? 'https://api.openai.com'
|
||||
: newAccount.platform === 'gemini'
|
||||
? 'https://generativelanguage.googleapis.com'
|
||||
: 'https://api.anthropic.com'
|
||||
editBaseUrl.value = (credentials.base_url as string) || platformDefaultUrl
|
||||
// Initialize API Key fields for apikey type
|
||||
if (newAccount.type === 'apikey' && newAccount.credentials) {
|
||||
const credentials = newAccount.credentials as Record<string, unknown>
|
||||
const platformDefaultUrl =
|
||||
newAccount.platform === 'openai' || newAccount.platform === 'sora'
|
||||
? 'https://api.openai.com'
|
||||
: newAccount.platform === 'gemini'
|
||||
? 'https://generativelanguage.googleapis.com'
|
||||
: 'https://api.anthropic.com'
|
||||
editBaseUrl.value = (credentials.base_url as string) || platformDefaultUrl
|
||||
|
||||
// Load model mappings and detect mode
|
||||
const existingMappings = credentials.model_mapping as Record<string, string> | undefined
|
||||
if (existingMappings && typeof existingMappings === 'object') {
|
||||
const entries = Object.entries(existingMappings)
|
||||
// Load model mappings and detect mode
|
||||
const existingMappings = credentials.model_mapping as Record<string, string> | undefined
|
||||
if (existingMappings && typeof existingMappings === 'object') {
|
||||
const entries = Object.entries(existingMappings)
|
||||
|
||||
// Detect if this is whitelist mode (all from === to) or mapping mode
|
||||
const isWhitelistMode = entries.length > 0 && entries.every(([from, to]) => from === to)
|
||||
// Detect if this is whitelist mode (all from === to) or mapping mode
|
||||
const isWhitelistMode = entries.length > 0 && entries.every(([from, to]) => from === to)
|
||||
|
||||
if (isWhitelistMode) {
|
||||
// Whitelist mode: populate allowedModels
|
||||
modelRestrictionMode.value = 'whitelist'
|
||||
allowedModels.value = entries.map(([from]) => from)
|
||||
modelMappings.value = []
|
||||
} else {
|
||||
// Mapping mode: populate modelMappings
|
||||
modelRestrictionMode.value = 'mapping'
|
||||
modelMappings.value = entries.map(([from, to]) => ({ from, to }))
|
||||
allowedModels.value = []
|
||||
}
|
||||
} else {
|
||||
// No mappings: default to whitelist mode with empty selection (allow all)
|
||||
modelRestrictionMode.value = 'whitelist'
|
||||
modelMappings.value = []
|
||||
allowedModels.value = []
|
||||
}
|
||||
|
||||
// Load pool mode
|
||||
poolModeEnabled.value = credentials.pool_mode === true
|
||||
poolModeRetryCount.value = normalizePoolModeRetryCount(
|
||||
Number(credentials.pool_mode_retry_count ?? DEFAULT_POOL_MODE_RETRY_COUNT)
|
||||
)
|
||||
|
||||
// Load custom error codes
|
||||
customErrorCodesEnabled.value = credentials.custom_error_codes_enabled === true
|
||||
const existingErrorCodes = credentials.custom_error_codes as number[] | undefined
|
||||
if (existingErrorCodes && Array.isArray(existingErrorCodes)) {
|
||||
selectedErrorCodes.value = [...existingErrorCodes]
|
||||
} else {
|
||||
selectedErrorCodes.value = []
|
||||
}
|
||||
} else if (newAccount.type === 'bedrock' && newAccount.credentials) {
|
||||
const bedrockCreds = newAccount.credentials as Record<string, unknown>
|
||||
const authMode = (bedrockCreds.auth_mode as string) || 'sigv4'
|
||||
editBedrockRegion.value = (bedrockCreds.aws_region as string) || ''
|
||||
editBedrockForceGlobal.value = (bedrockCreds.aws_force_global as string) === 'true'
|
||||
|
||||
if (authMode === 'apikey') {
|
||||
editBedrockApiKeyValue.value = ''
|
||||
} else {
|
||||
editBedrockAccessKeyId.value = (bedrockCreds.aws_access_key_id as string) || ''
|
||||
editBedrockSecretAccessKey.value = ''
|
||||
editBedrockSessionToken.value = ''
|
||||
}
|
||||
|
||||
// Load pool mode for bedrock
|
||||
poolModeEnabled.value = bedrockCreds.pool_mode === true
|
||||
const retryCount = bedrockCreds.pool_mode_retry_count
|
||||
poolModeRetryCount.value = (typeof retryCount === 'number' && retryCount >= 0) ? retryCount : DEFAULT_POOL_MODE_RETRY_COUNT
|
||||
|
||||
// Load quota limits for bedrock
|
||||
const bedrockExtra = (newAccount.extra as Record<string, unknown>) || {}
|
||||
editQuotaLimit.value = typeof bedrockExtra.quota_limit === 'number' ? bedrockExtra.quota_limit : null
|
||||
editQuotaDailyLimit.value = typeof bedrockExtra.quota_daily_limit === 'number' ? bedrockExtra.quota_daily_limit : null
|
||||
editQuotaWeeklyLimit.value = typeof bedrockExtra.quota_weekly_limit === 'number' ? bedrockExtra.quota_weekly_limit : null
|
||||
|
||||
// Load model mappings for bedrock
|
||||
const existingMappings = bedrockCreds.model_mapping as Record<string, string> | undefined
|
||||
if (existingMappings && typeof existingMappings === 'object') {
|
||||
const entries = Object.entries(existingMappings)
|
||||
const isWhitelistMode = entries.length > 0 && entries.every(([from, to]) => from === to)
|
||||
if (isWhitelistMode) {
|
||||
modelRestrictionMode.value = 'whitelist'
|
||||
allowedModels.value = entries.map(([from]) => from)
|
||||
modelMappings.value = []
|
||||
} else {
|
||||
modelRestrictionMode.value = 'mapping'
|
||||
modelMappings.value = entries.map(([from, to]) => ({ from, to }))
|
||||
allowedModels.value = []
|
||||
}
|
||||
} else {
|
||||
modelRestrictionMode.value = 'whitelist'
|
||||
modelMappings.value = []
|
||||
allowedModels.value = []
|
||||
}
|
||||
} else if (newAccount.type === 'upstream' && newAccount.credentials) {
|
||||
const credentials = newAccount.credentials as Record<string, unknown>
|
||||
editBaseUrl.value = (credentials.base_url as string) || ''
|
||||
if (isWhitelistMode) {
|
||||
// Whitelist mode: populate allowedModels
|
||||
modelRestrictionMode.value = 'whitelist'
|
||||
allowedModels.value = entries.map(([from]) => from)
|
||||
modelMappings.value = []
|
||||
} else {
|
||||
const platformDefaultUrl =
|
||||
newAccount.platform === 'openai' || newAccount.platform === 'sora'
|
||||
? 'https://api.openai.com'
|
||||
: newAccount.platform === 'gemini'
|
||||
? 'https://generativelanguage.googleapis.com'
|
||||
: 'https://api.anthropic.com'
|
||||
editBaseUrl.value = platformDefaultUrl
|
||||
// Mapping mode: populate modelMappings
|
||||
modelRestrictionMode.value = 'mapping'
|
||||
modelMappings.value = entries.map(([from, to]) => ({ from, to }))
|
||||
allowedModels.value = []
|
||||
}
|
||||
} else {
|
||||
// No mappings: default to whitelist mode with empty selection (allow all)
|
||||
modelRestrictionMode.value = 'whitelist'
|
||||
modelMappings.value = []
|
||||
allowedModels.value = []
|
||||
}
|
||||
|
||||
// Load model mappings for OpenAI OAuth accounts
|
||||
if (newAccount.platform === 'openai' && newAccount.credentials) {
|
||||
const oauthCredentials = newAccount.credentials as Record<string, unknown>
|
||||
const existingMappings = oauthCredentials.model_mapping as Record<string, string> | undefined
|
||||
if (existingMappings && typeof existingMappings === 'object') {
|
||||
const entries = Object.entries(existingMappings)
|
||||
const isWhitelistMode = entries.length > 0 && entries.every(([from, to]) => from === to)
|
||||
if (isWhitelistMode) {
|
||||
modelRestrictionMode.value = 'whitelist'
|
||||
allowedModels.value = entries.map(([from]) => from)
|
||||
modelMappings.value = []
|
||||
} else {
|
||||
modelRestrictionMode.value = 'mapping'
|
||||
modelMappings.value = entries.map(([from, to]) => ({ from, to }))
|
||||
allowedModels.value = []
|
||||
}
|
||||
} else {
|
||||
modelRestrictionMode.value = 'whitelist'
|
||||
modelMappings.value = []
|
||||
allowedModels.value = []
|
||||
}
|
||||
} else {
|
||||
// Load pool mode
|
||||
poolModeEnabled.value = credentials.pool_mode === true
|
||||
poolModeRetryCount.value = normalizePoolModeRetryCount(
|
||||
Number(credentials.pool_mode_retry_count ?? DEFAULT_POOL_MODE_RETRY_COUNT)
|
||||
)
|
||||
|
||||
// Load custom error codes
|
||||
customErrorCodesEnabled.value = credentials.custom_error_codes_enabled === true
|
||||
const existingErrorCodes = credentials.custom_error_codes as number[] | undefined
|
||||
if (existingErrorCodes && Array.isArray(existingErrorCodes)) {
|
||||
selectedErrorCodes.value = [...existingErrorCodes]
|
||||
} else {
|
||||
selectedErrorCodes.value = []
|
||||
}
|
||||
} else if (newAccount.type === 'bedrock' && newAccount.credentials) {
|
||||
const bedrockCreds = newAccount.credentials as Record<string, unknown>
|
||||
const authMode = (bedrockCreds.auth_mode as string) || 'sigv4'
|
||||
editBedrockRegion.value = (bedrockCreds.aws_region as string) || ''
|
||||
editBedrockForceGlobal.value = (bedrockCreds.aws_force_global as string) === 'true'
|
||||
|
||||
if (authMode === 'apikey') {
|
||||
editBedrockApiKeyValue.value = ''
|
||||
} else {
|
||||
editBedrockAccessKeyId.value = (bedrockCreds.aws_access_key_id as string) || ''
|
||||
editBedrockSecretAccessKey.value = ''
|
||||
editBedrockSessionToken.value = ''
|
||||
}
|
||||
|
||||
// Load pool mode for bedrock
|
||||
poolModeEnabled.value = bedrockCreds.pool_mode === true
|
||||
const retryCount = bedrockCreds.pool_mode_retry_count
|
||||
poolModeRetryCount.value = (typeof retryCount === 'number' && retryCount >= 0) ? retryCount : DEFAULT_POOL_MODE_RETRY_COUNT
|
||||
|
||||
// Load quota limits for bedrock
|
||||
const bedrockExtra = (newAccount.extra as Record<string, unknown>) || {}
|
||||
editQuotaLimit.value = typeof bedrockExtra.quota_limit === 'number' ? bedrockExtra.quota_limit : null
|
||||
editQuotaDailyLimit.value = typeof bedrockExtra.quota_daily_limit === 'number' ? bedrockExtra.quota_daily_limit : null
|
||||
editQuotaWeeklyLimit.value = typeof bedrockExtra.quota_weekly_limit === 'number' ? bedrockExtra.quota_weekly_limit : null
|
||||
|
||||
// Load model mappings for bedrock
|
||||
const existingMappings = bedrockCreds.model_mapping as Record<string, string> | undefined
|
||||
if (existingMappings && typeof existingMappings === 'object') {
|
||||
const entries = Object.entries(existingMappings)
|
||||
const isWhitelistMode = entries.length > 0 && entries.every(([from, to]) => from === to)
|
||||
if (isWhitelistMode) {
|
||||
modelRestrictionMode.value = 'whitelist'
|
||||
allowedModels.value = entries.map(([from]) => from)
|
||||
modelMappings.value = []
|
||||
} else {
|
||||
modelRestrictionMode.value = 'mapping'
|
||||
modelMappings.value = entries.map(([from, to]) => ({ from, to }))
|
||||
allowedModels.value = []
|
||||
}
|
||||
} else {
|
||||
modelRestrictionMode.value = 'whitelist'
|
||||
modelMappings.value = []
|
||||
allowedModels.value = []
|
||||
}
|
||||
} else if (newAccount.type === 'upstream' && newAccount.credentials) {
|
||||
const credentials = newAccount.credentials as Record<string, unknown>
|
||||
editBaseUrl.value = (credentials.base_url as string) || ''
|
||||
} else {
|
||||
const platformDefaultUrl =
|
||||
newAccount.platform === 'openai' || newAccount.platform === 'sora'
|
||||
? 'https://api.openai.com'
|
||||
: newAccount.platform === 'gemini'
|
||||
? 'https://generativelanguage.googleapis.com'
|
||||
: 'https://api.anthropic.com'
|
||||
editBaseUrl.value = platformDefaultUrl
|
||||
|
||||
// Load model mappings for OpenAI OAuth accounts
|
||||
if (newAccount.platform === 'openai' && newAccount.credentials) {
|
||||
const oauthCredentials = newAccount.credentials as Record<string, unknown>
|
||||
const existingMappings = oauthCredentials.model_mapping as Record<string, string> | undefined
|
||||
if (existingMappings && typeof existingMappings === 'object') {
|
||||
const entries = Object.entries(existingMappings)
|
||||
const isWhitelistMode = entries.length > 0 && entries.every(([from, to]) => from === to)
|
||||
if (isWhitelistMode) {
|
||||
modelRestrictionMode.value = 'whitelist'
|
||||
allowedModels.value = entries.map(([from]) => from)
|
||||
modelMappings.value = []
|
||||
} else {
|
||||
modelRestrictionMode.value = 'mapping'
|
||||
modelMappings.value = entries.map(([from, to]) => ({ from, to }))
|
||||
allowedModels.value = []
|
||||
}
|
||||
poolModeEnabled.value = false
|
||||
poolModeRetryCount.value = DEFAULT_POOL_MODE_RETRY_COUNT
|
||||
customErrorCodesEnabled.value = false
|
||||
selectedErrorCodes.value = []
|
||||
} else {
|
||||
modelRestrictionMode.value = 'whitelist'
|
||||
modelMappings.value = []
|
||||
allowedModels.value = []
|
||||
}
|
||||
editApiKey.value = ''
|
||||
} else {
|
||||
modelRestrictionMode.value = 'whitelist'
|
||||
modelMappings.value = []
|
||||
allowedModels.value = []
|
||||
}
|
||||
poolModeEnabled.value = false
|
||||
poolModeRetryCount.value = DEFAULT_POOL_MODE_RETRY_COUNT
|
||||
customErrorCodesEnabled.value = false
|
||||
selectedErrorCodes.value = []
|
||||
}
|
||||
editApiKey.value = ''
|
||||
}
|
||||
|
||||
watch(
|
||||
[() => props.show, () => props.account],
|
||||
([show, newAccount], [wasShow, previousAccount]) => {
|
||||
if (!show || !newAccount) {
|
||||
return
|
||||
}
|
||||
if (!wasShow || newAccount !== previousAccount) {
|
||||
syncFormFromAccount(newAccount)
|
||||
}
|
||||
},
|
||||
{ immediate: true }
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
<div>
|
||||
<!-- Window stats row (above progress bar) -->
|
||||
<div
|
||||
v-if="windowStats"
|
||||
v-if="windowStats && (windowStats.requests > 0 || windowStats.tokens > 0)"
|
||||
class="mb-0.5 flex items-center"
|
||||
>
|
||||
<div class="flex items-center gap-1.5 text-[9px] text-gray-500 dark:text-gray-400">
|
||||
@@ -12,12 +12,13 @@
|
||||
<span class="rounded bg-gray-100 px-1.5 py-0.5 dark:bg-gray-800">
|
||||
{{ formatTokens }}
|
||||
</span>
|
||||
<span class="rounded bg-gray-100 px-1.5 py-0.5 dark:bg-gray-800">
|
||||
<span class="rounded bg-gray-100 px-1.5 py-0.5 dark:bg-gray-800" :title="t('usage.accountBilled')">
|
||||
A ${{ formatAccountCost }}
|
||||
</span>
|
||||
<span
|
||||
v-if="windowStats?.user_cost != null"
|
||||
class="rounded bg-gray-100 px-1.5 py-0.5 dark:bg-gray-800"
|
||||
:title="t('usage.userBilled')"
|
||||
>
|
||||
U ${{ formatUserCost }}
|
||||
</span>
|
||||
@@ -47,7 +48,7 @@
|
||||
</span>
|
||||
|
||||
<!-- Reset time -->
|
||||
<span v-if="resetsAt" class="shrink-0 text-[10px] text-gray-400">
|
||||
<span v-if="shouldShowResetTime" class="shrink-0 text-[10px] text-gray-400">
|
||||
{{ formatResetTime }}
|
||||
</span>
|
||||
</div>
|
||||
@@ -55,8 +56,11 @@
|
||||
</template>
|
||||
|
||||
<script setup lang="ts">
|
||||
import { computed } from 'vue'
|
||||
import { computed, ref, watch } from 'vue'
|
||||
import { useIntervalFn } from '@vueuse/core'
|
||||
import { useI18n } from 'vue-i18n'
|
||||
import type { WindowStats } from '@/types'
|
||||
import { formatCompactNumber } from '@/utils/format'
|
||||
|
||||
const props = defineProps<{
|
||||
label: string
|
||||
@@ -64,8 +68,34 @@ const props = defineProps<{
|
||||
resetsAt?: string | null
|
||||
color: 'indigo' | 'emerald' | 'purple' | 'amber'
|
||||
windowStats?: WindowStats | null
|
||||
showNowWhenIdle?: boolean
|
||||
}>()
|
||||
|
||||
const { t } = useI18n()
|
||||
|
||||
// Reactive clock for countdown — only runs when a reset time is shown,
|
||||
// to avoid creating many idle timers across large account lists.
|
||||
const now = ref(new Date())
|
||||
const { pause: pauseClock, resume: resumeClock } = useIntervalFn(
|
||||
() => {
|
||||
now.value = new Date()
|
||||
},
|
||||
60_000,
|
||||
{ immediate: false },
|
||||
)
|
||||
if (props.resetsAt) resumeClock()
|
||||
watch(
|
||||
() => props.resetsAt,
|
||||
(val) => {
|
||||
if (val) {
|
||||
now.value = new Date()
|
||||
resumeClock()
|
||||
} else {
|
||||
pauseClock()
|
||||
}
|
||||
},
|
||||
)
|
||||
|
||||
// Label background colors
|
||||
const labelClass = computed(() => {
|
||||
const colors = {
|
||||
@@ -110,12 +140,22 @@ const displayPercent = computed(() => {
|
||||
return percent > 999 ? '>999%' : `${percent}%`
|
||||
})
|
||||
|
||||
const shouldShowResetTime = computed(() => {
|
||||
if (props.resetsAt) return true
|
||||
return Boolean(props.showNowWhenIdle && props.utilization <= 0)
|
||||
})
|
||||
|
||||
// Format reset time
|
||||
const formatResetTime = computed(() => {
|
||||
// For rolling windows, when utilization is 0%, treat as immediately available.
|
||||
if (props.showNowWhenIdle && props.utilization <= 0) {
|
||||
return '现在'
|
||||
}
|
||||
|
||||
if (!props.resetsAt) return '-'
|
||||
|
||||
const date = new Date(props.resetsAt)
|
||||
const now = new Date()
|
||||
const diffMs = date.getTime() - now.getTime()
|
||||
const diffMs = date.getTime() - now.value.getTime()
|
||||
|
||||
if (diffMs <= 0) return '现在'
|
||||
|
||||
@@ -135,19 +175,12 @@ const formatResetTime = computed(() => {
|
||||
// Window stats formatters
|
||||
const formatRequests = computed(() => {
|
||||
if (!props.windowStats) return ''
|
||||
const r = props.windowStats.requests
|
||||
if (r >= 1000000) return `${(r / 1000000).toFixed(1)}M`
|
||||
if (r >= 1000) return `${(r / 1000).toFixed(1)}K`
|
||||
return r.toString()
|
||||
return formatCompactNumber(props.windowStats.requests, { allowBillions: false })
|
||||
})
|
||||
|
||||
const formatTokens = computed(() => {
|
||||
if (!props.windowStats) return ''
|
||||
const t = props.windowStats.tokens
|
||||
if (t >= 1000000000) return `${(t / 1000000000).toFixed(1)}B`
|
||||
if (t >= 1000000) return `${(t / 1000000).toFixed(1)}M`
|
||||
if (t >= 1000) return `${(t / 1000).toFixed(1)}K`
|
||||
return t.toString()
|
||||
return formatCompactNumber(props.windowStats.tokens)
|
||||
})
|
||||
|
||||
const formatAccountCost = computed(() => {
|
||||
|
||||
@@ -198,7 +198,34 @@ describe('AccountUsageCell', () => {
|
||||
expect(wrapper.text()).toContain('7d|77|300')
|
||||
})
|
||||
|
||||
it('OpenAI OAuth 有现成快照且未限额时不会首屏请求 usage', async () => {
|
||||
it('OpenAI OAuth 有 codex 快照时仍然使用 /usage API 数据渲染', async () => {
|
||||
getUsage.mockResolvedValue({
|
||||
five_hour: {
|
||||
utilization: 18,
|
||||
resets_at: '2099-03-07T12:00:00Z',
|
||||
remaining_seconds: 3600,
|
||||
window_stats: {
|
||||
requests: 9,
|
||||
tokens: 900,
|
||||
cost: 0.09,
|
||||
standard_cost: 0.09,
|
||||
user_cost: 0.09
|
||||
}
|
||||
},
|
||||
seven_day: {
|
||||
utilization: 36,
|
||||
resets_at: '2099-03-13T12:00:00Z',
|
||||
remaining_seconds: 3600,
|
||||
window_stats: {
|
||||
requests: 9,
|
||||
tokens: 900,
|
||||
cost: 0.09,
|
||||
standard_cost: 0.09,
|
||||
user_cost: 0.09
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
const wrapper = mount(AccountUsageCell, {
|
||||
props: {
|
||||
account: makeAccount({
|
||||
@@ -218,7 +245,7 @@ describe('AccountUsageCell', () => {
|
||||
stubs: {
|
||||
UsageProgressBar: {
|
||||
props: ['label', 'utilization', 'resetsAt', 'windowStats', 'color'],
|
||||
template: '<div class="usage-bar">{{ label }}|{{ utilization }}</div>'
|
||||
template: '<div class="usage-bar">{{ label }}|{{ utilization }}|{{ windowStats?.tokens }}</div>'
|
||||
},
|
||||
AccountQuotaInfo: true
|
||||
}
|
||||
@@ -227,9 +254,80 @@ describe('AccountUsageCell', () => {
|
||||
|
||||
await flushPromises()
|
||||
|
||||
expect(getUsage).not.toHaveBeenCalled()
|
||||
expect(wrapper.text()).toContain('5h|12')
|
||||
expect(wrapper.text()).toContain('7d|34')
|
||||
expect(getUsage).toHaveBeenCalledWith(2001)
|
||||
// 单一数据源:始终使用 /usage API 返回值,忽略 codex 快照
|
||||
expect(wrapper.text()).toContain('5h|18|900')
|
||||
expect(wrapper.text()).toContain('7d|36|900')
|
||||
})
|
||||
|
||||
it('OpenAI OAuth 有现成快照时,手动刷新信号会触发 usage 重拉', async () => {
|
||||
getUsage.mockResolvedValue({
|
||||
five_hour: {
|
||||
utilization: 18,
|
||||
resets_at: '2099-03-07T12:00:00Z',
|
||||
remaining_seconds: 3600,
|
||||
window_stats: {
|
||||
requests: 9,
|
||||
tokens: 900,
|
||||
cost: 0.09,
|
||||
standard_cost: 0.09,
|
||||
user_cost: 0.09
|
||||
}
|
||||
},
|
||||
seven_day: {
|
||||
utilization: 36,
|
||||
resets_at: '2099-03-13T12:00:00Z',
|
||||
remaining_seconds: 3600,
|
||||
window_stats: {
|
||||
requests: 9,
|
||||
tokens: 900,
|
||||
cost: 0.09,
|
||||
standard_cost: 0.09,
|
||||
user_cost: 0.09
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
const wrapper = mount(AccountUsageCell, {
|
||||
props: {
|
||||
account: makeAccount({
|
||||
id: 2010,
|
||||
platform: 'openai',
|
||||
type: 'oauth',
|
||||
extra: {
|
||||
codex_usage_updated_at: '2099-03-07T10:00:00Z',
|
||||
codex_5h_used_percent: 12,
|
||||
codex_5h_reset_at: '2099-03-07T12:00:00Z',
|
||||
codex_7d_used_percent: 34,
|
||||
codex_7d_reset_at: '2099-03-13T12:00:00Z'
|
||||
},
|
||||
rate_limit_reset_at: null
|
||||
}),
|
||||
manualRefreshToken: 0
|
||||
},
|
||||
global: {
|
||||
stubs: {
|
||||
UsageProgressBar: {
|
||||
props: ['label', 'utilization', 'resetsAt', 'windowStats', 'color'],
|
||||
template: '<div class="usage-bar">{{ label }}|{{ utilization }}|{{ windowStats?.tokens }}</div>'
|
||||
},
|
||||
AccountQuotaInfo: true
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
await flushPromises()
|
||||
// mount 时已经拉取一次
|
||||
expect(getUsage).toHaveBeenCalledTimes(1)
|
||||
|
||||
await wrapper.setProps({ manualRefreshToken: 1 })
|
||||
await flushPromises()
|
||||
|
||||
// 手动刷新再拉一次
|
||||
expect(getUsage).toHaveBeenCalledTimes(2)
|
||||
expect(getUsage).toHaveBeenCalledWith(2010)
|
||||
// 单一数据源:始终使用 /usage API 值
|
||||
expect(wrapper.text()).toContain('5h|18|900')
|
||||
})
|
||||
|
||||
it('OpenAI OAuth 在无 codex 快照时会回退显示 usage 接口窗口', async () => {
|
||||
@@ -360,7 +458,7 @@ describe('AccountUsageCell', () => {
|
||||
expect(wrapper.text()).toContain('5h|0|200')
|
||||
})
|
||||
|
||||
it('OpenAI OAuth 已限额时首屏优先展示重新查询后的 usage,而不是旧 codex 快照', async () => {
|
||||
it('OpenAI OAuth 已限额时显示 /usage API 返回的限额数据', async () => {
|
||||
getUsage.mockResolvedValue({
|
||||
five_hour: {
|
||||
utilization: 100,
|
||||
@@ -414,9 +512,95 @@ describe('AccountUsageCell', () => {
|
||||
|
||||
await flushPromises()
|
||||
|
||||
expect(getUsage).toHaveBeenCalledWith(2004)
|
||||
expect(wrapper.text()).toContain('5h|100|106540000')
|
||||
expect(wrapper.text()).toContain('7d|100|106540000')
|
||||
expect(wrapper.text()).not.toContain('5h|0|')
|
||||
expect(getUsage).toHaveBeenCalledWith(2004)
|
||||
expect(wrapper.text()).toContain('5h|100|106540000')
|
||||
expect(wrapper.text()).toContain('7d|100|106540000')
|
||||
})
|
||||
|
||||
it('Key 账号会展示 today stats 徽章并带 A/U 提示', async () => {
|
||||
const wrapper = mount(AccountUsageCell, {
|
||||
props: {
|
||||
account: makeAccount({
|
||||
id: 3001,
|
||||
platform: 'anthropic',
|
||||
type: 'apikey'
|
||||
}),
|
||||
todayStats: {
|
||||
requests: 1_000_000,
|
||||
tokens: 1_000_000_000,
|
||||
cost: 12.345,
|
||||
standard_cost: 12.345,
|
||||
user_cost: 6.789
|
||||
}
|
||||
},
|
||||
global: {
|
||||
stubs: {
|
||||
UsageProgressBar: true,
|
||||
AccountQuotaInfo: true
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
await flushPromises()
|
||||
|
||||
expect(wrapper.text()).toContain('1.0M req')
|
||||
expect(wrapper.text()).toContain('1.0B')
|
||||
expect(wrapper.text()).toContain('A $12.35')
|
||||
expect(wrapper.text()).toContain('U $6.79')
|
||||
|
||||
const badges = wrapper.findAll('span[title]')
|
||||
expect(badges.some(node => node.attributes('title') === 'usage.accountBilled')).toBe(true)
|
||||
expect(badges.some(node => node.attributes('title') === 'usage.userBilled')).toBe(true)
|
||||
})
|
||||
|
||||
it('Key 账号在 today stats loading 时显示骨架屏', async () => {
|
||||
const wrapper = mount(AccountUsageCell, {
|
||||
props: {
|
||||
account: makeAccount({
|
||||
id: 3002,
|
||||
platform: 'anthropic',
|
||||
type: 'apikey'
|
||||
}),
|
||||
todayStats: null,
|
||||
todayStatsLoading: true
|
||||
},
|
||||
global: {
|
||||
stubs: {
|
||||
UsageProgressBar: true,
|
||||
AccountQuotaInfo: true
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
await flushPromises()
|
||||
|
||||
expect(wrapper.findAll('.animate-pulse').length).toBeGreaterThan(0)
|
||||
})
|
||||
|
||||
it('Key 账号在无 today stats 且无配额时显示兜底短横线', async () => {
|
||||
const wrapper = mount(AccountUsageCell, {
|
||||
props: {
|
||||
account: makeAccount({
|
||||
id: 3003,
|
||||
platform: 'anthropic',
|
||||
type: 'apikey',
|
||||
quota_limit: 0,
|
||||
quota_daily_limit: 0,
|
||||
quota_weekly_limit: 0
|
||||
}),
|
||||
todayStats: null,
|
||||
todayStatsLoading: false
|
||||
},
|
||||
global: {
|
||||
stubs: {
|
||||
UsageProgressBar: true,
|
||||
AccountQuotaInfo: true
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
await flushPromises()
|
||||
|
||||
expect(wrapper.text().trim()).toBe('-')
|
||||
})
|
||||
})
|
||||
|
||||
@@ -0,0 +1,159 @@
|
||||
import { describe, expect, it, vi } from 'vitest'
|
||||
import { defineComponent } from 'vue'
|
||||
import { mount } from '@vue/test-utils'
|
||||
|
||||
const { updateAccountMock, checkMixedChannelRiskMock } = vi.hoisted(() => ({
|
||||
updateAccountMock: vi.fn(),
|
||||
checkMixedChannelRiskMock: vi.fn()
|
||||
}))
|
||||
|
||||
vi.mock('@/stores/app', () => ({
|
||||
useAppStore: () => ({
|
||||
showError: vi.fn(),
|
||||
showSuccess: vi.fn(),
|
||||
showInfo: vi.fn()
|
||||
})
|
||||
}))
|
||||
|
||||
vi.mock('@/stores/auth', () => ({
|
||||
useAuthStore: () => ({
|
||||
isSimpleMode: true
|
||||
})
|
||||
}))
|
||||
|
||||
vi.mock('@/api/admin', () => ({
|
||||
adminAPI: {
|
||||
accounts: {
|
||||
update: updateAccountMock,
|
||||
checkMixedChannelRisk: checkMixedChannelRiskMock
|
||||
}
|
||||
}
|
||||
}))
|
||||
|
||||
vi.mock('@/api/admin/accounts', () => ({
|
||||
getAntigravityDefaultModelMapping: vi.fn()
|
||||
}))
|
||||
|
||||
vi.mock('vue-i18n', async () => {
|
||||
const actual = await vi.importActual<typeof import('vue-i18n')>('vue-i18n')
|
||||
return {
|
||||
...actual,
|
||||
useI18n: () => ({
|
||||
t: (key: string) => key
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
import EditAccountModal from '../EditAccountModal.vue'
|
||||
|
||||
const BaseDialogStub = defineComponent({
|
||||
name: 'BaseDialog',
|
||||
props: {
|
||||
show: {
|
||||
type: Boolean,
|
||||
default: false
|
||||
}
|
||||
},
|
||||
template: '<div v-if="show"><slot /><slot name="footer" /></div>'
|
||||
})
|
||||
|
||||
const ModelWhitelistSelectorStub = defineComponent({
|
||||
name: 'ModelWhitelistSelector',
|
||||
props: {
|
||||
modelValue: {
|
||||
type: Array,
|
||||
default: () => []
|
||||
}
|
||||
},
|
||||
emits: ['update:modelValue'],
|
||||
template: `
|
||||
<div>
|
||||
<button
|
||||
type="button"
|
||||
data-testid="rewrite-to-snapshot"
|
||||
@click="$emit('update:modelValue', ['gpt-5.2-2025-12-11'])"
|
||||
>
|
||||
rewrite
|
||||
</button>
|
||||
<span data-testid="model-whitelist-value">
|
||||
{{ Array.isArray(modelValue) ? modelValue.join(',') : '' }}
|
||||
</span>
|
||||
</div>
|
||||
`
|
||||
})
|
||||
|
||||
function buildAccount() {
|
||||
return {
|
||||
id: 1,
|
||||
name: 'OpenAI Key',
|
||||
notes: '',
|
||||
platform: 'openai',
|
||||
type: 'apikey',
|
||||
credentials: {
|
||||
api_key: 'sk-test',
|
||||
base_url: 'https://api.openai.com',
|
||||
model_mapping: {
|
||||
'gpt-5.2': 'gpt-5.2'
|
||||
}
|
||||
},
|
||||
extra: {},
|
||||
proxy_id: null,
|
||||
concurrency: 1,
|
||||
priority: 1,
|
||||
rate_multiplier: 1,
|
||||
status: 'active',
|
||||
group_ids: [],
|
||||
expires_at: null,
|
||||
auto_pause_on_expired: false
|
||||
} as any
|
||||
}
|
||||
|
||||
function mountModal(account = buildAccount()) {
|
||||
return mount(EditAccountModal, {
|
||||
props: {
|
||||
show: true,
|
||||
account,
|
||||
proxies: [],
|
||||
groups: []
|
||||
},
|
||||
global: {
|
||||
stubs: {
|
||||
BaseDialog: BaseDialogStub,
|
||||
Select: true,
|
||||
Icon: true,
|
||||
ProxySelector: true,
|
||||
GroupSelector: true,
|
||||
ModelWhitelistSelector: ModelWhitelistSelectorStub
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
describe('EditAccountModal', () => {
|
||||
it('reopening the same account rehydrates the OpenAI whitelist from props', async () => {
|
||||
const account = buildAccount()
|
||||
updateAccountMock.mockReset()
|
||||
checkMixedChannelRiskMock.mockReset()
|
||||
checkMixedChannelRiskMock.mockResolvedValue({ has_risk: false })
|
||||
updateAccountMock.mockResolvedValue(account)
|
||||
|
||||
const wrapper = mountModal(account)
|
||||
|
||||
expect(wrapper.get('[data-testid="model-whitelist-value"]').text()).toBe('gpt-5.2')
|
||||
|
||||
await wrapper.get('[data-testid="rewrite-to-snapshot"]').trigger('click')
|
||||
expect(wrapper.get('[data-testid="model-whitelist-value"]').text()).toBe('gpt-5.2-2025-12-11')
|
||||
|
||||
await wrapper.setProps({ show: false })
|
||||
await wrapper.setProps({ show: true })
|
||||
|
||||
expect(wrapper.get('[data-testid="model-whitelist-value"]').text()).toBe('gpt-5.2')
|
||||
|
||||
await wrapper.get('form#edit-account-form').trigger('submit.prevent')
|
||||
|
||||
expect(updateAccountMock).toHaveBeenCalledTimes(1)
|
||||
expect(updateAccountMock.mock.calls[0]?.[1]?.credentials?.model_mapping).toEqual({
|
||||
'gpt-5.2': 'gpt-5.2'
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -0,0 +1,69 @@
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
import { mount } from '@vue/test-utils'
|
||||
import UsageProgressBar from '../UsageProgressBar.vue'
|
||||
|
||||
vi.mock('vue-i18n', async () => {
|
||||
const actual = await vi.importActual<typeof import('vue-i18n')>('vue-i18n')
|
||||
return {
|
||||
...actual,
|
||||
useI18n: () => ({
|
||||
t: (key: string) => key
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
describe('UsageProgressBar', () => {
|
||||
beforeEach(() => {
|
||||
vi.useFakeTimers()
|
||||
vi.setSystemTime(new Date('2026-03-17T00:00:00Z'))
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
vi.useRealTimers()
|
||||
})
|
||||
|
||||
it('showNowWhenIdle=true 且利用率为 0 时显示“现在”', () => {
|
||||
const wrapper = mount(UsageProgressBar, {
|
||||
props: {
|
||||
label: '5h',
|
||||
utilization: 0,
|
||||
resetsAt: '2026-03-17T02:30:00Z',
|
||||
showNowWhenIdle: true,
|
||||
color: 'indigo'
|
||||
}
|
||||
})
|
||||
|
||||
expect(wrapper.text()).toContain('现在')
|
||||
expect(wrapper.text()).not.toContain('2h 30m')
|
||||
})
|
||||
|
||||
it('showNowWhenIdle=true 但利用率大于 0 时显示倒计时', () => {
|
||||
const wrapper = mount(UsageProgressBar, {
|
||||
props: {
|
||||
label: '7d',
|
||||
utilization: 12,
|
||||
resetsAt: '2026-03-17T02:30:00Z',
|
||||
showNowWhenIdle: true,
|
||||
color: 'emerald'
|
||||
}
|
||||
})
|
||||
|
||||
expect(wrapper.text()).toContain('2h 30m')
|
||||
expect(wrapper.text()).not.toContain('现在')
|
||||
})
|
||||
|
||||
it('showNowWhenIdle=false 时保持原有倒计时行为', () => {
|
||||
const wrapper = mount(UsageProgressBar, {
|
||||
props: {
|
||||
label: '1d',
|
||||
utilization: 0,
|
||||
resetsAt: '2026-03-17T02:30:00Z',
|
||||
showNowWhenIdle: false,
|
||||
color: 'indigo'
|
||||
}
|
||||
})
|
||||
|
||||
expect(wrapper.text()).toContain('2h 30m')
|
||||
expect(wrapper.text()).not.toContain('现在')
|
||||
})
|
||||
})
|
||||
@@ -139,17 +139,6 @@
|
||||
<Select v-model="filters.group_id" :options="groupOptions" searchable @change="emitChange" />
|
||||
</div>
|
||||
|
||||
<!-- Date Range Filter -->
|
||||
<div class="w-full sm:w-auto [&_.date-picker-trigger]:w-full">
|
||||
<label class="input-label">{{ t('usage.timeRange') }}</label>
|
||||
<DateRangePicker
|
||||
:start-date="startDate"
|
||||
:end-date="endDate"
|
||||
@update:startDate="updateStartDate"
|
||||
@update:endDate="updateEndDate"
|
||||
@change="emitChange"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Right: actions -->
|
||||
@@ -177,7 +166,6 @@ import { ref, onMounted, onUnmounted, toRef, watch } from 'vue'
|
||||
import { useI18n } from 'vue-i18n'
|
||||
import { adminAPI } from '@/api/admin'
|
||||
import Select, { type SelectOption } from '@/components/common/Select.vue'
|
||||
import DateRangePicker from '@/components/common/DateRangePicker.vue'
|
||||
import type { SimpleApiKey, SimpleUser } from '@/api/admin/usage'
|
||||
|
||||
type ModelValue = Record<string, any>
|
||||
@@ -195,8 +183,6 @@ const props = withDefaults(defineProps<Props>(), {
|
||||
})
|
||||
const emit = defineEmits([
|
||||
'update:modelValue',
|
||||
'update:startDate',
|
||||
'update:endDate',
|
||||
'change',
|
||||
'refresh',
|
||||
'reset',
|
||||
@@ -248,16 +234,6 @@ const billingTypeOptions = ref<SelectOption[]>([
|
||||
|
||||
const emitChange = () => emit('change')
|
||||
|
||||
const updateStartDate = (value: string) => {
|
||||
emit('update:startDate', value)
|
||||
filters.value.start_date = value
|
||||
}
|
||||
|
||||
const updateEndDate = (value: string) => {
|
||||
emit('update:endDate', value)
|
||||
filters.value.end_date = value
|
||||
}
|
||||
|
||||
const debounceUserSearch = () => {
|
||||
if (userSearchTimeout) clearTimeout(userSearchTimeout)
|
||||
userSearchTimeout = setTimeout(async () => {
|
||||
@@ -441,7 +417,11 @@ onMounted(async () => {
|
||||
groupOptions.value.push(...gs.items.map((g: any) => ({ value: g.id, label: g.name })))
|
||||
|
||||
const uniqueModels = new Set<string>()
|
||||
ms.models?.forEach((s: any) => s.model && uniqueModels.add(s.model))
|
||||
ms.models?.forEach((s: any) => {
|
||||
if (s.model) {
|
||||
uniqueModels.add(s.model)
|
||||
}
|
||||
})
|
||||
modelOptions.value.push(
|
||||
...Array.from(uniqueModels)
|
||||
.sort()
|
||||
|
||||
@@ -87,27 +87,40 @@
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr
|
||||
v-for="item in displayEndpointStats"
|
||||
:key="item.endpoint"
|
||||
class="border-t border-gray-100 dark:border-gray-700"
|
||||
>
|
||||
<td class="max-w-[180px] truncate py-1.5 font-medium text-gray-900 dark:text-white" :title="item.endpoint">
|
||||
{{ item.endpoint }}
|
||||
</td>
|
||||
<td class="py-1.5 text-right text-gray-600 dark:text-gray-400">
|
||||
{{ formatNumber(item.requests) }}
|
||||
</td>
|
||||
<td class="py-1.5 text-right text-gray-600 dark:text-gray-400">
|
||||
{{ formatTokens(item.total_tokens) }}
|
||||
</td>
|
||||
<td class="py-1.5 text-right text-green-600 dark:text-green-400">
|
||||
${{ formatCost(item.actual_cost) }}
|
||||
</td>
|
||||
<td class="py-1.5 text-right text-gray-400 dark:text-gray-500">
|
||||
${{ formatCost(item.cost) }}
|
||||
</td>
|
||||
</tr>
|
||||
<template v-for="item in displayEndpointStats" :key="item.endpoint">
|
||||
<tr
|
||||
class="border-t border-gray-100 cursor-pointer transition-colors hover:bg-gray-50 dark:border-gray-700 dark:hover:bg-dark-700/40"
|
||||
@click="toggleBreakdown(item.endpoint)"
|
||||
>
|
||||
<td class="max-w-[180px] truncate py-1.5 font-medium text-blue-600 hover:text-blue-800 dark:text-blue-400 dark:hover:text-blue-300" :title="item.endpoint">
|
||||
<span class="inline-flex items-center gap-1">
|
||||
<svg v-if="expandedKey === item.endpoint" class="h-3 w-3 shrink-0" fill="none" stroke="currentColor" viewBox="0 0 24 24"><path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M19 9l-7 7-7-7"/></svg>
|
||||
<svg v-else class="h-3 w-3 shrink-0" fill="none" stroke="currentColor" viewBox="0 0 24 24"><path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M9 5l7 7-7 7"/></svg>
|
||||
{{ item.endpoint }}
|
||||
</span>
|
||||
</td>
|
||||
<td class="py-1.5 text-right text-gray-600 dark:text-gray-400">
|
||||
{{ formatNumber(item.requests) }}
|
||||
</td>
|
||||
<td class="py-1.5 text-right text-gray-600 dark:text-gray-400">
|
||||
{{ formatTokens(item.total_tokens) }}
|
||||
</td>
|
||||
<td class="py-1.5 text-right text-green-600 dark:text-green-400">
|
||||
${{ formatCost(item.actual_cost) }}
|
||||
</td>
|
||||
<td class="py-1.5 text-right text-gray-400 dark:text-gray-500">
|
||||
${{ formatCost(item.cost) }}
|
||||
</td>
|
||||
</tr>
|
||||
<tr v-if="expandedKey === item.endpoint">
|
||||
<td colspan="5" class="p-0">
|
||||
<UserBreakdownSubTable
|
||||
:items="breakdownItems"
|
||||
:loading="breakdownLoading"
|
||||
/>
|
||||
</td>
|
||||
</tr>
|
||||
</template>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
@@ -119,12 +132,14 @@
|
||||
</template>
|
||||
|
||||
<script setup lang="ts">
|
||||
import { computed } from 'vue'
|
||||
import { computed, ref } from 'vue'
|
||||
import { useI18n } from 'vue-i18n'
|
||||
import { Chart as ChartJS, ArcElement, Tooltip, Legend } from 'chart.js'
|
||||
import { Doughnut } from 'vue-chartjs'
|
||||
import LoadingSpinner from '@/components/common/LoadingSpinner.vue'
|
||||
import type { EndpointStat } from '@/types'
|
||||
import UserBreakdownSubTable from './UserBreakdownSubTable.vue'
|
||||
import type { EndpointStat, UserBreakdownItem } from '@/types'
|
||||
import { getUserBreakdown } from '@/api/admin/dashboard'
|
||||
|
||||
ChartJS.register(ArcElement, Tooltip, Legend)
|
||||
|
||||
@@ -144,6 +159,8 @@ const props = withDefaults(
|
||||
source?: EndpointSource
|
||||
showMetricToggle?: boolean
|
||||
showSourceToggle?: boolean
|
||||
startDate?: string
|
||||
endDate?: string
|
||||
}>(),
|
||||
{
|
||||
upstreamEndpointStats: () => [],
|
||||
@@ -162,6 +179,33 @@ const emit = defineEmits<{
|
||||
'update:source': [value: EndpointSource]
|
||||
}>()
|
||||
|
||||
const expandedKey = ref<string | null>(null)
|
||||
const breakdownItems = ref<UserBreakdownItem[]>([])
|
||||
const breakdownLoading = ref(false)
|
||||
|
||||
const toggleBreakdown = async (endpoint: string) => {
|
||||
if (expandedKey.value === endpoint) {
|
||||
expandedKey.value = null
|
||||
return
|
||||
}
|
||||
expandedKey.value = endpoint
|
||||
breakdownLoading.value = true
|
||||
breakdownItems.value = []
|
||||
try {
|
||||
const res = await getUserBreakdown({
|
||||
start_date: props.startDate,
|
||||
end_date: props.endDate,
|
||||
endpoint,
|
||||
endpoint_type: props.source,
|
||||
})
|
||||
breakdownItems.value = res.users || []
|
||||
} catch {
|
||||
breakdownItems.value = []
|
||||
} finally {
|
||||
breakdownLoading.value = false
|
||||
}
|
||||
}
|
||||
|
||||
const chartColors = [
|
||||
'#3b82f6',
|
||||
'#10b981',
|
||||
|
||||
@@ -49,30 +49,46 @@
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr
|
||||
v-for="group in displayGroupStats"
|
||||
:key="group.group_id"
|
||||
class="border-t border-gray-100 dark:border-gray-700"
|
||||
>
|
||||
<td
|
||||
class="max-w-[100px] truncate py-1.5 font-medium text-gray-900 dark:text-white"
|
||||
:title="group.group_name || String(group.group_id)"
|
||||
<template v-for="group in displayGroupStats" :key="group.group_id">
|
||||
<tr
|
||||
class="border-t border-gray-100 transition-colors dark:border-gray-700"
|
||||
:class="group.group_id > 0 ? 'cursor-pointer hover:bg-gray-50 dark:hover:bg-dark-700/40' : ''"
|
||||
@click="group.group_id > 0 && toggleBreakdown('group', group.group_id)"
|
||||
>
|
||||
{{ group.group_name || t('admin.dashboard.noGroup') }}
|
||||
</td>
|
||||
<td class="py-1.5 text-right text-gray-600 dark:text-gray-400">
|
||||
{{ formatNumber(group.requests) }}
|
||||
</td>
|
||||
<td class="py-1.5 text-right text-gray-600 dark:text-gray-400">
|
||||
{{ formatTokens(group.total_tokens) }}
|
||||
</td>
|
||||
<td class="py-1.5 text-right text-green-600 dark:text-green-400">
|
||||
${{ formatCost(group.actual_cost) }}
|
||||
</td>
|
||||
<td class="py-1.5 text-right text-gray-400 dark:text-gray-500">
|
||||
${{ formatCost(group.cost) }}
|
||||
</td>
|
||||
</tr>
|
||||
<td
|
||||
class="max-w-[100px] truncate py-1.5 font-medium"
|
||||
:class="group.group_id > 0 ? 'text-blue-600 hover:text-blue-800 dark:text-blue-400 dark:hover:text-blue-300' : 'text-gray-900 dark:text-white'"
|
||||
:title="group.group_name || String(group.group_id)"
|
||||
>
|
||||
<span class="inline-flex items-center gap-1">
|
||||
<svg v-if="group.group_id > 0 && expandedKey === `group-${group.group_id}`" class="h-3 w-3 shrink-0" fill="none" stroke="currentColor" viewBox="0 0 24 24"><path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M19 9l-7 7-7-7"/></svg>
|
||||
<svg v-else-if="group.group_id > 0" class="h-3 w-3 shrink-0" fill="none" stroke="currentColor" viewBox="0 0 24 24"><path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M9 5l7 7-7 7"/></svg>
|
||||
{{ group.group_name || t('admin.dashboard.noGroup') }}
|
||||
</span>
|
||||
</td>
|
||||
<td class="py-1.5 text-right text-gray-600 dark:text-gray-400">
|
||||
{{ formatNumber(group.requests) }}
|
||||
</td>
|
||||
<td class="py-1.5 text-right text-gray-600 dark:text-gray-400">
|
||||
{{ formatTokens(group.total_tokens) }}
|
||||
</td>
|
||||
<td class="py-1.5 text-right text-green-600 dark:text-green-400">
|
||||
${{ formatCost(group.actual_cost) }}
|
||||
</td>
|
||||
<td class="py-1.5 text-right text-gray-400 dark:text-gray-500">
|
||||
${{ formatCost(group.cost) }}
|
||||
</td>
|
||||
</tr>
|
||||
<!-- User breakdown sub-rows -->
|
||||
<tr v-if="expandedKey === `group-${group.group_id}`">
|
||||
<td colspan="5" class="p-0">
|
||||
<UserBreakdownSubTable
|
||||
:items="breakdownItems"
|
||||
:loading="breakdownLoading"
|
||||
/>
|
||||
</td>
|
||||
</tr>
|
||||
</template>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
@@ -87,12 +103,14 @@
|
||||
</template>
|
||||
|
||||
<script setup lang="ts">
|
||||
import { computed } from 'vue'
|
||||
import { computed, ref } from 'vue'
|
||||
import { useI18n } from 'vue-i18n'
|
||||
import { Chart as ChartJS, ArcElement, Tooltip, Legend } from 'chart.js'
|
||||
import { Doughnut } from 'vue-chartjs'
|
||||
import LoadingSpinner from '@/components/common/LoadingSpinner.vue'
|
||||
import type { GroupStat } from '@/types'
|
||||
import UserBreakdownSubTable from './UserBreakdownSubTable.vue'
|
||||
import type { GroupStat, UserBreakdownItem } from '@/types'
|
||||
import { getUserBreakdown } from '@/api/admin/dashboard'
|
||||
|
||||
ChartJS.register(ArcElement, Tooltip, Legend)
|
||||
|
||||
@@ -105,6 +123,8 @@ const props = withDefaults(defineProps<{
|
||||
loading?: boolean
|
||||
metric?: DistributionMetric
|
||||
showMetricToggle?: boolean
|
||||
startDate?: string
|
||||
endDate?: string
|
||||
}>(), {
|
||||
loading: false,
|
||||
metric: 'tokens',
|
||||
@@ -115,6 +135,33 @@ const emit = defineEmits<{
|
||||
'update:metric': [value: DistributionMetric]
|
||||
}>()
|
||||
|
||||
const expandedKey = ref<string | null>(null)
|
||||
const breakdownItems = ref<UserBreakdownItem[]>([])
|
||||
const breakdownLoading = ref(false)
|
||||
|
||||
const toggleBreakdown = async (type: string, id: number | string) => {
|
||||
const key = `${type}-${id}`
|
||||
if (expandedKey.value === key) {
|
||||
expandedKey.value = null
|
||||
return
|
||||
}
|
||||
expandedKey.value = key
|
||||
breakdownLoading.value = true
|
||||
breakdownItems.value = []
|
||||
try {
|
||||
const res = await getUserBreakdown({
|
||||
start_date: props.startDate,
|
||||
end_date: props.endDate,
|
||||
group_id: Number(id),
|
||||
})
|
||||
breakdownItems.value = res.users || []
|
||||
} catch {
|
||||
breakdownItems.value = []
|
||||
} finally {
|
||||
breakdownLoading.value = false
|
||||
}
|
||||
}
|
||||
|
||||
const chartColors = [
|
||||
'#3b82f6',
|
||||
'#10b981',
|
||||
|
||||
@@ -83,30 +83,43 @@
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr
|
||||
v-for="model in displayModelStats"
|
||||
:key="model.model"
|
||||
class="border-t border-gray-100 dark:border-gray-700"
|
||||
>
|
||||
<td
|
||||
class="max-w-[100px] truncate py-1.5 font-medium text-gray-900 dark:text-white"
|
||||
:title="model.model"
|
||||
<template v-for="model in displayModelStats" :key="model.model">
|
||||
<tr
|
||||
class="border-t border-gray-100 cursor-pointer transition-colors hover:bg-gray-50 dark:border-gray-700 dark:hover:bg-dark-700/40"
|
||||
@click="toggleBreakdown('model', model.model)"
|
||||
>
|
||||
{{ model.model }}
|
||||
</td>
|
||||
<td class="py-1.5 text-right text-gray-600 dark:text-gray-400">
|
||||
{{ formatNumber(model.requests) }}
|
||||
</td>
|
||||
<td class="py-1.5 text-right text-gray-600 dark:text-gray-400">
|
||||
{{ formatTokens(model.total_tokens) }}
|
||||
</td>
|
||||
<td class="py-1.5 text-right text-green-600 dark:text-green-400">
|
||||
${{ formatCost(model.actual_cost) }}
|
||||
</td>
|
||||
<td class="py-1.5 text-right text-gray-400 dark:text-gray-500">
|
||||
${{ formatCost(model.cost) }}
|
||||
</td>
|
||||
</tr>
|
||||
<td
|
||||
class="max-w-[100px] truncate py-1.5 font-medium text-blue-600 hover:text-blue-800 dark:text-blue-400 dark:hover:text-blue-300"
|
||||
:title="model.model"
|
||||
>
|
||||
<span class="inline-flex items-center gap-1">
|
||||
<svg v-if="expandedKey === `model-${model.model}`" class="h-3 w-3 shrink-0" fill="none" stroke="currentColor" viewBox="0 0 24 24"><path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M19 9l-7 7-7-7"/></svg>
|
||||
<svg v-else class="h-3 w-3 shrink-0" fill="none" stroke="currentColor" viewBox="0 0 24 24"><path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M9 5l7 7-7 7"/></svg>
|
||||
{{ model.model }}
|
||||
</span>
|
||||
</td>
|
||||
<td class="py-1.5 text-right text-gray-600 dark:text-gray-400">
|
||||
{{ formatNumber(model.requests) }}
|
||||
</td>
|
||||
<td class="py-1.5 text-right text-gray-600 dark:text-gray-400">
|
||||
{{ formatTokens(model.total_tokens) }}
|
||||
</td>
|
||||
<td class="py-1.5 text-right text-green-600 dark:text-green-400">
|
||||
${{ formatCost(model.actual_cost) }}
|
||||
</td>
|
||||
<td class="py-1.5 text-right text-gray-400 dark:text-gray-500">
|
||||
${{ formatCost(model.cost) }}
|
||||
</td>
|
||||
</tr>
|
||||
<tr v-if="expandedKey === `model-${model.model}`">
|
||||
<td colspan="5" class="p-0">
|
||||
<UserBreakdownSubTable
|
||||
:items="breakdownItems"
|
||||
:loading="breakdownLoading"
|
||||
/>
|
||||
</td>
|
||||
</tr>
|
||||
</template>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
@@ -193,7 +206,9 @@ import { useI18n } from 'vue-i18n'
|
||||
import { Chart as ChartJS, ArcElement, Tooltip, Legend } from 'chart.js'
|
||||
import { Doughnut } from 'vue-chartjs'
|
||||
import LoadingSpinner from '@/components/common/LoadingSpinner.vue'
|
||||
import type { ModelStat, UserSpendingRankingItem } from '@/types'
|
||||
import UserBreakdownSubTable from './UserBreakdownSubTable.vue'
|
||||
import type { ModelStat, UserSpendingRankingItem, UserBreakdownItem } from '@/types'
|
||||
import { getUserBreakdown } from '@/api/admin/dashboard'
|
||||
|
||||
ChartJS.register(ArcElement, Tooltip, Legend)
|
||||
|
||||
@@ -213,6 +228,8 @@ const props = withDefaults(defineProps<{
|
||||
showMetricToggle?: boolean
|
||||
rankingLoading?: boolean
|
||||
rankingError?: boolean
|
||||
startDate?: string
|
||||
endDate?: string
|
||||
}>(), {
|
||||
enableRankingView: false,
|
||||
rankingItems: () => [],
|
||||
@@ -226,6 +243,33 @@ const props = withDefaults(defineProps<{
|
||||
rankingError: false
|
||||
})
|
||||
|
||||
const expandedKey = ref<string | null>(null)
|
||||
const breakdownItems = ref<UserBreakdownItem[]>([])
|
||||
const breakdownLoading = ref(false)
|
||||
|
||||
const toggleBreakdown = async (type: string, id: string) => {
|
||||
const key = `${type}-${id}`
|
||||
if (expandedKey.value === key) {
|
||||
expandedKey.value = null
|
||||
return
|
||||
}
|
||||
expandedKey.value = key
|
||||
breakdownLoading.value = true
|
||||
breakdownItems.value = []
|
||||
try {
|
||||
const res = await getUserBreakdown({
|
||||
start_date: props.startDate,
|
||||
end_date: props.endDate,
|
||||
model: id,
|
||||
})
|
||||
breakdownItems.value = res.users || []
|
||||
} catch {
|
||||
breakdownItems.value = []
|
||||
} finally {
|
||||
breakdownLoading.value = false
|
||||
}
|
||||
}
|
||||
|
||||
const emit = defineEmits<{
|
||||
'update:metric': [value: DistributionMetric]
|
||||
'ranking-click': [item: UserSpendingRankingItem]
|
||||
|
||||
62
frontend/src/components/charts/UserBreakdownSubTable.vue
Normal file
62
frontend/src/components/charts/UserBreakdownSubTable.vue
Normal file
@@ -0,0 +1,62 @@
|
||||
<template>
|
||||
<div class="bg-gray-50/50 dark:bg-dark-700/30">
|
||||
<div v-if="loading" class="flex items-center justify-center py-3">
|
||||
<LoadingSpinner />
|
||||
</div>
|
||||
<div v-else-if="items.length === 0" class="py-2 text-center text-xs text-gray-400">
|
||||
{{ t('admin.dashboard.noDataAvailable') }}
|
||||
</div>
|
||||
<table v-else class="w-full text-xs">
|
||||
<tbody>
|
||||
<tr
|
||||
v-for="user in items"
|
||||
:key="user.user_id"
|
||||
class="border-t border-gray-100/50 dark:border-gray-700/50"
|
||||
>
|
||||
<td class="max-w-[120px] truncate py-1 pl-6 text-gray-600 dark:text-gray-300" :title="user.email">
|
||||
{{ user.email || `User #${user.user_id}` }}
|
||||
</td>
|
||||
<td class="py-1 text-right text-gray-500 dark:text-gray-400">
|
||||
{{ user.requests.toLocaleString() }}
|
||||
</td>
|
||||
<td class="py-1 text-right text-gray-500 dark:text-gray-400">
|
||||
{{ formatTokens(user.total_tokens) }}
|
||||
</td>
|
||||
<td class="py-1 text-right text-green-600 dark:text-green-400">
|
||||
${{ formatCost(user.actual_cost) }}
|
||||
</td>
|
||||
<td class="py-1 pr-1 text-right text-gray-400 dark:text-gray-500">
|
||||
${{ formatCost(user.cost) }}
|
||||
</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</template>
|
||||
|
||||
<script setup lang="ts">
|
||||
import { useI18n } from 'vue-i18n'
|
||||
import LoadingSpinner from '@/components/common/LoadingSpinner.vue'
|
||||
import type { UserBreakdownItem } from '@/types'
|
||||
|
||||
const { t } = useI18n()
|
||||
|
||||
defineProps<{
|
||||
items: UserBreakdownItem[]
|
||||
loading?: boolean
|
||||
}>()
|
||||
|
||||
const formatTokens = (value: number): string => {
|
||||
if (value >= 1_000_000_000) return `${(value / 1_000_000_000).toFixed(2)}B`
|
||||
if (value >= 1_000_000) return `${(value / 1_000_000).toFixed(2)}M`
|
||||
if (value >= 1_000) return `${(value / 1_000).toFixed(2)}K`
|
||||
return value.toLocaleString()
|
||||
}
|
||||
|
||||
const formatCost = (value: number): string => {
|
||||
if (value >= 1000) return (value / 1000).toFixed(2) + 'K'
|
||||
if (value >= 1) return value.toFixed(2)
|
||||
if (value >= 0.01) return value.toFixed(3)
|
||||
return value.toFixed(4)
|
||||
}
|
||||
</script>
|
||||
@@ -106,7 +106,7 @@ const isOpen = ref(false)
|
||||
const containerRef = ref<HTMLElement | null>(null)
|
||||
const localStartDate = ref(props.startDate)
|
||||
const localEndDate = ref(props.endDate)
|
||||
const activePreset = ref<string | null>('7days')
|
||||
const activePreset = ref<string | null>('last24Hours')
|
||||
|
||||
const today = computed(() => {
|
||||
// Use local timezone to avoid UTC timezone issues
|
||||
@@ -152,6 +152,18 @@ const presets: DatePreset[] = [
|
||||
return { start: yesterday, end: yesterday }
|
||||
}
|
||||
},
|
||||
{
|
||||
labelKey: 'dates.last24Hours',
|
||||
value: 'last24Hours',
|
||||
getRange: () => {
|
||||
const end = new Date()
|
||||
const start = new Date(end.getTime() - 24 * 60 * 60 * 1000)
|
||||
return {
|
||||
start: formatDateToString(start),
|
||||
end: formatDateToString(end)
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
labelKey: 'dates.last7Days',
|
||||
value: '7days',
|
||||
|
||||
84
frontend/src/components/common/GroupCapacityBadge.vue
Normal file
84
frontend/src/components/common/GroupCapacityBadge.vue
Normal file
@@ -0,0 +1,84 @@
|
||||
<template>
|
||||
<div class="flex flex-col gap-1">
|
||||
<!-- 并发槽位 -->
|
||||
<div class="flex items-center gap-1">
|
||||
<span
|
||||
:class="[
|
||||
'inline-flex items-center gap-1 rounded-md px-1.5 py-0.5 text-[10px] font-medium',
|
||||
capacityClass(concurrencyUsed, concurrencyMax)
|
||||
]"
|
||||
>
|
||||
<svg class="h-2.5 w-2.5" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2">
|
||||
<path stroke-linecap="round" stroke-linejoin="round" d="M3.75 6A2.25 2.25 0 016 3.75h2.25A2.25 2.25 0 0110.5 6v2.25a2.25 2.25 0 01-2.25 2.25H6a2.25 2.25 0 01-2.25-2.25V6zM3.75 15.75A2.25 2.25 0 016 13.5h2.25a2.25 2.25 0 012.25 2.25V18a2.25 2.25 0 01-2.25 2.25H6A2.25 2.25 0 013.75 18v-2.25zM13.5 6a2.25 2.25 0 012.25-2.25H18A2.25 2.25 0 0120.25 6v2.25A2.25 2.25 0 0118 10.5h-2.25a2.25 2.25 0 01-2.25-2.25V6zM13.5 15.75a2.25 2.25 0 012.25-2.25H18a2.25 2.25 0 012.25 2.25V18A2.25 2.25 0 0118 20.25h-2.25A2.25 2.25 0 0113.5 18v-2.25z" />
|
||||
</svg>
|
||||
<span class="font-mono">{{ concurrencyUsed }}</span>
|
||||
<span class="text-gray-400 dark:text-gray-500">/</span>
|
||||
<span class="font-mono">{{ concurrencyMax }}</span>
|
||||
</span>
|
||||
</div>
|
||||
|
||||
<!-- 会话数 -->
|
||||
<div v-if="sessionsMax > 0" class="flex items-center gap-1">
|
||||
<span
|
||||
:class="[
|
||||
'inline-flex items-center gap-1 rounded-md px-1.5 py-0.5 text-[10px] font-medium',
|
||||
capacityClass(sessionsUsed, sessionsMax)
|
||||
]"
|
||||
>
|
||||
<svg class="h-2.5 w-2.5" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2">
|
||||
<path stroke-linecap="round" stroke-linejoin="round" d="M15 19.128a9.38 9.38 0 002.625.372 9.337 9.337 0 004.121-.952 4.125 4.125 0 00-7.533-2.493M15 19.128v-.003c0-1.113-.285-2.16-.786-3.07M15 19.128v.106A12.318 12.318 0 018.624 21c-2.331 0-4.512-.645-6.374-1.766l-.001-.109a6.375 6.375 0 0111.964-3.07M12 6.375a3.375 3.375 0 11-6.75 0 3.375 3.375 0 016.75 0zm8.25 2.25a2.625 2.625 0 11-5.25 0 2.625 2.625 0 015.25 0z" />
|
||||
</svg>
|
||||
<span class="font-mono">{{ sessionsUsed }}</span>
|
||||
<span class="text-gray-400 dark:text-gray-500">/</span>
|
||||
<span class="font-mono">{{ sessionsMax }}</span>
|
||||
</span>
|
||||
</div>
|
||||
|
||||
<!-- RPM -->
|
||||
<div v-if="rpmMax > 0" class="flex items-center gap-1">
|
||||
<span
|
||||
:class="[
|
||||
'inline-flex items-center gap-1 rounded-md px-1.5 py-0.5 text-[10px] font-medium',
|
||||
capacityClass(rpmUsed, rpmMax)
|
||||
]"
|
||||
>
|
||||
<svg class="h-2.5 w-2.5" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor">
|
||||
<path stroke-linecap="round" stroke-linejoin="round" d="M12 6v6h4.5m4.5 0a9 9 0 1 1-18 0 9 9 0 0 1 18 0Z" />
|
||||
</svg>
|
||||
<span class="font-mono">{{ rpmUsed }}</span>
|
||||
<span class="text-gray-400 dark:text-gray-500">/</span>
|
||||
<span class="font-mono">{{ rpmMax }}</span>
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
</template>
|
||||
|
||||
<script setup lang="ts">
|
||||
interface Props {
|
||||
concurrencyUsed: number
|
||||
concurrencyMax: number
|
||||
sessionsUsed: number
|
||||
sessionsMax: number
|
||||
rpmUsed: number
|
||||
rpmMax: number
|
||||
}
|
||||
|
||||
withDefaults(defineProps<Props>(), {
|
||||
concurrencyUsed: 0,
|
||||
concurrencyMax: 0,
|
||||
sessionsUsed: 0,
|
||||
sessionsMax: 0,
|
||||
rpmUsed: 0,
|
||||
rpmMax: 0
|
||||
})
|
||||
|
||||
function capacityClass(used: number, max: number): string {
|
||||
if (max > 0 && used >= max) {
|
||||
return 'bg-red-100 text-red-700 dark:bg-red-900/30 dark:text-red-400'
|
||||
}
|
||||
if (used > 0) {
|
||||
return 'bg-yellow-100 text-yellow-700 dark:bg-yellow-900/30 dark:text-yellow-400'
|
||||
}
|
||||
return 'bg-gray-100 text-gray-600 dark:bg-gray-800 dark:text-gray-400'
|
||||
}
|
||||
</script>
|
||||
@@ -0,0 +1,96 @@
|
||||
import { describe, expect, it, vi } from 'vitest'
|
||||
import { mount } from '@vue/test-utils'
|
||||
import { ref } from 'vue'
|
||||
|
||||
import DateRangePicker from '../DateRangePicker.vue'
|
||||
|
||||
const messages: Record<string, string> = {
|
||||
'dates.today': 'Today',
|
||||
'dates.yesterday': 'Yesterday',
|
||||
'dates.last24Hours': 'Last 24 Hours',
|
||||
'dates.last7Days': 'Last 7 Days',
|
||||
'dates.last14Days': 'Last 14 Days',
|
||||
'dates.last30Days': 'Last 30 Days',
|
||||
'dates.thisMonth': 'This Month',
|
||||
'dates.lastMonth': 'Last Month',
|
||||
'dates.startDate': 'Start Date',
|
||||
'dates.endDate': 'End Date',
|
||||
'dates.apply': 'Apply',
|
||||
'dates.selectDateRange': 'Select date range'
|
||||
}
|
||||
|
||||
vi.mock('vue-i18n', () => ({
|
||||
useI18n: () => ({
|
||||
t: (key: string) => messages[key] ?? key,
|
||||
locale: ref('en')
|
||||
})
|
||||
}))
|
||||
|
||||
const formatLocalDate = (date: Date): string => {
|
||||
const year = date.getFullYear()
|
||||
const month = String(date.getMonth() + 1).padStart(2, '0')
|
||||
const day = String(date.getDate()).padStart(2, '0')
|
||||
return `${year}-${month}-${day}`
|
||||
}
|
||||
|
||||
describe('DateRangePicker', () => {
|
||||
it('uses last 24 hours as the default recognized preset', () => {
|
||||
const now = new Date()
|
||||
const yesterday = new Date(now.getTime() - 24 * 60 * 60 * 1000)
|
||||
|
||||
const wrapper = mount(DateRangePicker, {
|
||||
props: {
|
||||
startDate: formatLocalDate(yesterday),
|
||||
endDate: formatLocalDate(now)
|
||||
},
|
||||
global: {
|
||||
stubs: {
|
||||
Icon: true
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
expect(wrapper.text()).toContain('Last 24 Hours')
|
||||
})
|
||||
|
||||
it('emits range updates with last24Hours preset when applied', async () => {
|
||||
const now = new Date()
|
||||
const today = formatLocalDate(now)
|
||||
|
||||
const wrapper = mount(DateRangePicker, {
|
||||
props: {
|
||||
startDate: today,
|
||||
endDate: today
|
||||
},
|
||||
global: {
|
||||
stubs: {
|
||||
Icon: true
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
await wrapper.find('.date-picker-trigger').trigger('click')
|
||||
const presetButton = wrapper.findAll('.date-picker-preset').find((node) =>
|
||||
node.text().includes('Last 24 Hours')
|
||||
)
|
||||
expect(presetButton).toBeDefined()
|
||||
|
||||
await presetButton!.trigger('click')
|
||||
await wrapper.find('.date-picker-apply').trigger('click')
|
||||
|
||||
const nowAfterClick = new Date()
|
||||
const yesterdayAfterClick = new Date(nowAfterClick.getTime() - 24 * 60 * 60 * 1000)
|
||||
const expectedStart = formatLocalDate(yesterdayAfterClick)
|
||||
const expectedEnd = formatLocalDate(nowAfterClick)
|
||||
|
||||
expect(wrapper.emitted('update:startDate')?.[0]).toEqual([expectedStart])
|
||||
expect(wrapper.emitted('update:endDate')?.[0]).toEqual([expectedEnd])
|
||||
expect(wrapper.emitted('change')?.[0]).toEqual([
|
||||
{
|
||||
startDate: expectedStart,
|
||||
endDate: expectedEnd,
|
||||
preset: 'last24Hours'
|
||||
}
|
||||
])
|
||||
})
|
||||
})
|
||||
@@ -123,6 +123,7 @@
|
||||
</router-link>
|
||||
|
||||
<a
|
||||
v-if="authStore.isAdmin"
|
||||
href="https://github.com/Wei-Shaw/sub2api"
|
||||
target="_blank"
|
||||
rel="noopener noreferrer"
|
||||
@@ -138,6 +139,7 @@
|
||||
</svg>
|
||||
{{ t('nav.github') }}
|
||||
</a>
|
||||
|
||||
</div>
|
||||
|
||||
<!-- Contact Support (only show if configured) -->
|
||||
|
||||
@@ -574,7 +574,7 @@ export default {
|
||||
groupRequired: 'Please select a group',
|
||||
usage: 'Usage',
|
||||
today: 'Today',
|
||||
total: 'Total',
|
||||
total: 'Last 30d',
|
||||
quota: 'Quota',
|
||||
lastUsedAt: 'Last Used',
|
||||
useKey: 'Use Key',
|
||||
@@ -920,6 +920,7 @@ export default {
|
||||
lastWeek: 'Last Week',
|
||||
thisMonth: 'This Month',
|
||||
lastMonth: 'Last Month',
|
||||
last24Hours: 'Last 24 Hours',
|
||||
last7Days: 'Last 7 Days',
|
||||
last14Days: 'Last 14 Days',
|
||||
last30Days: 'Last 30 Days',
|
||||
@@ -1025,7 +1026,12 @@ export default {
|
||||
createBackup: 'Create Backup',
|
||||
backing: 'Backing up...',
|
||||
backupCreated: 'Backup created successfully',
|
||||
expireDays: 'Expire Days'
|
||||
expireDays: 'Expire Days',
|
||||
alreadyInProgress: 'A backup is already in progress',
|
||||
backupRunning: 'Backup in progress...',
|
||||
backupFailed: 'Backup failed',
|
||||
restoreRunning: 'Restore in progress...',
|
||||
restoreFailed: 'Restore failed',
|
||||
},
|
||||
columns: {
|
||||
status: 'Status',
|
||||
@@ -1042,6 +1048,11 @@ export default {
|
||||
completed: 'Completed',
|
||||
failed: 'Failed'
|
||||
},
|
||||
progress: {
|
||||
pending: 'Preparing',
|
||||
dumping: 'Dumping database',
|
||||
uploading: 'Uploading',
|
||||
},
|
||||
trigger: {
|
||||
manual: 'Manual',
|
||||
scheduled: 'Scheduled'
|
||||
@@ -1308,7 +1319,7 @@ export default {
|
||||
actions: 'Actions'
|
||||
},
|
||||
today: 'Today',
|
||||
total: 'Total',
|
||||
total: 'Last 30d',
|
||||
noSubscription: 'No subscription',
|
||||
daysRemaining: '{days}d',
|
||||
expired: 'Expired',
|
||||
@@ -1494,6 +1505,8 @@ export default {
|
||||
rateMultiplier: 'Rate Multiplier',
|
||||
type: 'Type',
|
||||
accounts: 'Accounts',
|
||||
capacity: 'Capacity',
|
||||
usage: 'Usage',
|
||||
status: 'Status',
|
||||
actions: 'Actions',
|
||||
billingType: 'Billing Type',
|
||||
@@ -1502,6 +1515,12 @@ export default {
|
||||
userNotes: 'Notes',
|
||||
userStatus: 'Status'
|
||||
},
|
||||
usageToday: 'Today',
|
||||
usageTotal: 'Total',
|
||||
accountsAvailable: 'Avail:',
|
||||
accountsRateLimited: 'Limited:',
|
||||
accountsTotal: 'Total:',
|
||||
accountsUnit: '',
|
||||
rateAndAccounts: '{rate}x rate · {count} accounts',
|
||||
accountsCount: '{count} accounts',
|
||||
form: {
|
||||
@@ -1683,6 +1702,7 @@ export default {
|
||||
revokeSubscription: 'Revoke Subscription',
|
||||
allStatus: 'All Status',
|
||||
allGroups: 'All Groups',
|
||||
allPlatforms: 'All Platforms',
|
||||
daily: 'Daily',
|
||||
weekly: 'Weekly',
|
||||
monthly: 'Monthly',
|
||||
@@ -1748,7 +1768,37 @@ export default {
|
||||
pleaseSelectGroup: 'Please select a group',
|
||||
validityDaysRequired: 'Please enter a valid number of days (at least 1)',
|
||||
revokeConfirm:
|
||||
"Are you sure you want to revoke the subscription for '{user}'? This action cannot be undone."
|
||||
"Are you sure you want to revoke the subscription for '{user}'? This action cannot be undone.",
|
||||
guide: {
|
||||
title: 'Subscription Management Guide',
|
||||
subtitle: 'Subscription mode lets you assign time-based usage quotas to users, with daily/weekly/monthly limits. Follow these steps to get started.',
|
||||
showGuide: 'Usage Guide',
|
||||
step1: {
|
||||
title: 'Create a Subscription Group',
|
||||
line1: 'Go to "Group Management" page, click "Create Group"',
|
||||
line2: 'Set billing type to "Subscription", configure daily/weekly/monthly quota limits',
|
||||
line3: 'Save the group and ensure its status is "Active"',
|
||||
link: 'Go to Group Management'
|
||||
},
|
||||
step2: {
|
||||
title: 'Assign Subscription to User',
|
||||
line1: 'Click the "Assign Subscription" button in the top right',
|
||||
line2: 'Search for a user by email and select them',
|
||||
line3: 'Choose a subscription group, set validity days, then click "Assign"'
|
||||
},
|
||||
step3: {
|
||||
title: 'Manage Existing Subscriptions'
|
||||
},
|
||||
actions: {
|
||||
adjust: 'Adjust',
|
||||
adjustDesc: 'Extend or shorten the subscription validity period',
|
||||
resetQuota: 'Reset Quota',
|
||||
resetQuotaDesc: 'Reset daily/weekly/monthly usage to zero',
|
||||
revoke: 'Revoke',
|
||||
revokeDesc: 'Immediately terminate the subscription (irreversible)'
|
||||
},
|
||||
tip: 'Tip: Only groups with billing type "Subscription" and status "Active" appear in the group dropdown. If no options are available, create one in Group Management first.'
|
||||
}
|
||||
},
|
||||
|
||||
// Accounts
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user