mirror of
https://gitee.com/wanwujie/sub2api
synced 2026-04-03 06:52:13 +08:00
Merge tag 'v0.1.90' into merge/upstream-v0.1.90
注册邮箱域名白名单策略上线,后台大数据场景性能大幅优化。 - 注册邮箱域名白名单:支持管理员配置允许注册的邮箱域名策略 - Keys 页面表单筛选:用户 /keys 页面支持按条件筛选 API Key - Settings 页面分 Tab 拆分:管理后台设置页面按功能模块分 Tab 展示 - 后台大数据场景加载性能优化:仪表盘/用户/账号/Ops 页面大数据集加载显著提速 - Usage 大表分页优化:默认避免全量 COUNT(*),大幅降低分页查询耗时 - 消除重复的 normalizeAccountIDList,补充新增组件的单元测试 - 清理无用文件和过时文档,精简项目结构 - EmailVerifyView 硬编码英文字符串替换为 i18n 调用 - 修复 Anthropic 平台无限流重置时间的 429 误标记账号限流问题 - 修复自定义菜单页面管理员视角菜单不生效问题 - 修复 Ops 错误详情弹窗未展示真实上游 payload 的问题 - 修复充值/订阅菜单 icon 显示问题 # Conflicts: # .gitignore # backend/cmd/server/VERSION # backend/ent/group.go # backend/ent/runtime/runtime.go # backend/ent/schema/group.go # backend/go.sum # backend/internal/handler/admin/account_handler.go # backend/internal/handler/admin/dashboard_handler.go # backend/internal/pkg/usagestats/usage_log_types.go # backend/internal/repository/group_repo.go # backend/internal/repository/usage_log_repo.go # backend/internal/server/middleware/security_headers.go # backend/internal/server/router.go # backend/internal/service/account_usage_service.go # backend/internal/service/admin_service_bulk_update_test.go # backend/internal/service/dashboard_service.go # backend/internal/service/gateway_service.go # frontend/src/api/admin/dashboard.ts # frontend/src/components/account/BulkEditAccountModal.vue # frontend/src/components/charts/GroupDistributionChart.vue # frontend/src/components/layout/AppSidebar.vue # frontend/src/i18n/locales/en.ts # frontend/src/i18n/locales/zh.ts # frontend/src/views/admin/GroupsView.vue # frontend/src/views/admin/SettingsView.vue # frontend/src/views/admin/UsageView.vue # frontend/src/views/user/PurchaseSubscriptionView.vue
This commit is contained in:
10
.github/workflows/backend-ci.yml
vendored
10
.github/workflows/backend-ci.yml
vendored
@@ -11,8 +11,8 @@ jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-go@v5
|
||||
- uses: actions/checkout@v6
|
||||
- uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version-file: backend/go.mod
|
||||
check-latest: false
|
||||
@@ -31,8 +31,8 @@ jobs:
|
||||
golangci-lint:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-go@v5
|
||||
- uses: actions/checkout@v6
|
||||
- uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version-file: backend/go.mod
|
||||
check-latest: false
|
||||
@@ -45,5 +45,5 @@ jobs:
|
||||
uses: golangci/golangci-lint-action@v9
|
||||
with:
|
||||
version: v2.7
|
||||
args: --timeout=5m
|
||||
args: --timeout=30m
|
||||
working-directory: backend
|
||||
22
.github/workflows/release.yml
vendored
22
.github/workflows/release.yml
vendored
@@ -31,7 +31,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Update VERSION file
|
||||
run: |
|
||||
@@ -45,7 +45,7 @@ jobs:
|
||||
echo "Updated VERSION file to: $VERSION"
|
||||
|
||||
- name: Upload VERSION artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v7
|
||||
with:
|
||||
name: version-file
|
||||
path: backend/cmd/server/VERSION
|
||||
@@ -55,7 +55,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Setup pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
@@ -63,7 +63,7 @@ jobs:
|
||||
version: 9
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version: '20'
|
||||
cache: 'pnpm'
|
||||
@@ -78,7 +78,7 @@ jobs:
|
||||
working-directory: frontend
|
||||
|
||||
- name: Upload frontend artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v7
|
||||
with:
|
||||
name: frontend-dist
|
||||
path: backend/internal/web/dist/
|
||||
@@ -89,25 +89,25 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
ref: ${{ github.event.inputs.tag || github.ref }}
|
||||
|
||||
- name: Download VERSION artifact
|
||||
uses: actions/download-artifact@v4
|
||||
uses: actions/download-artifact@v8
|
||||
with:
|
||||
name: version-file
|
||||
path: backend/cmd/server/
|
||||
|
||||
- name: Download frontend artifact
|
||||
uses: actions/download-artifact@v4
|
||||
uses: actions/download-artifact@v8
|
||||
with:
|
||||
name: frontend-dist
|
||||
path: backend/internal/web/dist/
|
||||
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version-file: backend/go.mod
|
||||
check-latest: false
|
||||
@@ -173,7 +173,7 @@ jobs:
|
||||
run: echo "owner=$(echo '${{ github.repository_owner }}' | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Run GoReleaser
|
||||
uses: goreleaser/goreleaser-action@v6
|
||||
uses: goreleaser/goreleaser-action@v7
|
||||
with:
|
||||
version: '~> v2'
|
||||
args: release --clean --skip=validate ${{ env.SIMPLE_RELEASE == 'true' && '--config=.goreleaser.simple.yaml' || '' }}
|
||||
@@ -188,7 +188,7 @@ jobs:
|
||||
# Update DockerHub description
|
||||
- name: Update DockerHub description
|
||||
if: ${{ env.SIMPLE_RELEASE != 'true' && env.DOCKERHUB_USERNAME != '' }}
|
||||
uses: peter-evans/dockerhub-description@v4
|
||||
uses: peter-evans/dockerhub-description@v5
|
||||
env:
|
||||
DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
with:
|
||||
|
||||
14
.github/workflows/security-scan.yml
vendored
14
.github/workflows/security-scan.yml
vendored
@@ -12,10 +12,11 @@ permissions:
|
||||
jobs:
|
||||
backend-security:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 15
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v6
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version-file: backend/go.mod
|
||||
check-latest: false
|
||||
@@ -28,22 +29,17 @@ jobs:
|
||||
run: |
|
||||
go install golang.org/x/vuln/cmd/govulncheck@latest
|
||||
govulncheck ./...
|
||||
- name: Run gosec
|
||||
working-directory: backend
|
||||
run: |
|
||||
go install github.com/securego/gosec/v2/cmd/gosec@latest
|
||||
gosec -conf .gosec.json -severity high -confidence high ./...
|
||||
|
||||
frontend-security:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v6
|
||||
- name: Set up pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 9
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v4
|
||||
uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version: '20'
|
||||
cache: 'pnpm'
|
||||
|
||||
6
.gitignore
vendored
6
.gitignore
vendored
@@ -117,13 +117,12 @@ backend/.installed
|
||||
# ===================
|
||||
tests
|
||||
CLAUDE.md
|
||||
AGENTS.md
|
||||
.claude
|
||||
scripts
|
||||
.code-review-state
|
||||
openspec/
|
||||
#openspec/
|
||||
code-reviews/
|
||||
AGENTS.md
|
||||
#AGENTS.md
|
||||
backend/cmd/server/server
|
||||
deploy/docker-compose.override.yml
|
||||
.gocache/
|
||||
@@ -141,3 +140,4 @@ antigravity_projectid_fix.patch
|
||||
.codex/
|
||||
frontend/coverage/
|
||||
aicodex
|
||||
output/
|
||||
|
||||
13
Dockerfile
13
Dockerfile
@@ -8,7 +8,7 @@
|
||||
|
||||
ARG NODE_IMAGE=node:24-alpine
|
||||
ARG GOLANG_IMAGE=golang:1.25.7-alpine
|
||||
ARG ALPINE_IMAGE=alpine:3.20
|
||||
ARG ALPINE_IMAGE=alpine:3.21
|
||||
ARG GOPROXY=https://goproxy.cn,direct
|
||||
ARG GOSUMDB=sum.golang.google.cn
|
||||
|
||||
@@ -68,6 +68,7 @@ RUN VERSION_VALUE="${VERSION}" && \
|
||||
CGO_ENABLED=0 GOOS=linux go build \
|
||||
-tags embed \
|
||||
-ldflags="-s -w -X main.Version=${VERSION_VALUE} -X main.Commit=${COMMIT} -X main.Date=${DATE_VALUE} -X main.BuildType=release" \
|
||||
-trimpath \
|
||||
-o /app/sub2api \
|
||||
./cmd/server
|
||||
|
||||
@@ -85,7 +86,6 @@ LABEL org.opencontainers.image.source="https://github.com/Wei-Shaw/sub2api"
|
||||
RUN apk add --no-cache \
|
||||
ca-certificates \
|
||||
tzdata \
|
||||
curl \
|
||||
&& rm -rf /var/cache/apk/*
|
||||
|
||||
# Create non-root user
|
||||
@@ -95,11 +95,12 @@ RUN addgroup -g 1000 sub2api && \
|
||||
# Set working directory
|
||||
WORKDIR /app
|
||||
|
||||
# Copy binary from builder
|
||||
COPY --from=backend-builder /app/sub2api /app/sub2api
|
||||
# Copy binary/resources with ownership to avoid extra full-layer chown copy
|
||||
COPY --from=backend-builder --chown=sub2api:sub2api /app/sub2api /app/sub2api
|
||||
COPY --from=backend-builder --chown=sub2api:sub2api /app/backend/resources /app/resources
|
||||
|
||||
# Create data directory
|
||||
RUN mkdir -p /app/data && chown -R sub2api:sub2api /app
|
||||
RUN mkdir -p /app/data && chown sub2api:sub2api /app/data
|
||||
|
||||
# Switch to non-root user
|
||||
USER sub2api
|
||||
@@ -109,7 +110,7 @@ EXPOSE 8080
|
||||
|
||||
# Health check
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=10s --retries=3 \
|
||||
CMD curl -f http://localhost:${SERVER_PORT:-8080}/health || exit 1
|
||||
CMD wget -q -T 5 -O /dev/null http://localhost:${SERVER_PORT:-8080}/health || exit 1
|
||||
|
||||
# Run the application
|
||||
ENTRYPOINT ["/app/sub2api"]
|
||||
|
||||
9
Makefile
9
Makefile
@@ -1,4 +1,4 @@
|
||||
.PHONY: build build-backend build-frontend test test-backend test-frontend secret-scan
|
||||
.PHONY: build build-backend build-frontend build-datamanagementd test test-backend test-frontend test-datamanagementd secret-scan
|
||||
|
||||
# 一键编译前后端
|
||||
build: build-backend build-frontend
|
||||
@@ -11,6 +11,10 @@ build-backend:
|
||||
build-frontend:
|
||||
@pnpm --dir frontend run build
|
||||
|
||||
# 编译 datamanagementd(宿主机数据管理进程)
|
||||
build-datamanagementd:
|
||||
@cd datamanagement && go build -o datamanagementd ./cmd/datamanagementd
|
||||
|
||||
# 运行测试(后端 + 前端)
|
||||
test: test-backend test-frontend
|
||||
|
||||
@@ -21,5 +25,8 @@ test-frontend:
|
||||
@pnpm --dir frontend run lint:check
|
||||
@pnpm --dir frontend run typecheck
|
||||
|
||||
test-datamanagementd:
|
||||
@cd datamanagement && go test ./...
|
||||
|
||||
secret-scan:
|
||||
@python3 tools/secret_scan.py
|
||||
|
||||
16
README_CN.md
16
README_CN.md
@@ -62,8 +62,6 @@ Sub2API 是一个 AI API 网关平台,用于分发和管理 AI 产品订阅(
|
||||
- 当请求包含 `function_call_output` 时,需要携带 `previous_response_id`,或在 `input` 中包含带 `call_id` 的 `tool_call`/`function_call`,或带非空 `id` 且与 `function_call_output.call_id` 匹配的 `item_reference`。
|
||||
- 若依赖上游历史记录,网关会强制 `store=true` 并需要复用 `previous_response_id`,以避免出现 “No tool call found for function call output” 错误。
|
||||
|
||||
---
|
||||
|
||||
## 部署方式
|
||||
|
||||
### 方式一:脚本安装(推荐)
|
||||
@@ -139,8 +137,6 @@ curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/install
|
||||
|
||||
使用 Docker Compose 部署,包含 PostgreSQL 和 Redis 容器。
|
||||
|
||||
如果你的服务器是 **Ubuntu 24.04**,建议直接参考:`deploy/ubuntu24-docker-compose-aicodex.md`,其中包含「安装最新版 Docker + docker-compose-aicodex.yml 部署」的完整步骤。
|
||||
|
||||
#### 前置条件
|
||||
|
||||
- Docker 20.10+
|
||||
@@ -246,6 +242,18 @@ docker-compose -f docker-compose.local.yml logs -f sub2api
|
||||
|
||||
**推荐:** 使用 `docker-compose.local.yml`(脚本部署)以便更轻松地管理数据。
|
||||
|
||||
#### 启用“数据管理”功能(datamanagementd)
|
||||
|
||||
如需启用管理后台“数据管理”,需要额外部署宿主机数据管理进程 `datamanagementd`。
|
||||
|
||||
关键点:
|
||||
|
||||
- 主进程固定探测:`/tmp/sub2api-datamanagement.sock`
|
||||
- 只有该 Socket 可连通时,数据管理功能才会开启
|
||||
- Docker 场景需将宿主机 Socket 挂载到容器同路径
|
||||
|
||||
详细部署步骤见:`deploy/DATAMANAGEMENTD_CN.md`
|
||||
|
||||
#### 访问
|
||||
|
||||
在浏览器中打开 `http://你的服务器IP:8080`
|
||||
|
||||
@@ -5,6 +5,7 @@ linters:
|
||||
enable:
|
||||
- depguard
|
||||
- errcheck
|
||||
- gosec
|
||||
- govet
|
||||
- ineffassign
|
||||
- staticcheck
|
||||
@@ -42,6 +43,22 @@ linters:
|
||||
desc: "handler must not import gorm"
|
||||
- pkg: github.com/redis/go-redis/v9
|
||||
desc: "handler must not import redis"
|
||||
gosec:
|
||||
excludes:
|
||||
- G101
|
||||
- G103
|
||||
- G104
|
||||
- G109
|
||||
- G115
|
||||
- G201
|
||||
- G202
|
||||
- G301
|
||||
- G302
|
||||
- G304
|
||||
- G306
|
||||
- G404
|
||||
severity: high
|
||||
confidence: high
|
||||
errcheck:
|
||||
# Report about not checking of errors in type assertions: `a := b.(MyStruct)`.
|
||||
# Such cases aren't reported by default.
|
||||
|
||||
@@ -1,5 +0,0 @@
|
||||
{
|
||||
"global": {
|
||||
"exclude": "G704"
|
||||
}
|
||||
}
|
||||
@@ -1,7 +1,14 @@
|
||||
.PHONY: build test test-unit test-integration test-e2e
|
||||
.PHONY: build generate test test-unit test-integration test-e2e
|
||||
|
||||
VERSION ?= $(shell tr -d '\r\n' < ./cmd/server/VERSION)
|
||||
LDFLAGS ?= -s -w -X main.Version=$(VERSION)
|
||||
|
||||
build:
|
||||
go build -o bin/server ./cmd/server
|
||||
CGO_ENABLED=0 go build -ldflags="$(LDFLAGS)" -trimpath -o bin/server ./cmd/server
|
||||
|
||||
generate:
|
||||
go generate ./ent
|
||||
go generate ./cmd/server
|
||||
|
||||
test:
|
||||
go test ./...
|
||||
|
||||
@@ -33,7 +33,7 @@ func main() {
|
||||
}()
|
||||
|
||||
userRepo := repository.NewUserRepository(client, sqlDB)
|
||||
authService := service.NewAuthService(userRepo, nil, nil, cfg, nil, nil, nil, nil, nil)
|
||||
authService := service.NewAuthService(userRepo, nil, nil, cfg, nil, nil, nil, nil, nil, nil)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
@@ -1 +1 @@
|
||||
0.1.87.18
|
||||
0.1.90.1
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"context"
|
||||
"log"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/ent"
|
||||
@@ -84,16 +85,19 @@ func provideCleanup(
|
||||
openaiOAuth *service.OpenAIOAuthService,
|
||||
geminiOAuth *service.GeminiOAuthService,
|
||||
antigravityOAuth *service.AntigravityOAuthService,
|
||||
openAIGateway *service.OpenAIGatewayService,
|
||||
) func() {
|
||||
return func() {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Cleanup steps in reverse dependency order
|
||||
cleanupSteps := []struct {
|
||||
type cleanupStep struct {
|
||||
name string
|
||||
fn func() error
|
||||
}{
|
||||
}
|
||||
|
||||
// 应用层清理步骤可并行执行,基础设施资源(Redis/Ent)最后按顺序关闭。
|
||||
parallelSteps := []cleanupStep{
|
||||
{"OpsScheduledReportService", func() error {
|
||||
if opsScheduledReport != nil {
|
||||
opsScheduledReport.Stop()
|
||||
@@ -206,23 +210,60 @@ func provideCleanup(
|
||||
antigravityOAuth.Stop()
|
||||
return nil
|
||||
}},
|
||||
{"OpenAIWSPool", func() error {
|
||||
if openAIGateway != nil {
|
||||
openAIGateway.CloseOpenAIWSPool()
|
||||
}
|
||||
return nil
|
||||
}},
|
||||
}
|
||||
|
||||
infraSteps := []cleanupStep{
|
||||
{"Redis", func() error {
|
||||
if rdb == nil {
|
||||
return nil
|
||||
}
|
||||
return rdb.Close()
|
||||
}},
|
||||
{"Ent", func() error {
|
||||
if entClient == nil {
|
||||
return nil
|
||||
}
|
||||
return entClient.Close()
|
||||
}},
|
||||
}
|
||||
|
||||
for _, step := range cleanupSteps {
|
||||
if err := step.fn(); err != nil {
|
||||
log.Printf("[Cleanup] %s failed: %v", step.name, err)
|
||||
// Continue with remaining cleanup steps even if one fails
|
||||
} else {
|
||||
runParallel := func(steps []cleanupStep) {
|
||||
var wg sync.WaitGroup
|
||||
for i := range steps {
|
||||
step := steps[i]
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
if err := step.fn(); err != nil {
|
||||
log.Printf("[Cleanup] %s failed: %v", step.name, err)
|
||||
return
|
||||
}
|
||||
log.Printf("[Cleanup] %s succeeded", step.name)
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
runSequential := func(steps []cleanupStep) {
|
||||
for i := range steps {
|
||||
step := steps[i]
|
||||
if err := step.fn(); err != nil {
|
||||
log.Printf("[Cleanup] %s failed: %v", step.name, err)
|
||||
continue
|
||||
}
|
||||
log.Printf("[Cleanup] %s succeeded", step.name)
|
||||
}
|
||||
}
|
||||
|
||||
runParallel(parallelSteps)
|
||||
runSequential(infraSteps)
|
||||
|
||||
// Check if context timed out
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
"github.com/redis/go-redis/v9"
|
||||
"log"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
@@ -47,7 +48,8 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
||||
redisClient := repository.ProvideRedis(configConfig)
|
||||
refreshTokenCache := repository.NewRefreshTokenCache(redisClient)
|
||||
settingRepository := repository.NewSettingRepository(client)
|
||||
settingService := service.NewSettingService(settingRepository, configConfig)
|
||||
groupRepository := repository.NewGroupRepository(client, db)
|
||||
settingService := service.ProvideSettingService(settingRepository, groupRepository, configConfig)
|
||||
emailCache := repository.NewEmailCache(redisClient)
|
||||
emailService := service.NewEmailService(settingRepository, emailCache)
|
||||
turnstileVerifier := repository.NewTurnstileVerifier()
|
||||
@@ -56,17 +58,17 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
||||
promoCodeRepository := repository.NewPromoCodeRepository(client)
|
||||
billingCache := repository.NewBillingCache(redisClient)
|
||||
userSubscriptionRepository := repository.NewUserSubscriptionRepository(client)
|
||||
billingCacheService := service.NewBillingCacheService(billingCache, userRepository, userSubscriptionRepository, configConfig)
|
||||
apiKeyRepository := repository.NewAPIKeyRepository(client)
|
||||
groupRepository := repository.NewGroupRepository(client, db)
|
||||
apiKeyRepository := repository.NewAPIKeyRepository(client, db)
|
||||
billingCacheService := service.NewBillingCacheService(billingCache, userRepository, userSubscriptionRepository, apiKeyRepository, configConfig)
|
||||
userGroupRateRepository := repository.NewUserGroupRateRepository(db)
|
||||
apiKeyCache := repository.NewAPIKeyCache(redisClient)
|
||||
apiKeyService := service.NewAPIKeyService(apiKeyRepository, userRepository, groupRepository, userSubscriptionRepository, userGroupRateRepository, apiKeyCache, configConfig)
|
||||
apiKeyService.SetRateLimitCacheInvalidator(billingCache)
|
||||
apiKeyAuthCacheInvalidator := service.ProvideAPIKeyAuthCacheInvalidator(apiKeyService)
|
||||
promoService := service.NewPromoService(promoCodeRepository, userRepository, billingCacheService, client, apiKeyAuthCacheInvalidator)
|
||||
authService := service.NewAuthService(userRepository, redeemCodeRepository, refreshTokenCache, configConfig, settingService, emailService, turnstileService, emailQueueService, promoService)
|
||||
userService := service.NewUserService(userRepository, apiKeyAuthCacheInvalidator, billingCache)
|
||||
subscriptionService := service.NewSubscriptionService(groupRepository, userSubscriptionRepository, billingCacheService, client, configConfig)
|
||||
authService := service.NewAuthService(userRepository, redeemCodeRepository, refreshTokenCache, configConfig, settingService, emailService, turnstileService, emailQueueService, promoService, subscriptionService)
|
||||
userService := service.NewUserService(userRepository, apiKeyAuthCacheInvalidator, billingCache)
|
||||
redeemCache := repository.NewRedeemCache(redisClient)
|
||||
redeemService := service.NewRedeemService(redeemCodeRepository, userRepository, subscriptionService, redeemCache, billingCacheService, client, apiKeyAuthCacheInvalidator)
|
||||
secretEncryptor, err := repository.NewAESEncryptor(configConfig)
|
||||
@@ -102,7 +104,7 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
||||
proxyRepository := repository.NewProxyRepository(client, db)
|
||||
proxyExitInfoProber := repository.NewProxyExitInfoProber(configConfig)
|
||||
proxyLatencyCache := repository.NewProxyLatencyCache(redisClient)
|
||||
adminService := service.NewAdminService(userRepository, groupRepository, accountRepository, soraAccountRepository, proxyRepository, apiKeyRepository, redeemCodeRepository, userGroupRateRepository, billingCacheService, proxyExitInfoProber, proxyLatencyCache, apiKeyAuthCacheInvalidator)
|
||||
adminService := service.NewAdminService(userRepository, groupRepository, accountRepository, soraAccountRepository, proxyRepository, apiKeyRepository, redeemCodeRepository, userGroupRateRepository, billingCacheService, proxyExitInfoProber, proxyLatencyCache, apiKeyAuthCacheInvalidator, client, settingService, subscriptionService)
|
||||
concurrencyCache := repository.ProvideConcurrencyCache(redisClient, configConfig)
|
||||
concurrencyService := service.ProvideConcurrencyService(concurrencyCache, accountRepository, configConfig)
|
||||
adminUserHandler := admin.NewUserHandler(adminService, concurrencyService)
|
||||
@@ -137,8 +139,11 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
||||
accountTestService := service.NewAccountTestService(accountRepository, geminiTokenProvider, antigravityGatewayService, httpUpstream, configConfig)
|
||||
crsSyncService := service.NewCRSSyncService(accountRepository, proxyRepository, oAuthService, openAIOAuthService, geminiOAuthService, configConfig)
|
||||
sessionLimitCache := repository.ProvideSessionLimitCache(redisClient, configConfig)
|
||||
accountHandler := admin.NewAccountHandler(adminService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, rateLimitService, accountUsageService, accountTestService, concurrencyService, crsSyncService, sessionLimitCache, compositeTokenCacheInvalidator)
|
||||
rpmCache := repository.NewRPMCache(redisClient)
|
||||
accountHandler := admin.NewAccountHandler(adminService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, rateLimitService, accountUsageService, accountTestService, concurrencyService, crsSyncService, sessionLimitCache, rpmCache, compositeTokenCacheInvalidator)
|
||||
adminAnnouncementHandler := admin.NewAnnouncementHandler(announcementService)
|
||||
dataManagementService := service.NewDataManagementService()
|
||||
dataManagementHandler := admin.NewDataManagementHandler(dataManagementService)
|
||||
oAuthHandler := admin.NewOAuthHandler(oAuthService)
|
||||
openAIOAuthHandler := admin.NewOpenAIOAuthHandler(openAIOAuthService, adminService)
|
||||
geminiOAuthHandler := admin.NewGeminiOAuthHandler(geminiOAuthService)
|
||||
@@ -157,13 +162,18 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
||||
deferredService := service.ProvideDeferredService(accountRepository, timingWheelService)
|
||||
claudeTokenProvider := service.NewClaudeTokenProvider(accountRepository, geminiTokenCache, oAuthService)
|
||||
digestSessionStore := service.NewDigestSessionStore()
|
||||
gatewayService := service.NewGatewayService(accountRepository, groupRepository, usageLogRepository, userRepository, userSubscriptionRepository, userGroupRateRepository, gatewayCache, configConfig, schedulerSnapshotService, concurrencyService, billingService, rateLimitService, billingCacheService, identityService, httpUpstream, deferredService, claudeTokenProvider, sessionLimitCache, digestSessionStore)
|
||||
gatewayService := service.NewGatewayService(accountRepository, groupRepository, usageLogRepository, userRepository, userSubscriptionRepository, userGroupRateRepository, gatewayCache, configConfig, schedulerSnapshotService, concurrencyService, billingService, rateLimitService, billingCacheService, identityService, httpUpstream, deferredService, claudeTokenProvider, sessionLimitCache, rpmCache, digestSessionStore)
|
||||
openAITokenProvider := service.NewOpenAITokenProvider(accountRepository, geminiTokenCache, openAIOAuthService)
|
||||
openAIGatewayService := service.NewOpenAIGatewayService(accountRepository, usageLogRepository, userRepository, userSubscriptionRepository, gatewayCache, configConfig, schedulerSnapshotService, concurrencyService, billingService, rateLimitService, billingCacheService, httpUpstream, deferredService, openAITokenProvider)
|
||||
geminiMessagesCompatService := service.NewGeminiMessagesCompatService(accountRepository, groupRepository, gatewayCache, schedulerSnapshotService, geminiTokenProvider, rateLimitService, httpUpstream, antigravityGatewayService, configConfig)
|
||||
opsSystemLogSink := service.ProvideOpsSystemLogSink(opsRepository)
|
||||
opsService := service.NewOpsService(opsRepository, settingRepository, configConfig, accountRepository, userRepository, concurrencyService, gatewayService, openAIGatewayService, geminiMessagesCompatService, antigravityGatewayService, opsSystemLogSink)
|
||||
settingHandler := admin.NewSettingHandler(settingService, emailService, turnstileService, opsService)
|
||||
soraS3Storage := service.NewSoraS3Storage(settingService)
|
||||
settingService.SetOnS3UpdateCallback(soraS3Storage.RefreshClient)
|
||||
soraGenerationRepository := repository.NewSoraGenerationRepository(db)
|
||||
soraQuotaService := service.NewSoraQuotaService(userRepository, groupRepository, settingService)
|
||||
soraGenerationService := service.NewSoraGenerationService(soraGenerationRepository, soraS3Storage, soraQuotaService)
|
||||
settingHandler := admin.NewSettingHandler(settingService, emailService, turnstileService, opsService, soraS3Storage)
|
||||
opsHandler := admin.NewOpsHandler(opsService)
|
||||
updateCache := repository.NewUpdateCache(redisClient)
|
||||
gitHubReleaseClient := repository.ProvideGitHubReleaseClient(configConfig)
|
||||
@@ -184,19 +194,23 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
||||
errorPassthroughCache := repository.NewErrorPassthroughCache(redisClient)
|
||||
errorPassthroughService := service.NewErrorPassthroughService(errorPassthroughRepository, errorPassthroughCache)
|
||||
errorPassthroughHandler := admin.NewErrorPassthroughHandler(errorPassthroughService)
|
||||
adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, adminAnnouncementHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, antigravityOAuthHandler, proxyHandler, adminRedeemHandler, promoHandler, settingHandler, opsHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler, userAttributeHandler, errorPassthroughHandler)
|
||||
adminAPIKeyHandler := admin.NewAdminAPIKeyHandler(adminService)
|
||||
adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, adminAnnouncementHandler, dataManagementHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, antigravityOAuthHandler, proxyHandler, adminRedeemHandler, promoHandler, settingHandler, opsHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler, userAttributeHandler, errorPassthroughHandler, adminAPIKeyHandler)
|
||||
usageRecordWorkerPool := service.NewUsageRecordWorkerPool(configConfig)
|
||||
gatewayHandler := handler.NewGatewayHandler(gatewayService, geminiMessagesCompatService, antigravityGatewayService, userService, concurrencyService, billingCacheService, usageService, apiKeyService, usageRecordWorkerPool, errorPassthroughService, configConfig)
|
||||
userMsgQueueCache := repository.NewUserMsgQueueCache(redisClient)
|
||||
userMessageQueueService := service.ProvideUserMessageQueueService(userMsgQueueCache, rpmCache, configConfig)
|
||||
gatewayHandler := handler.NewGatewayHandler(gatewayService, geminiMessagesCompatService, antigravityGatewayService, userService, concurrencyService, billingCacheService, usageService, apiKeyService, usageRecordWorkerPool, errorPassthroughService, userMessageQueueService, configConfig, settingService)
|
||||
openAIGatewayHandler := handler.NewOpenAIGatewayHandler(openAIGatewayService, concurrencyService, billingCacheService, apiKeyService, usageRecordWorkerPool, errorPassthroughService, configConfig)
|
||||
soraSDKClient := service.ProvideSoraSDKClient(configConfig, httpUpstream, openAITokenProvider, accountRepository, soraAccountRepository)
|
||||
soraMediaStorage := service.ProvideSoraMediaStorage(configConfig)
|
||||
soraGatewayService := service.NewSoraGatewayService(soraSDKClient, soraMediaStorage, rateLimitService, configConfig)
|
||||
soraGatewayService := service.NewSoraGatewayService(soraSDKClient, rateLimitService, httpUpstream, configConfig)
|
||||
soraClientHandler := handler.NewSoraClientHandler(soraGenerationService, soraQuotaService, soraS3Storage, soraGatewayService, gatewayService, soraMediaStorage, apiKeyService)
|
||||
soraGatewayHandler := handler.NewSoraGatewayHandler(gatewayService, soraGatewayService, concurrencyService, billingCacheService, usageRecordWorkerPool, configConfig)
|
||||
handlerSettingHandler := handler.ProvideSettingHandler(settingService, buildInfo)
|
||||
totpHandler := handler.NewTotpHandler(totpService)
|
||||
idempotencyCoordinator := service.ProvideIdempotencyCoordinator(idempotencyRepository, configConfig)
|
||||
idempotencyCleanupService := service.ProvideIdempotencyCleanupService(idempotencyRepository, configConfig)
|
||||
handlers := handler.ProvideHandlers(authHandler, userHandler, apiKeyHandler, usageHandler, redeemHandler, subscriptionHandler, announcementHandler, adminHandlers, gatewayHandler, openAIGatewayHandler, soraGatewayHandler, handlerSettingHandler, totpHandler, idempotencyCoordinator, idempotencyCleanupService)
|
||||
handlers := handler.ProvideHandlers(authHandler, userHandler, apiKeyHandler, usageHandler, redeemHandler, subscriptionHandler, announcementHandler, adminHandlers, gatewayHandler, openAIGatewayHandler, soraGatewayHandler, soraClientHandler, handlerSettingHandler, totpHandler, idempotencyCoordinator, idempotencyCleanupService)
|
||||
jwtAuthMiddleware := middleware.NewJWTAuthMiddleware(authService, userService)
|
||||
adminAuthMiddleware := middleware.NewAdminAuthMiddleware(authService, userService, settingService)
|
||||
apiKeyAuthMiddleware := middleware.NewAPIKeyAuthMiddleware(apiKeyService, subscriptionService, configConfig)
|
||||
@@ -208,10 +222,10 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
||||
opsCleanupService := service.ProvideOpsCleanupService(opsRepository, db, redisClient, configConfig)
|
||||
opsScheduledReportService := service.ProvideOpsScheduledReportService(opsService, userService, emailService, redisClient, configConfig)
|
||||
soraMediaCleanupService := service.ProvideSoraMediaCleanupService(soraMediaStorage, configConfig)
|
||||
tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, soraAccountRepository, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, compositeTokenCacheInvalidator, schedulerCache, configConfig)
|
||||
tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, soraAccountRepository, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, compositeTokenCacheInvalidator, schedulerCache, configConfig, tempUnschedCache)
|
||||
accountExpiryService := service.ProvideAccountExpiryService(accountRepository)
|
||||
subscriptionExpiryService := service.ProvideSubscriptionExpiryService(userSubscriptionRepository)
|
||||
v := provideCleanup(client, redisClient, opsMetricsCollector, opsAggregationService, opsAlertEvaluatorService, opsCleanupService, opsScheduledReportService, opsSystemLogSink, soraMediaCleanupService, schedulerSnapshotService, tokenRefreshService, accountExpiryService, subscriptionExpiryService, usageCleanupService, idempotencyCleanupService, pricingService, emailQueueService, billingCacheService, usageRecordWorkerPool, subscriptionService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService)
|
||||
v := provideCleanup(client, redisClient, opsMetricsCollector, opsAggregationService, opsAlertEvaluatorService, opsCleanupService, opsScheduledReportService, opsSystemLogSink, soraMediaCleanupService, schedulerSnapshotService, tokenRefreshService, accountExpiryService, subscriptionExpiryService, usageCleanupService, idempotencyCleanupService, pricingService, emailQueueService, billingCacheService, usageRecordWorkerPool, subscriptionService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, openAIGatewayService)
|
||||
application := &Application{
|
||||
Server: httpServer,
|
||||
Cleanup: v,
|
||||
@@ -258,15 +272,18 @@ func provideCleanup(
|
||||
openaiOAuth *service.OpenAIOAuthService,
|
||||
geminiOAuth *service.GeminiOAuthService,
|
||||
antigravityOAuth *service.AntigravityOAuthService,
|
||||
openAIGateway *service.OpenAIGatewayService,
|
||||
) func() {
|
||||
return func() {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
cleanupSteps := []struct {
|
||||
type cleanupStep struct {
|
||||
name string
|
||||
fn func() error
|
||||
}{
|
||||
}
|
||||
|
||||
parallelSteps := []cleanupStep{
|
||||
{"OpsScheduledReportService", func() error {
|
||||
if opsScheduledReport != nil {
|
||||
opsScheduledReport.Stop()
|
||||
@@ -379,23 +396,60 @@ func provideCleanup(
|
||||
antigravityOAuth.Stop()
|
||||
return nil
|
||||
}},
|
||||
{"OpenAIWSPool", func() error {
|
||||
if openAIGateway != nil {
|
||||
openAIGateway.CloseOpenAIWSPool()
|
||||
}
|
||||
return nil
|
||||
}},
|
||||
}
|
||||
|
||||
infraSteps := []cleanupStep{
|
||||
{"Redis", func() error {
|
||||
if rdb == nil {
|
||||
return nil
|
||||
}
|
||||
return rdb.Close()
|
||||
}},
|
||||
{"Ent", func() error {
|
||||
if entClient == nil {
|
||||
return nil
|
||||
}
|
||||
return entClient.Close()
|
||||
}},
|
||||
}
|
||||
|
||||
for _, step := range cleanupSteps {
|
||||
if err := step.fn(); err != nil {
|
||||
log.Printf("[Cleanup] %s failed: %v", step.name, err)
|
||||
runParallel := func(steps []cleanupStep) {
|
||||
var wg sync.WaitGroup
|
||||
for i := range steps {
|
||||
step := steps[i]
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
if err := step.fn(); err != nil {
|
||||
log.Printf("[Cleanup] %s failed: %v", step.name, err)
|
||||
return
|
||||
}
|
||||
log.Printf("[Cleanup] %s succeeded", step.name)
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
} else {
|
||||
runSequential := func(steps []cleanupStep) {
|
||||
for i := range steps {
|
||||
step := steps[i]
|
||||
if err := step.fn(); err != nil {
|
||||
log.Printf("[Cleanup] %s failed: %v", step.name, err)
|
||||
continue
|
||||
}
|
||||
log.Printf("[Cleanup] %s succeeded", step.name)
|
||||
}
|
||||
}
|
||||
|
||||
runParallel(parallelSteps)
|
||||
runSequential(infraSteps)
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
log.Printf("[Cleanup] Warning: cleanup timed out after 10 seconds")
|
||||
|
||||
82
backend/cmd/server/wire_gen_test.go
Normal file
82
backend/cmd/server/wire_gen_test.go
Normal file
@@ -0,0 +1,82 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/config"
|
||||
"github.com/Wei-Shaw/sub2api/internal/handler"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestProvideServiceBuildInfo(t *testing.T) {
|
||||
in := handler.BuildInfo{
|
||||
Version: "v-test",
|
||||
BuildType: "release",
|
||||
}
|
||||
out := provideServiceBuildInfo(in)
|
||||
require.Equal(t, in.Version, out.Version)
|
||||
require.Equal(t, in.BuildType, out.BuildType)
|
||||
}
|
||||
|
||||
func TestProvideCleanup_WithMinimalDependencies_NoPanic(t *testing.T) {
|
||||
cfg := &config.Config{}
|
||||
|
||||
oauthSvc := service.NewOAuthService(nil, nil)
|
||||
openAIOAuthSvc := service.NewOpenAIOAuthService(nil, nil)
|
||||
geminiOAuthSvc := service.NewGeminiOAuthService(nil, nil, nil, nil, cfg)
|
||||
antigravityOAuthSvc := service.NewAntigravityOAuthService(nil)
|
||||
|
||||
tokenRefreshSvc := service.NewTokenRefreshService(
|
||||
nil,
|
||||
oauthSvc,
|
||||
openAIOAuthSvc,
|
||||
geminiOAuthSvc,
|
||||
antigravityOAuthSvc,
|
||||
nil,
|
||||
nil,
|
||||
cfg,
|
||||
nil,
|
||||
)
|
||||
accountExpirySvc := service.NewAccountExpiryService(nil, time.Second)
|
||||
subscriptionExpirySvc := service.NewSubscriptionExpiryService(nil, time.Second)
|
||||
pricingSvc := service.NewPricingService(cfg, nil)
|
||||
emailQueueSvc := service.NewEmailQueueService(nil, 1)
|
||||
billingCacheSvc := service.NewBillingCacheService(nil, nil, nil, nil, cfg)
|
||||
idempotencyCleanupSvc := service.NewIdempotencyCleanupService(nil, cfg)
|
||||
schedulerSnapshotSvc := service.NewSchedulerSnapshotService(nil, nil, nil, nil, cfg)
|
||||
opsSystemLogSinkSvc := service.NewOpsSystemLogSink(nil)
|
||||
|
||||
cleanup := provideCleanup(
|
||||
nil, // entClient
|
||||
nil, // redis
|
||||
&service.OpsMetricsCollector{},
|
||||
&service.OpsAggregationService{},
|
||||
&service.OpsAlertEvaluatorService{},
|
||||
&service.OpsCleanupService{},
|
||||
&service.OpsScheduledReportService{},
|
||||
opsSystemLogSinkSvc,
|
||||
&service.SoraMediaCleanupService{},
|
||||
schedulerSnapshotSvc,
|
||||
tokenRefreshSvc,
|
||||
accountExpirySvc,
|
||||
subscriptionExpirySvc,
|
||||
&service.UsageCleanupService{},
|
||||
idempotencyCleanupSvc,
|
||||
pricingSvc,
|
||||
emailQueueSvc,
|
||||
billingCacheSvc,
|
||||
&service.UsageRecordWorkerPool{},
|
||||
&service.SubscriptionService{},
|
||||
oauthSvc,
|
||||
openAIOAuthSvc,
|
||||
geminiOAuthSvc,
|
||||
antigravityOAuthSvc,
|
||||
nil, // openAIGateway
|
||||
)
|
||||
|
||||
require.NotPanics(t, func() {
|
||||
cleanup()
|
||||
})
|
||||
}
|
||||
@@ -63,6 +63,10 @@ type Account struct {
|
||||
RateLimitResetAt *time.Time `json:"rate_limit_reset_at,omitempty"`
|
||||
// OverloadUntil holds the value of the "overload_until" field.
|
||||
OverloadUntil *time.Time `json:"overload_until,omitempty"`
|
||||
// TempUnschedulableUntil holds the value of the "temp_unschedulable_until" field.
|
||||
TempUnschedulableUntil *time.Time `json:"temp_unschedulable_until,omitempty"`
|
||||
// TempUnschedulableReason holds the value of the "temp_unschedulable_reason" field.
|
||||
TempUnschedulableReason *string `json:"temp_unschedulable_reason,omitempty"`
|
||||
// SessionWindowStart holds the value of the "session_window_start" field.
|
||||
SessionWindowStart *time.Time `json:"session_window_start,omitempty"`
|
||||
// SessionWindowEnd holds the value of the "session_window_end" field.
|
||||
@@ -141,9 +145,9 @@ func (*Account) scanValues(columns []string) ([]any, error) {
|
||||
values[i] = new(sql.NullFloat64)
|
||||
case account.FieldID, account.FieldProxyID, account.FieldConcurrency, account.FieldPriority:
|
||||
values[i] = new(sql.NullInt64)
|
||||
case account.FieldName, account.FieldNotes, account.FieldPlatform, account.FieldType, account.FieldStatus, account.FieldErrorMessage, account.FieldSessionWindowStatus:
|
||||
case account.FieldName, account.FieldNotes, account.FieldPlatform, account.FieldType, account.FieldStatus, account.FieldErrorMessage, account.FieldTempUnschedulableReason, account.FieldSessionWindowStatus:
|
||||
values[i] = new(sql.NullString)
|
||||
case account.FieldCreatedAt, account.FieldUpdatedAt, account.FieldDeletedAt, account.FieldLastUsedAt, account.FieldExpiresAt, account.FieldRateLimitedAt, account.FieldRateLimitResetAt, account.FieldOverloadUntil, account.FieldSessionWindowStart, account.FieldSessionWindowEnd:
|
||||
case account.FieldCreatedAt, account.FieldUpdatedAt, account.FieldDeletedAt, account.FieldLastUsedAt, account.FieldExpiresAt, account.FieldRateLimitedAt, account.FieldRateLimitResetAt, account.FieldOverloadUntil, account.FieldTempUnschedulableUntil, account.FieldSessionWindowStart, account.FieldSessionWindowEnd:
|
||||
values[i] = new(sql.NullTime)
|
||||
default:
|
||||
values[i] = new(sql.UnknownType)
|
||||
@@ -311,6 +315,20 @@ func (_m *Account) assignValues(columns []string, values []any) error {
|
||||
_m.OverloadUntil = new(time.Time)
|
||||
*_m.OverloadUntil = value.Time
|
||||
}
|
||||
case account.FieldTempUnschedulableUntil:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field temp_unschedulable_until", values[i])
|
||||
} else if value.Valid {
|
||||
_m.TempUnschedulableUntil = new(time.Time)
|
||||
*_m.TempUnschedulableUntil = value.Time
|
||||
}
|
||||
case account.FieldTempUnschedulableReason:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field temp_unschedulable_reason", values[i])
|
||||
} else if value.Valid {
|
||||
_m.TempUnschedulableReason = new(string)
|
||||
*_m.TempUnschedulableReason = value.String
|
||||
}
|
||||
case account.FieldSessionWindowStart:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field session_window_start", values[i])
|
||||
@@ -472,6 +490,16 @@ func (_m *Account) String() string {
|
||||
builder.WriteString(v.Format(time.ANSIC))
|
||||
}
|
||||
builder.WriteString(", ")
|
||||
if v := _m.TempUnschedulableUntil; v != nil {
|
||||
builder.WriteString("temp_unschedulable_until=")
|
||||
builder.WriteString(v.Format(time.ANSIC))
|
||||
}
|
||||
builder.WriteString(", ")
|
||||
if v := _m.TempUnschedulableReason; v != nil {
|
||||
builder.WriteString("temp_unschedulable_reason=")
|
||||
builder.WriteString(*v)
|
||||
}
|
||||
builder.WriteString(", ")
|
||||
if v := _m.SessionWindowStart; v != nil {
|
||||
builder.WriteString("session_window_start=")
|
||||
builder.WriteString(v.Format(time.ANSIC))
|
||||
|
||||
@@ -59,6 +59,10 @@ const (
|
||||
FieldRateLimitResetAt = "rate_limit_reset_at"
|
||||
// FieldOverloadUntil holds the string denoting the overload_until field in the database.
|
||||
FieldOverloadUntil = "overload_until"
|
||||
// FieldTempUnschedulableUntil holds the string denoting the temp_unschedulable_until field in the database.
|
||||
FieldTempUnschedulableUntil = "temp_unschedulable_until"
|
||||
// FieldTempUnschedulableReason holds the string denoting the temp_unschedulable_reason field in the database.
|
||||
FieldTempUnschedulableReason = "temp_unschedulable_reason"
|
||||
// FieldSessionWindowStart holds the string denoting the session_window_start field in the database.
|
||||
FieldSessionWindowStart = "session_window_start"
|
||||
// FieldSessionWindowEnd holds the string denoting the session_window_end field in the database.
|
||||
@@ -128,6 +132,8 @@ var Columns = []string{
|
||||
FieldRateLimitedAt,
|
||||
FieldRateLimitResetAt,
|
||||
FieldOverloadUntil,
|
||||
FieldTempUnschedulableUntil,
|
||||
FieldTempUnschedulableReason,
|
||||
FieldSessionWindowStart,
|
||||
FieldSessionWindowEnd,
|
||||
FieldSessionWindowStatus,
|
||||
@@ -299,6 +305,16 @@ func ByOverloadUntil(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldOverloadUntil, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByTempUnschedulableUntil orders the results by the temp_unschedulable_until field.
|
||||
func ByTempUnschedulableUntil(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldTempUnschedulableUntil, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByTempUnschedulableReason orders the results by the temp_unschedulable_reason field.
|
||||
func ByTempUnschedulableReason(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldTempUnschedulableReason, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// BySessionWindowStart orders the results by the session_window_start field.
|
||||
func BySessionWindowStart(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldSessionWindowStart, opts...).ToFunc()
|
||||
|
||||
@@ -155,6 +155,16 @@ func OverloadUntil(v time.Time) predicate.Account {
|
||||
return predicate.Account(sql.FieldEQ(FieldOverloadUntil, v))
|
||||
}
|
||||
|
||||
// TempUnschedulableUntil applies equality check predicate on the "temp_unschedulable_until" field. It's identical to TempUnschedulableUntilEQ.
|
||||
func TempUnschedulableUntil(v time.Time) predicate.Account {
|
||||
return predicate.Account(sql.FieldEQ(FieldTempUnschedulableUntil, v))
|
||||
}
|
||||
|
||||
// TempUnschedulableReason applies equality check predicate on the "temp_unschedulable_reason" field. It's identical to TempUnschedulableReasonEQ.
|
||||
func TempUnschedulableReason(v string) predicate.Account {
|
||||
return predicate.Account(sql.FieldEQ(FieldTempUnschedulableReason, v))
|
||||
}
|
||||
|
||||
// SessionWindowStart applies equality check predicate on the "session_window_start" field. It's identical to SessionWindowStartEQ.
|
||||
func SessionWindowStart(v time.Time) predicate.Account {
|
||||
return predicate.Account(sql.FieldEQ(FieldSessionWindowStart, v))
|
||||
@@ -1130,6 +1140,131 @@ func OverloadUntilNotNil() predicate.Account {
|
||||
return predicate.Account(sql.FieldNotNull(FieldOverloadUntil))
|
||||
}
|
||||
|
||||
// TempUnschedulableUntilEQ applies the EQ predicate on the "temp_unschedulable_until" field.
|
||||
func TempUnschedulableUntilEQ(v time.Time) predicate.Account {
|
||||
return predicate.Account(sql.FieldEQ(FieldTempUnschedulableUntil, v))
|
||||
}
|
||||
|
||||
// TempUnschedulableUntilNEQ applies the NEQ predicate on the "temp_unschedulable_until" field.
|
||||
func TempUnschedulableUntilNEQ(v time.Time) predicate.Account {
|
||||
return predicate.Account(sql.FieldNEQ(FieldTempUnschedulableUntil, v))
|
||||
}
|
||||
|
||||
// TempUnschedulableUntilIn applies the In predicate on the "temp_unschedulable_until" field.
|
||||
func TempUnschedulableUntilIn(vs ...time.Time) predicate.Account {
|
||||
return predicate.Account(sql.FieldIn(FieldTempUnschedulableUntil, vs...))
|
||||
}
|
||||
|
||||
// TempUnschedulableUntilNotIn applies the NotIn predicate on the "temp_unschedulable_until" field.
|
||||
func TempUnschedulableUntilNotIn(vs ...time.Time) predicate.Account {
|
||||
return predicate.Account(sql.FieldNotIn(FieldTempUnschedulableUntil, vs...))
|
||||
}
|
||||
|
||||
// TempUnschedulableUntilGT applies the GT predicate on the "temp_unschedulable_until" field.
|
||||
func TempUnschedulableUntilGT(v time.Time) predicate.Account {
|
||||
return predicate.Account(sql.FieldGT(FieldTempUnschedulableUntil, v))
|
||||
}
|
||||
|
||||
// TempUnschedulableUntilGTE applies the GTE predicate on the "temp_unschedulable_until" field.
|
||||
func TempUnschedulableUntilGTE(v time.Time) predicate.Account {
|
||||
return predicate.Account(sql.FieldGTE(FieldTempUnschedulableUntil, v))
|
||||
}
|
||||
|
||||
// TempUnschedulableUntilLT applies the LT predicate on the "temp_unschedulable_until" field.
|
||||
func TempUnschedulableUntilLT(v time.Time) predicate.Account {
|
||||
return predicate.Account(sql.FieldLT(FieldTempUnschedulableUntil, v))
|
||||
}
|
||||
|
||||
// TempUnschedulableUntilLTE applies the LTE predicate on the "temp_unschedulable_until" field.
|
||||
func TempUnschedulableUntilLTE(v time.Time) predicate.Account {
|
||||
return predicate.Account(sql.FieldLTE(FieldTempUnschedulableUntil, v))
|
||||
}
|
||||
|
||||
// TempUnschedulableUntilIsNil applies the IsNil predicate on the "temp_unschedulable_until" field.
|
||||
func TempUnschedulableUntilIsNil() predicate.Account {
|
||||
return predicate.Account(sql.FieldIsNull(FieldTempUnschedulableUntil))
|
||||
}
|
||||
|
||||
// TempUnschedulableUntilNotNil applies the NotNil predicate on the "temp_unschedulable_until" field.
|
||||
func TempUnschedulableUntilNotNil() predicate.Account {
|
||||
return predicate.Account(sql.FieldNotNull(FieldTempUnschedulableUntil))
|
||||
}
|
||||
|
||||
// TempUnschedulableReasonEQ applies the EQ predicate on the "temp_unschedulable_reason" field.
|
||||
func TempUnschedulableReasonEQ(v string) predicate.Account {
|
||||
return predicate.Account(sql.FieldEQ(FieldTempUnschedulableReason, v))
|
||||
}
|
||||
|
||||
// TempUnschedulableReasonNEQ applies the NEQ predicate on the "temp_unschedulable_reason" field.
|
||||
func TempUnschedulableReasonNEQ(v string) predicate.Account {
|
||||
return predicate.Account(sql.FieldNEQ(FieldTempUnschedulableReason, v))
|
||||
}
|
||||
|
||||
// TempUnschedulableReasonIn applies the In predicate on the "temp_unschedulable_reason" field.
|
||||
func TempUnschedulableReasonIn(vs ...string) predicate.Account {
|
||||
return predicate.Account(sql.FieldIn(FieldTempUnschedulableReason, vs...))
|
||||
}
|
||||
|
||||
// TempUnschedulableReasonNotIn applies the NotIn predicate on the "temp_unschedulable_reason" field.
|
||||
func TempUnschedulableReasonNotIn(vs ...string) predicate.Account {
|
||||
return predicate.Account(sql.FieldNotIn(FieldTempUnschedulableReason, vs...))
|
||||
}
|
||||
|
||||
// TempUnschedulableReasonGT applies the GT predicate on the "temp_unschedulable_reason" field.
|
||||
func TempUnschedulableReasonGT(v string) predicate.Account {
|
||||
return predicate.Account(sql.FieldGT(FieldTempUnschedulableReason, v))
|
||||
}
|
||||
|
||||
// TempUnschedulableReasonGTE applies the GTE predicate on the "temp_unschedulable_reason" field.
|
||||
func TempUnschedulableReasonGTE(v string) predicate.Account {
|
||||
return predicate.Account(sql.FieldGTE(FieldTempUnschedulableReason, v))
|
||||
}
|
||||
|
||||
// TempUnschedulableReasonLT applies the LT predicate on the "temp_unschedulable_reason" field.
|
||||
func TempUnschedulableReasonLT(v string) predicate.Account {
|
||||
return predicate.Account(sql.FieldLT(FieldTempUnschedulableReason, v))
|
||||
}
|
||||
|
||||
// TempUnschedulableReasonLTE applies the LTE predicate on the "temp_unschedulable_reason" field.
|
||||
func TempUnschedulableReasonLTE(v string) predicate.Account {
|
||||
return predicate.Account(sql.FieldLTE(FieldTempUnschedulableReason, v))
|
||||
}
|
||||
|
||||
// TempUnschedulableReasonContains applies the Contains predicate on the "temp_unschedulable_reason" field.
|
||||
func TempUnschedulableReasonContains(v string) predicate.Account {
|
||||
return predicate.Account(sql.FieldContains(FieldTempUnschedulableReason, v))
|
||||
}
|
||||
|
||||
// TempUnschedulableReasonHasPrefix applies the HasPrefix predicate on the "temp_unschedulable_reason" field.
|
||||
func TempUnschedulableReasonHasPrefix(v string) predicate.Account {
|
||||
return predicate.Account(sql.FieldHasPrefix(FieldTempUnschedulableReason, v))
|
||||
}
|
||||
|
||||
// TempUnschedulableReasonHasSuffix applies the HasSuffix predicate on the "temp_unschedulable_reason" field.
|
||||
func TempUnschedulableReasonHasSuffix(v string) predicate.Account {
|
||||
return predicate.Account(sql.FieldHasSuffix(FieldTempUnschedulableReason, v))
|
||||
}
|
||||
|
||||
// TempUnschedulableReasonIsNil applies the IsNil predicate on the "temp_unschedulable_reason" field.
|
||||
func TempUnschedulableReasonIsNil() predicate.Account {
|
||||
return predicate.Account(sql.FieldIsNull(FieldTempUnschedulableReason))
|
||||
}
|
||||
|
||||
// TempUnschedulableReasonNotNil applies the NotNil predicate on the "temp_unschedulable_reason" field.
|
||||
func TempUnschedulableReasonNotNil() predicate.Account {
|
||||
return predicate.Account(sql.FieldNotNull(FieldTempUnschedulableReason))
|
||||
}
|
||||
|
||||
// TempUnschedulableReasonEqualFold applies the EqualFold predicate on the "temp_unschedulable_reason" field.
|
||||
func TempUnschedulableReasonEqualFold(v string) predicate.Account {
|
||||
return predicate.Account(sql.FieldEqualFold(FieldTempUnschedulableReason, v))
|
||||
}
|
||||
|
||||
// TempUnschedulableReasonContainsFold applies the ContainsFold predicate on the "temp_unschedulable_reason" field.
|
||||
func TempUnschedulableReasonContainsFold(v string) predicate.Account {
|
||||
return predicate.Account(sql.FieldContainsFold(FieldTempUnschedulableReason, v))
|
||||
}
|
||||
|
||||
// SessionWindowStartEQ applies the EQ predicate on the "session_window_start" field.
|
||||
func SessionWindowStartEQ(v time.Time) predicate.Account {
|
||||
return predicate.Account(sql.FieldEQ(FieldSessionWindowStart, v))
|
||||
|
||||
@@ -293,6 +293,34 @@ func (_c *AccountCreate) SetNillableOverloadUntil(v *time.Time) *AccountCreate {
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetTempUnschedulableUntil sets the "temp_unschedulable_until" field.
|
||||
func (_c *AccountCreate) SetTempUnschedulableUntil(v time.Time) *AccountCreate {
|
||||
_c.mutation.SetTempUnschedulableUntil(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableTempUnschedulableUntil sets the "temp_unschedulable_until" field if the given value is not nil.
|
||||
func (_c *AccountCreate) SetNillableTempUnschedulableUntil(v *time.Time) *AccountCreate {
|
||||
if v != nil {
|
||||
_c.SetTempUnschedulableUntil(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetTempUnschedulableReason sets the "temp_unschedulable_reason" field.
|
||||
func (_c *AccountCreate) SetTempUnschedulableReason(v string) *AccountCreate {
|
||||
_c.mutation.SetTempUnschedulableReason(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableTempUnschedulableReason sets the "temp_unschedulable_reason" field if the given value is not nil.
|
||||
func (_c *AccountCreate) SetNillableTempUnschedulableReason(v *string) *AccountCreate {
|
||||
if v != nil {
|
||||
_c.SetTempUnschedulableReason(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetSessionWindowStart sets the "session_window_start" field.
|
||||
func (_c *AccountCreate) SetSessionWindowStart(v time.Time) *AccountCreate {
|
||||
_c.mutation.SetSessionWindowStart(v)
|
||||
@@ -639,6 +667,14 @@ func (_c *AccountCreate) createSpec() (*Account, *sqlgraph.CreateSpec) {
|
||||
_spec.SetField(account.FieldOverloadUntil, field.TypeTime, value)
|
||||
_node.OverloadUntil = &value
|
||||
}
|
||||
if value, ok := _c.mutation.TempUnschedulableUntil(); ok {
|
||||
_spec.SetField(account.FieldTempUnschedulableUntil, field.TypeTime, value)
|
||||
_node.TempUnschedulableUntil = &value
|
||||
}
|
||||
if value, ok := _c.mutation.TempUnschedulableReason(); ok {
|
||||
_spec.SetField(account.FieldTempUnschedulableReason, field.TypeString, value)
|
||||
_node.TempUnschedulableReason = &value
|
||||
}
|
||||
if value, ok := _c.mutation.SessionWindowStart(); ok {
|
||||
_spec.SetField(account.FieldSessionWindowStart, field.TypeTime, value)
|
||||
_node.SessionWindowStart = &value
|
||||
@@ -1080,6 +1116,42 @@ func (u *AccountUpsert) ClearOverloadUntil() *AccountUpsert {
|
||||
return u
|
||||
}
|
||||
|
||||
// SetTempUnschedulableUntil sets the "temp_unschedulable_until" field.
|
||||
func (u *AccountUpsert) SetTempUnschedulableUntil(v time.Time) *AccountUpsert {
|
||||
u.Set(account.FieldTempUnschedulableUntil, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateTempUnschedulableUntil sets the "temp_unschedulable_until" field to the value that was provided on create.
|
||||
func (u *AccountUpsert) UpdateTempUnschedulableUntil() *AccountUpsert {
|
||||
u.SetExcluded(account.FieldTempUnschedulableUntil)
|
||||
return u
|
||||
}
|
||||
|
||||
// ClearTempUnschedulableUntil clears the value of the "temp_unschedulable_until" field.
|
||||
func (u *AccountUpsert) ClearTempUnschedulableUntil() *AccountUpsert {
|
||||
u.SetNull(account.FieldTempUnschedulableUntil)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetTempUnschedulableReason sets the "temp_unschedulable_reason" field.
|
||||
func (u *AccountUpsert) SetTempUnschedulableReason(v string) *AccountUpsert {
|
||||
u.Set(account.FieldTempUnschedulableReason, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateTempUnschedulableReason sets the "temp_unschedulable_reason" field to the value that was provided on create.
|
||||
func (u *AccountUpsert) UpdateTempUnschedulableReason() *AccountUpsert {
|
||||
u.SetExcluded(account.FieldTempUnschedulableReason)
|
||||
return u
|
||||
}
|
||||
|
||||
// ClearTempUnschedulableReason clears the value of the "temp_unschedulable_reason" field.
|
||||
func (u *AccountUpsert) ClearTempUnschedulableReason() *AccountUpsert {
|
||||
u.SetNull(account.FieldTempUnschedulableReason)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetSessionWindowStart sets the "session_window_start" field.
|
||||
func (u *AccountUpsert) SetSessionWindowStart(v time.Time) *AccountUpsert {
|
||||
u.Set(account.FieldSessionWindowStart, v)
|
||||
@@ -1557,6 +1629,48 @@ func (u *AccountUpsertOne) ClearOverloadUntil() *AccountUpsertOne {
|
||||
})
|
||||
}
|
||||
|
||||
// SetTempUnschedulableUntil sets the "temp_unschedulable_until" field.
|
||||
func (u *AccountUpsertOne) SetTempUnschedulableUntil(v time.Time) *AccountUpsertOne {
|
||||
return u.Update(func(s *AccountUpsert) {
|
||||
s.SetTempUnschedulableUntil(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateTempUnschedulableUntil sets the "temp_unschedulable_until" field to the value that was provided on create.
|
||||
func (u *AccountUpsertOne) UpdateTempUnschedulableUntil() *AccountUpsertOne {
|
||||
return u.Update(func(s *AccountUpsert) {
|
||||
s.UpdateTempUnschedulableUntil()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearTempUnschedulableUntil clears the value of the "temp_unschedulable_until" field.
|
||||
func (u *AccountUpsertOne) ClearTempUnschedulableUntil() *AccountUpsertOne {
|
||||
return u.Update(func(s *AccountUpsert) {
|
||||
s.ClearTempUnschedulableUntil()
|
||||
})
|
||||
}
|
||||
|
||||
// SetTempUnschedulableReason sets the "temp_unschedulable_reason" field.
|
||||
func (u *AccountUpsertOne) SetTempUnschedulableReason(v string) *AccountUpsertOne {
|
||||
return u.Update(func(s *AccountUpsert) {
|
||||
s.SetTempUnschedulableReason(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateTempUnschedulableReason sets the "temp_unschedulable_reason" field to the value that was provided on create.
|
||||
func (u *AccountUpsertOne) UpdateTempUnschedulableReason() *AccountUpsertOne {
|
||||
return u.Update(func(s *AccountUpsert) {
|
||||
s.UpdateTempUnschedulableReason()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearTempUnschedulableReason clears the value of the "temp_unschedulable_reason" field.
|
||||
func (u *AccountUpsertOne) ClearTempUnschedulableReason() *AccountUpsertOne {
|
||||
return u.Update(func(s *AccountUpsert) {
|
||||
s.ClearTempUnschedulableReason()
|
||||
})
|
||||
}
|
||||
|
||||
// SetSessionWindowStart sets the "session_window_start" field.
|
||||
func (u *AccountUpsertOne) SetSessionWindowStart(v time.Time) *AccountUpsertOne {
|
||||
return u.Update(func(s *AccountUpsert) {
|
||||
@@ -2209,6 +2323,48 @@ func (u *AccountUpsertBulk) ClearOverloadUntil() *AccountUpsertBulk {
|
||||
})
|
||||
}
|
||||
|
||||
// SetTempUnschedulableUntil sets the "temp_unschedulable_until" field.
|
||||
func (u *AccountUpsertBulk) SetTempUnschedulableUntil(v time.Time) *AccountUpsertBulk {
|
||||
return u.Update(func(s *AccountUpsert) {
|
||||
s.SetTempUnschedulableUntil(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateTempUnschedulableUntil sets the "temp_unschedulable_until" field to the value that was provided on create.
|
||||
func (u *AccountUpsertBulk) UpdateTempUnschedulableUntil() *AccountUpsertBulk {
|
||||
return u.Update(func(s *AccountUpsert) {
|
||||
s.UpdateTempUnschedulableUntil()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearTempUnschedulableUntil clears the value of the "temp_unschedulable_until" field.
|
||||
func (u *AccountUpsertBulk) ClearTempUnschedulableUntil() *AccountUpsertBulk {
|
||||
return u.Update(func(s *AccountUpsert) {
|
||||
s.ClearTempUnschedulableUntil()
|
||||
})
|
||||
}
|
||||
|
||||
// SetTempUnschedulableReason sets the "temp_unschedulable_reason" field.
|
||||
func (u *AccountUpsertBulk) SetTempUnschedulableReason(v string) *AccountUpsertBulk {
|
||||
return u.Update(func(s *AccountUpsert) {
|
||||
s.SetTempUnschedulableReason(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateTempUnschedulableReason sets the "temp_unschedulable_reason" field to the value that was provided on create.
|
||||
func (u *AccountUpsertBulk) UpdateTempUnschedulableReason() *AccountUpsertBulk {
|
||||
return u.Update(func(s *AccountUpsert) {
|
||||
s.UpdateTempUnschedulableReason()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearTempUnschedulableReason clears the value of the "temp_unschedulable_reason" field.
|
||||
func (u *AccountUpsertBulk) ClearTempUnschedulableReason() *AccountUpsertBulk {
|
||||
return u.Update(func(s *AccountUpsert) {
|
||||
s.ClearTempUnschedulableReason()
|
||||
})
|
||||
}
|
||||
|
||||
// SetSessionWindowStart sets the "session_window_start" field.
|
||||
func (u *AccountUpsertBulk) SetSessionWindowStart(v time.Time) *AccountUpsertBulk {
|
||||
return u.Update(func(s *AccountUpsert) {
|
||||
|
||||
@@ -376,6 +376,46 @@ func (_u *AccountUpdate) ClearOverloadUntil() *AccountUpdate {
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetTempUnschedulableUntil sets the "temp_unschedulable_until" field.
|
||||
func (_u *AccountUpdate) SetTempUnschedulableUntil(v time.Time) *AccountUpdate {
|
||||
_u.mutation.SetTempUnschedulableUntil(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableTempUnschedulableUntil sets the "temp_unschedulable_until" field if the given value is not nil.
|
||||
func (_u *AccountUpdate) SetNillableTempUnschedulableUntil(v *time.Time) *AccountUpdate {
|
||||
if v != nil {
|
||||
_u.SetTempUnschedulableUntil(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// ClearTempUnschedulableUntil clears the value of the "temp_unschedulable_until" field.
|
||||
func (_u *AccountUpdate) ClearTempUnschedulableUntil() *AccountUpdate {
|
||||
_u.mutation.ClearTempUnschedulableUntil()
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetTempUnschedulableReason sets the "temp_unschedulable_reason" field.
|
||||
func (_u *AccountUpdate) SetTempUnschedulableReason(v string) *AccountUpdate {
|
||||
_u.mutation.SetTempUnschedulableReason(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableTempUnschedulableReason sets the "temp_unschedulable_reason" field if the given value is not nil.
|
||||
func (_u *AccountUpdate) SetNillableTempUnschedulableReason(v *string) *AccountUpdate {
|
||||
if v != nil {
|
||||
_u.SetTempUnschedulableReason(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// ClearTempUnschedulableReason clears the value of the "temp_unschedulable_reason" field.
|
||||
func (_u *AccountUpdate) ClearTempUnschedulableReason() *AccountUpdate {
|
||||
_u.mutation.ClearTempUnschedulableReason()
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetSessionWindowStart sets the "session_window_start" field.
|
||||
func (_u *AccountUpdate) SetSessionWindowStart(v time.Time) *AccountUpdate {
|
||||
_u.mutation.SetSessionWindowStart(v)
|
||||
@@ -701,6 +741,18 @@ func (_u *AccountUpdate) sqlSave(ctx context.Context) (_node int, err error) {
|
||||
if _u.mutation.OverloadUntilCleared() {
|
||||
_spec.ClearField(account.FieldOverloadUntil, field.TypeTime)
|
||||
}
|
||||
if value, ok := _u.mutation.TempUnschedulableUntil(); ok {
|
||||
_spec.SetField(account.FieldTempUnschedulableUntil, field.TypeTime, value)
|
||||
}
|
||||
if _u.mutation.TempUnschedulableUntilCleared() {
|
||||
_spec.ClearField(account.FieldTempUnschedulableUntil, field.TypeTime)
|
||||
}
|
||||
if value, ok := _u.mutation.TempUnschedulableReason(); ok {
|
||||
_spec.SetField(account.FieldTempUnschedulableReason, field.TypeString, value)
|
||||
}
|
||||
if _u.mutation.TempUnschedulableReasonCleared() {
|
||||
_spec.ClearField(account.FieldTempUnschedulableReason, field.TypeString)
|
||||
}
|
||||
if value, ok := _u.mutation.SessionWindowStart(); ok {
|
||||
_spec.SetField(account.FieldSessionWindowStart, field.TypeTime, value)
|
||||
}
|
||||
@@ -1215,6 +1267,46 @@ func (_u *AccountUpdateOne) ClearOverloadUntil() *AccountUpdateOne {
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetTempUnschedulableUntil sets the "temp_unschedulable_until" field.
|
||||
func (_u *AccountUpdateOne) SetTempUnschedulableUntil(v time.Time) *AccountUpdateOne {
|
||||
_u.mutation.SetTempUnschedulableUntil(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableTempUnschedulableUntil sets the "temp_unschedulable_until" field if the given value is not nil.
|
||||
func (_u *AccountUpdateOne) SetNillableTempUnschedulableUntil(v *time.Time) *AccountUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetTempUnschedulableUntil(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// ClearTempUnschedulableUntil clears the value of the "temp_unschedulable_until" field.
|
||||
func (_u *AccountUpdateOne) ClearTempUnschedulableUntil() *AccountUpdateOne {
|
||||
_u.mutation.ClearTempUnschedulableUntil()
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetTempUnschedulableReason sets the "temp_unschedulable_reason" field.
|
||||
func (_u *AccountUpdateOne) SetTempUnschedulableReason(v string) *AccountUpdateOne {
|
||||
_u.mutation.SetTempUnschedulableReason(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableTempUnschedulableReason sets the "temp_unschedulable_reason" field if the given value is not nil.
|
||||
func (_u *AccountUpdateOne) SetNillableTempUnschedulableReason(v *string) *AccountUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetTempUnschedulableReason(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// ClearTempUnschedulableReason clears the value of the "temp_unschedulable_reason" field.
|
||||
func (_u *AccountUpdateOne) ClearTempUnschedulableReason() *AccountUpdateOne {
|
||||
_u.mutation.ClearTempUnschedulableReason()
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetSessionWindowStart sets the "session_window_start" field.
|
||||
func (_u *AccountUpdateOne) SetSessionWindowStart(v time.Time) *AccountUpdateOne {
|
||||
_u.mutation.SetSessionWindowStart(v)
|
||||
@@ -1570,6 +1662,18 @@ func (_u *AccountUpdateOne) sqlSave(ctx context.Context) (_node *Account, err er
|
||||
if _u.mutation.OverloadUntilCleared() {
|
||||
_spec.ClearField(account.FieldOverloadUntil, field.TypeTime)
|
||||
}
|
||||
if value, ok := _u.mutation.TempUnschedulableUntil(); ok {
|
||||
_spec.SetField(account.FieldTempUnschedulableUntil, field.TypeTime, value)
|
||||
}
|
||||
if _u.mutation.TempUnschedulableUntilCleared() {
|
||||
_spec.ClearField(account.FieldTempUnschedulableUntil, field.TypeTime)
|
||||
}
|
||||
if value, ok := _u.mutation.TempUnschedulableReason(); ok {
|
||||
_spec.SetField(account.FieldTempUnschedulableReason, field.TypeString, value)
|
||||
}
|
||||
if _u.mutation.TempUnschedulableReasonCleared() {
|
||||
_spec.ClearField(account.FieldTempUnschedulableReason, field.TypeString)
|
||||
}
|
||||
if value, ok := _u.mutation.SessionWindowStart(); ok {
|
||||
_spec.SetField(account.FieldSessionWindowStart, field.TypeTime, value)
|
||||
}
|
||||
|
||||
@@ -48,6 +48,24 @@ type APIKey struct {
|
||||
QuotaUsed float64 `json:"quota_used,omitempty"`
|
||||
// Expiration time for this API key (null = never expires)
|
||||
ExpiresAt *time.Time `json:"expires_at,omitempty"`
|
||||
// Rate limit in USD per 5 hours (0 = unlimited)
|
||||
RateLimit5h float64 `json:"rate_limit_5h,omitempty"`
|
||||
// Rate limit in USD per day (0 = unlimited)
|
||||
RateLimit1d float64 `json:"rate_limit_1d,omitempty"`
|
||||
// Rate limit in USD per 7 days (0 = unlimited)
|
||||
RateLimit7d float64 `json:"rate_limit_7d,omitempty"`
|
||||
// Used amount in USD for the current 5h window
|
||||
Usage5h float64 `json:"usage_5h,omitempty"`
|
||||
// Used amount in USD for the current 1d window
|
||||
Usage1d float64 `json:"usage_1d,omitempty"`
|
||||
// Used amount in USD for the current 7d window
|
||||
Usage7d float64 `json:"usage_7d,omitempty"`
|
||||
// Start time of the current 5h rate limit window
|
||||
Window5hStart *time.Time `json:"window_5h_start,omitempty"`
|
||||
// Start time of the current 1d rate limit window
|
||||
Window1dStart *time.Time `json:"window_1d_start,omitempty"`
|
||||
// Start time of the current 7d rate limit window
|
||||
Window7dStart *time.Time `json:"window_7d_start,omitempty"`
|
||||
// Edges holds the relations/edges for other nodes in the graph.
|
||||
// The values are being populated by the APIKeyQuery when eager-loading is set.
|
||||
Edges APIKeyEdges `json:"edges"`
|
||||
@@ -105,13 +123,13 @@ func (*APIKey) scanValues(columns []string) ([]any, error) {
|
||||
switch columns[i] {
|
||||
case apikey.FieldIPWhitelist, apikey.FieldIPBlacklist:
|
||||
values[i] = new([]byte)
|
||||
case apikey.FieldQuota, apikey.FieldQuotaUsed:
|
||||
case apikey.FieldQuota, apikey.FieldQuotaUsed, apikey.FieldRateLimit5h, apikey.FieldRateLimit1d, apikey.FieldRateLimit7d, apikey.FieldUsage5h, apikey.FieldUsage1d, apikey.FieldUsage7d:
|
||||
values[i] = new(sql.NullFloat64)
|
||||
case apikey.FieldID, apikey.FieldUserID, apikey.FieldGroupID:
|
||||
values[i] = new(sql.NullInt64)
|
||||
case apikey.FieldKey, apikey.FieldName, apikey.FieldStatus:
|
||||
values[i] = new(sql.NullString)
|
||||
case apikey.FieldCreatedAt, apikey.FieldUpdatedAt, apikey.FieldDeletedAt, apikey.FieldLastUsedAt, apikey.FieldExpiresAt:
|
||||
case apikey.FieldCreatedAt, apikey.FieldUpdatedAt, apikey.FieldDeletedAt, apikey.FieldLastUsedAt, apikey.FieldExpiresAt, apikey.FieldWindow5hStart, apikey.FieldWindow1dStart, apikey.FieldWindow7dStart:
|
||||
values[i] = new(sql.NullTime)
|
||||
default:
|
||||
values[i] = new(sql.UnknownType)
|
||||
@@ -226,6 +244,63 @@ func (_m *APIKey) assignValues(columns []string, values []any) error {
|
||||
_m.ExpiresAt = new(time.Time)
|
||||
*_m.ExpiresAt = value.Time
|
||||
}
|
||||
case apikey.FieldRateLimit5h:
|
||||
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field rate_limit_5h", values[i])
|
||||
} else if value.Valid {
|
||||
_m.RateLimit5h = value.Float64
|
||||
}
|
||||
case apikey.FieldRateLimit1d:
|
||||
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field rate_limit_1d", values[i])
|
||||
} else if value.Valid {
|
||||
_m.RateLimit1d = value.Float64
|
||||
}
|
||||
case apikey.FieldRateLimit7d:
|
||||
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field rate_limit_7d", values[i])
|
||||
} else if value.Valid {
|
||||
_m.RateLimit7d = value.Float64
|
||||
}
|
||||
case apikey.FieldUsage5h:
|
||||
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field usage_5h", values[i])
|
||||
} else if value.Valid {
|
||||
_m.Usage5h = value.Float64
|
||||
}
|
||||
case apikey.FieldUsage1d:
|
||||
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field usage_1d", values[i])
|
||||
} else if value.Valid {
|
||||
_m.Usage1d = value.Float64
|
||||
}
|
||||
case apikey.FieldUsage7d:
|
||||
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field usage_7d", values[i])
|
||||
} else if value.Valid {
|
||||
_m.Usage7d = value.Float64
|
||||
}
|
||||
case apikey.FieldWindow5hStart:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field window_5h_start", values[i])
|
||||
} else if value.Valid {
|
||||
_m.Window5hStart = new(time.Time)
|
||||
*_m.Window5hStart = value.Time
|
||||
}
|
||||
case apikey.FieldWindow1dStart:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field window_1d_start", values[i])
|
||||
} else if value.Valid {
|
||||
_m.Window1dStart = new(time.Time)
|
||||
*_m.Window1dStart = value.Time
|
||||
}
|
||||
case apikey.FieldWindow7dStart:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field window_7d_start", values[i])
|
||||
} else if value.Valid {
|
||||
_m.Window7dStart = new(time.Time)
|
||||
*_m.Window7dStart = value.Time
|
||||
}
|
||||
default:
|
||||
_m.selectValues.Set(columns[i], values[i])
|
||||
}
|
||||
@@ -326,6 +401,39 @@ func (_m *APIKey) String() string {
|
||||
builder.WriteString("expires_at=")
|
||||
builder.WriteString(v.Format(time.ANSIC))
|
||||
}
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("rate_limit_5h=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.RateLimit5h))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("rate_limit_1d=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.RateLimit1d))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("rate_limit_7d=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.RateLimit7d))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("usage_5h=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.Usage5h))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("usage_1d=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.Usage1d))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("usage_7d=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.Usage7d))
|
||||
builder.WriteString(", ")
|
||||
if v := _m.Window5hStart; v != nil {
|
||||
builder.WriteString("window_5h_start=")
|
||||
builder.WriteString(v.Format(time.ANSIC))
|
||||
}
|
||||
builder.WriteString(", ")
|
||||
if v := _m.Window1dStart; v != nil {
|
||||
builder.WriteString("window_1d_start=")
|
||||
builder.WriteString(v.Format(time.ANSIC))
|
||||
}
|
||||
builder.WriteString(", ")
|
||||
if v := _m.Window7dStart; v != nil {
|
||||
builder.WriteString("window_7d_start=")
|
||||
builder.WriteString(v.Format(time.ANSIC))
|
||||
}
|
||||
builder.WriteByte(')')
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
@@ -43,6 +43,24 @@ const (
|
||||
FieldQuotaUsed = "quota_used"
|
||||
// FieldExpiresAt holds the string denoting the expires_at field in the database.
|
||||
FieldExpiresAt = "expires_at"
|
||||
// FieldRateLimit5h holds the string denoting the rate_limit_5h field in the database.
|
||||
FieldRateLimit5h = "rate_limit_5h"
|
||||
// FieldRateLimit1d holds the string denoting the rate_limit_1d field in the database.
|
||||
FieldRateLimit1d = "rate_limit_1d"
|
||||
// FieldRateLimit7d holds the string denoting the rate_limit_7d field in the database.
|
||||
FieldRateLimit7d = "rate_limit_7d"
|
||||
// FieldUsage5h holds the string denoting the usage_5h field in the database.
|
||||
FieldUsage5h = "usage_5h"
|
||||
// FieldUsage1d holds the string denoting the usage_1d field in the database.
|
||||
FieldUsage1d = "usage_1d"
|
||||
// FieldUsage7d holds the string denoting the usage_7d field in the database.
|
||||
FieldUsage7d = "usage_7d"
|
||||
// FieldWindow5hStart holds the string denoting the window_5h_start field in the database.
|
||||
FieldWindow5hStart = "window_5h_start"
|
||||
// FieldWindow1dStart holds the string denoting the window_1d_start field in the database.
|
||||
FieldWindow1dStart = "window_1d_start"
|
||||
// FieldWindow7dStart holds the string denoting the window_7d_start field in the database.
|
||||
FieldWindow7dStart = "window_7d_start"
|
||||
// EdgeUser holds the string denoting the user edge name in mutations.
|
||||
EdgeUser = "user"
|
||||
// EdgeGroup holds the string denoting the group edge name in mutations.
|
||||
@@ -91,6 +109,15 @@ var Columns = []string{
|
||||
FieldQuota,
|
||||
FieldQuotaUsed,
|
||||
FieldExpiresAt,
|
||||
FieldRateLimit5h,
|
||||
FieldRateLimit1d,
|
||||
FieldRateLimit7d,
|
||||
FieldUsage5h,
|
||||
FieldUsage1d,
|
||||
FieldUsage7d,
|
||||
FieldWindow5hStart,
|
||||
FieldWindow1dStart,
|
||||
FieldWindow7dStart,
|
||||
}
|
||||
|
||||
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||
@@ -129,6 +156,18 @@ var (
|
||||
DefaultQuota float64
|
||||
// DefaultQuotaUsed holds the default value on creation for the "quota_used" field.
|
||||
DefaultQuotaUsed float64
|
||||
// DefaultRateLimit5h holds the default value on creation for the "rate_limit_5h" field.
|
||||
DefaultRateLimit5h float64
|
||||
// DefaultRateLimit1d holds the default value on creation for the "rate_limit_1d" field.
|
||||
DefaultRateLimit1d float64
|
||||
// DefaultRateLimit7d holds the default value on creation for the "rate_limit_7d" field.
|
||||
DefaultRateLimit7d float64
|
||||
// DefaultUsage5h holds the default value on creation for the "usage_5h" field.
|
||||
DefaultUsage5h float64
|
||||
// DefaultUsage1d holds the default value on creation for the "usage_1d" field.
|
||||
DefaultUsage1d float64
|
||||
// DefaultUsage7d holds the default value on creation for the "usage_7d" field.
|
||||
DefaultUsage7d float64
|
||||
)
|
||||
|
||||
// OrderOption defines the ordering options for the APIKey queries.
|
||||
@@ -199,6 +238,51 @@ func ByExpiresAt(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldExpiresAt, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByRateLimit5h orders the results by the rate_limit_5h field.
|
||||
func ByRateLimit5h(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldRateLimit5h, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByRateLimit1d orders the results by the rate_limit_1d field.
|
||||
func ByRateLimit1d(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldRateLimit1d, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByRateLimit7d orders the results by the rate_limit_7d field.
|
||||
func ByRateLimit7d(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldRateLimit7d, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByUsage5h orders the results by the usage_5h field.
|
||||
func ByUsage5h(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldUsage5h, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByUsage1d orders the results by the usage_1d field.
|
||||
func ByUsage1d(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldUsage1d, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByUsage7d orders the results by the usage_7d field.
|
||||
func ByUsage7d(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldUsage7d, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByWindow5hStart orders the results by the window_5h_start field.
|
||||
func ByWindow5hStart(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldWindow5hStart, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByWindow1dStart orders the results by the window_1d_start field.
|
||||
func ByWindow1dStart(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldWindow1dStart, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByWindow7dStart orders the results by the window_7d_start field.
|
||||
func ByWindow7dStart(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldWindow7dStart, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByUserField orders the results by user field.
|
||||
func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
|
||||
@@ -115,6 +115,51 @@ func ExpiresAt(v time.Time) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldEQ(FieldExpiresAt, v))
|
||||
}
|
||||
|
||||
// RateLimit5h applies equality check predicate on the "rate_limit_5h" field. It's identical to RateLimit5hEQ.
|
||||
func RateLimit5h(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldEQ(FieldRateLimit5h, v))
|
||||
}
|
||||
|
||||
// RateLimit1d applies equality check predicate on the "rate_limit_1d" field. It's identical to RateLimit1dEQ.
|
||||
func RateLimit1d(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldEQ(FieldRateLimit1d, v))
|
||||
}
|
||||
|
||||
// RateLimit7d applies equality check predicate on the "rate_limit_7d" field. It's identical to RateLimit7dEQ.
|
||||
func RateLimit7d(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldEQ(FieldRateLimit7d, v))
|
||||
}
|
||||
|
||||
// Usage5h applies equality check predicate on the "usage_5h" field. It's identical to Usage5hEQ.
|
||||
func Usage5h(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldEQ(FieldUsage5h, v))
|
||||
}
|
||||
|
||||
// Usage1d applies equality check predicate on the "usage_1d" field. It's identical to Usage1dEQ.
|
||||
func Usage1d(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldEQ(FieldUsage1d, v))
|
||||
}
|
||||
|
||||
// Usage7d applies equality check predicate on the "usage_7d" field. It's identical to Usage7dEQ.
|
||||
func Usage7d(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldEQ(FieldUsage7d, v))
|
||||
}
|
||||
|
||||
// Window5hStart applies equality check predicate on the "window_5h_start" field. It's identical to Window5hStartEQ.
|
||||
func Window5hStart(v time.Time) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldEQ(FieldWindow5hStart, v))
|
||||
}
|
||||
|
||||
// Window1dStart applies equality check predicate on the "window_1d_start" field. It's identical to Window1dStartEQ.
|
||||
func Window1dStart(v time.Time) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldEQ(FieldWindow1dStart, v))
|
||||
}
|
||||
|
||||
// Window7dStart applies equality check predicate on the "window_7d_start" field. It's identical to Window7dStartEQ.
|
||||
func Window7dStart(v time.Time) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldEQ(FieldWindow7dStart, v))
|
||||
}
|
||||
|
||||
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
|
||||
func CreatedAtEQ(v time.Time) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldEQ(FieldCreatedAt, v))
|
||||
@@ -690,6 +735,396 @@ func ExpiresAtNotNil() predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldNotNull(FieldExpiresAt))
|
||||
}
|
||||
|
||||
// RateLimit5hEQ applies the EQ predicate on the "rate_limit_5h" field.
|
||||
func RateLimit5hEQ(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldEQ(FieldRateLimit5h, v))
|
||||
}
|
||||
|
||||
// RateLimit5hNEQ applies the NEQ predicate on the "rate_limit_5h" field.
|
||||
func RateLimit5hNEQ(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldNEQ(FieldRateLimit5h, v))
|
||||
}
|
||||
|
||||
// RateLimit5hIn applies the In predicate on the "rate_limit_5h" field.
|
||||
func RateLimit5hIn(vs ...float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldIn(FieldRateLimit5h, vs...))
|
||||
}
|
||||
|
||||
// RateLimit5hNotIn applies the NotIn predicate on the "rate_limit_5h" field.
|
||||
func RateLimit5hNotIn(vs ...float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldNotIn(FieldRateLimit5h, vs...))
|
||||
}
|
||||
|
||||
// RateLimit5hGT applies the GT predicate on the "rate_limit_5h" field.
|
||||
func RateLimit5hGT(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldGT(FieldRateLimit5h, v))
|
||||
}
|
||||
|
||||
// RateLimit5hGTE applies the GTE predicate on the "rate_limit_5h" field.
|
||||
func RateLimit5hGTE(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldGTE(FieldRateLimit5h, v))
|
||||
}
|
||||
|
||||
// RateLimit5hLT applies the LT predicate on the "rate_limit_5h" field.
|
||||
func RateLimit5hLT(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldLT(FieldRateLimit5h, v))
|
||||
}
|
||||
|
||||
// RateLimit5hLTE applies the LTE predicate on the "rate_limit_5h" field.
|
||||
func RateLimit5hLTE(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldLTE(FieldRateLimit5h, v))
|
||||
}
|
||||
|
||||
// RateLimit1dEQ applies the EQ predicate on the "rate_limit_1d" field.
|
||||
func RateLimit1dEQ(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldEQ(FieldRateLimit1d, v))
|
||||
}
|
||||
|
||||
// RateLimit1dNEQ applies the NEQ predicate on the "rate_limit_1d" field.
|
||||
func RateLimit1dNEQ(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldNEQ(FieldRateLimit1d, v))
|
||||
}
|
||||
|
||||
// RateLimit1dIn applies the In predicate on the "rate_limit_1d" field.
|
||||
func RateLimit1dIn(vs ...float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldIn(FieldRateLimit1d, vs...))
|
||||
}
|
||||
|
||||
// RateLimit1dNotIn applies the NotIn predicate on the "rate_limit_1d" field.
|
||||
func RateLimit1dNotIn(vs ...float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldNotIn(FieldRateLimit1d, vs...))
|
||||
}
|
||||
|
||||
// RateLimit1dGT applies the GT predicate on the "rate_limit_1d" field.
|
||||
func RateLimit1dGT(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldGT(FieldRateLimit1d, v))
|
||||
}
|
||||
|
||||
// RateLimit1dGTE applies the GTE predicate on the "rate_limit_1d" field.
|
||||
func RateLimit1dGTE(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldGTE(FieldRateLimit1d, v))
|
||||
}
|
||||
|
||||
// RateLimit1dLT applies the LT predicate on the "rate_limit_1d" field.
|
||||
func RateLimit1dLT(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldLT(FieldRateLimit1d, v))
|
||||
}
|
||||
|
||||
// RateLimit1dLTE applies the LTE predicate on the "rate_limit_1d" field.
|
||||
func RateLimit1dLTE(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldLTE(FieldRateLimit1d, v))
|
||||
}
|
||||
|
||||
// RateLimit7dEQ applies the EQ predicate on the "rate_limit_7d" field.
|
||||
func RateLimit7dEQ(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldEQ(FieldRateLimit7d, v))
|
||||
}
|
||||
|
||||
// RateLimit7dNEQ applies the NEQ predicate on the "rate_limit_7d" field.
|
||||
func RateLimit7dNEQ(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldNEQ(FieldRateLimit7d, v))
|
||||
}
|
||||
|
||||
// RateLimit7dIn applies the In predicate on the "rate_limit_7d" field.
|
||||
func RateLimit7dIn(vs ...float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldIn(FieldRateLimit7d, vs...))
|
||||
}
|
||||
|
||||
// RateLimit7dNotIn applies the NotIn predicate on the "rate_limit_7d" field.
|
||||
func RateLimit7dNotIn(vs ...float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldNotIn(FieldRateLimit7d, vs...))
|
||||
}
|
||||
|
||||
// RateLimit7dGT applies the GT predicate on the "rate_limit_7d" field.
|
||||
func RateLimit7dGT(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldGT(FieldRateLimit7d, v))
|
||||
}
|
||||
|
||||
// RateLimit7dGTE applies the GTE predicate on the "rate_limit_7d" field.
|
||||
func RateLimit7dGTE(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldGTE(FieldRateLimit7d, v))
|
||||
}
|
||||
|
||||
// RateLimit7dLT applies the LT predicate on the "rate_limit_7d" field.
|
||||
func RateLimit7dLT(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldLT(FieldRateLimit7d, v))
|
||||
}
|
||||
|
||||
// RateLimit7dLTE applies the LTE predicate on the "rate_limit_7d" field.
|
||||
func RateLimit7dLTE(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldLTE(FieldRateLimit7d, v))
|
||||
}
|
||||
|
||||
// Usage5hEQ applies the EQ predicate on the "usage_5h" field.
|
||||
func Usage5hEQ(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldEQ(FieldUsage5h, v))
|
||||
}
|
||||
|
||||
// Usage5hNEQ applies the NEQ predicate on the "usage_5h" field.
|
||||
func Usage5hNEQ(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldNEQ(FieldUsage5h, v))
|
||||
}
|
||||
|
||||
// Usage5hIn applies the In predicate on the "usage_5h" field.
|
||||
func Usage5hIn(vs ...float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldIn(FieldUsage5h, vs...))
|
||||
}
|
||||
|
||||
// Usage5hNotIn applies the NotIn predicate on the "usage_5h" field.
|
||||
func Usage5hNotIn(vs ...float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldNotIn(FieldUsage5h, vs...))
|
||||
}
|
||||
|
||||
// Usage5hGT applies the GT predicate on the "usage_5h" field.
|
||||
func Usage5hGT(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldGT(FieldUsage5h, v))
|
||||
}
|
||||
|
||||
// Usage5hGTE applies the GTE predicate on the "usage_5h" field.
|
||||
func Usage5hGTE(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldGTE(FieldUsage5h, v))
|
||||
}
|
||||
|
||||
// Usage5hLT applies the LT predicate on the "usage_5h" field.
|
||||
func Usage5hLT(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldLT(FieldUsage5h, v))
|
||||
}
|
||||
|
||||
// Usage5hLTE applies the LTE predicate on the "usage_5h" field.
|
||||
func Usage5hLTE(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldLTE(FieldUsage5h, v))
|
||||
}
|
||||
|
||||
// Usage1dEQ applies the EQ predicate on the "usage_1d" field.
|
||||
func Usage1dEQ(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldEQ(FieldUsage1d, v))
|
||||
}
|
||||
|
||||
// Usage1dNEQ applies the NEQ predicate on the "usage_1d" field.
|
||||
func Usage1dNEQ(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldNEQ(FieldUsage1d, v))
|
||||
}
|
||||
|
||||
// Usage1dIn applies the In predicate on the "usage_1d" field.
|
||||
func Usage1dIn(vs ...float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldIn(FieldUsage1d, vs...))
|
||||
}
|
||||
|
||||
// Usage1dNotIn applies the NotIn predicate on the "usage_1d" field.
|
||||
func Usage1dNotIn(vs ...float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldNotIn(FieldUsage1d, vs...))
|
||||
}
|
||||
|
||||
// Usage1dGT applies the GT predicate on the "usage_1d" field.
|
||||
func Usage1dGT(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldGT(FieldUsage1d, v))
|
||||
}
|
||||
|
||||
// Usage1dGTE applies the GTE predicate on the "usage_1d" field.
|
||||
func Usage1dGTE(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldGTE(FieldUsage1d, v))
|
||||
}
|
||||
|
||||
// Usage1dLT applies the LT predicate on the "usage_1d" field.
|
||||
func Usage1dLT(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldLT(FieldUsage1d, v))
|
||||
}
|
||||
|
||||
// Usage1dLTE applies the LTE predicate on the "usage_1d" field.
|
||||
func Usage1dLTE(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldLTE(FieldUsage1d, v))
|
||||
}
|
||||
|
||||
// Usage7dEQ applies the EQ predicate on the "usage_7d" field.
|
||||
func Usage7dEQ(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldEQ(FieldUsage7d, v))
|
||||
}
|
||||
|
||||
// Usage7dNEQ applies the NEQ predicate on the "usage_7d" field.
|
||||
func Usage7dNEQ(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldNEQ(FieldUsage7d, v))
|
||||
}
|
||||
|
||||
// Usage7dIn applies the In predicate on the "usage_7d" field.
|
||||
func Usage7dIn(vs ...float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldIn(FieldUsage7d, vs...))
|
||||
}
|
||||
|
||||
// Usage7dNotIn applies the NotIn predicate on the "usage_7d" field.
|
||||
func Usage7dNotIn(vs ...float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldNotIn(FieldUsage7d, vs...))
|
||||
}
|
||||
|
||||
// Usage7dGT applies the GT predicate on the "usage_7d" field.
|
||||
func Usage7dGT(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldGT(FieldUsage7d, v))
|
||||
}
|
||||
|
||||
// Usage7dGTE applies the GTE predicate on the "usage_7d" field.
|
||||
func Usage7dGTE(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldGTE(FieldUsage7d, v))
|
||||
}
|
||||
|
||||
// Usage7dLT applies the LT predicate on the "usage_7d" field.
|
||||
func Usage7dLT(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldLT(FieldUsage7d, v))
|
||||
}
|
||||
|
||||
// Usage7dLTE applies the LTE predicate on the "usage_7d" field.
|
||||
func Usage7dLTE(v float64) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldLTE(FieldUsage7d, v))
|
||||
}
|
||||
|
||||
// Window5hStartEQ applies the EQ predicate on the "window_5h_start" field.
|
||||
func Window5hStartEQ(v time.Time) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldEQ(FieldWindow5hStart, v))
|
||||
}
|
||||
|
||||
// Window5hStartNEQ applies the NEQ predicate on the "window_5h_start" field.
|
||||
func Window5hStartNEQ(v time.Time) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldNEQ(FieldWindow5hStart, v))
|
||||
}
|
||||
|
||||
// Window5hStartIn applies the In predicate on the "window_5h_start" field.
|
||||
func Window5hStartIn(vs ...time.Time) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldIn(FieldWindow5hStart, vs...))
|
||||
}
|
||||
|
||||
// Window5hStartNotIn applies the NotIn predicate on the "window_5h_start" field.
|
||||
func Window5hStartNotIn(vs ...time.Time) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldNotIn(FieldWindow5hStart, vs...))
|
||||
}
|
||||
|
||||
// Window5hStartGT applies the GT predicate on the "window_5h_start" field.
|
||||
func Window5hStartGT(v time.Time) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldGT(FieldWindow5hStart, v))
|
||||
}
|
||||
|
||||
// Window5hStartGTE applies the GTE predicate on the "window_5h_start" field.
|
||||
func Window5hStartGTE(v time.Time) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldGTE(FieldWindow5hStart, v))
|
||||
}
|
||||
|
||||
// Window5hStartLT applies the LT predicate on the "window_5h_start" field.
|
||||
func Window5hStartLT(v time.Time) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldLT(FieldWindow5hStart, v))
|
||||
}
|
||||
|
||||
// Window5hStartLTE applies the LTE predicate on the "window_5h_start" field.
|
||||
func Window5hStartLTE(v time.Time) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldLTE(FieldWindow5hStart, v))
|
||||
}
|
||||
|
||||
// Window5hStartIsNil applies the IsNil predicate on the "window_5h_start" field.
|
||||
func Window5hStartIsNil() predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldIsNull(FieldWindow5hStart))
|
||||
}
|
||||
|
||||
// Window5hStartNotNil applies the NotNil predicate on the "window_5h_start" field.
|
||||
func Window5hStartNotNil() predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldNotNull(FieldWindow5hStart))
|
||||
}
|
||||
|
||||
// Window1dStartEQ applies the EQ predicate on the "window_1d_start" field.
|
||||
func Window1dStartEQ(v time.Time) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldEQ(FieldWindow1dStart, v))
|
||||
}
|
||||
|
||||
// Window1dStartNEQ applies the NEQ predicate on the "window_1d_start" field.
|
||||
func Window1dStartNEQ(v time.Time) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldNEQ(FieldWindow1dStart, v))
|
||||
}
|
||||
|
||||
// Window1dStartIn applies the In predicate on the "window_1d_start" field.
|
||||
func Window1dStartIn(vs ...time.Time) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldIn(FieldWindow1dStart, vs...))
|
||||
}
|
||||
|
||||
// Window1dStartNotIn applies the NotIn predicate on the "window_1d_start" field.
|
||||
func Window1dStartNotIn(vs ...time.Time) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldNotIn(FieldWindow1dStart, vs...))
|
||||
}
|
||||
|
||||
// Window1dStartGT applies the GT predicate on the "window_1d_start" field.
|
||||
func Window1dStartGT(v time.Time) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldGT(FieldWindow1dStart, v))
|
||||
}
|
||||
|
||||
// Window1dStartGTE applies the GTE predicate on the "window_1d_start" field.
|
||||
func Window1dStartGTE(v time.Time) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldGTE(FieldWindow1dStart, v))
|
||||
}
|
||||
|
||||
// Window1dStartLT applies the LT predicate on the "window_1d_start" field.
|
||||
func Window1dStartLT(v time.Time) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldLT(FieldWindow1dStart, v))
|
||||
}
|
||||
|
||||
// Window1dStartLTE applies the LTE predicate on the "window_1d_start" field.
|
||||
func Window1dStartLTE(v time.Time) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldLTE(FieldWindow1dStart, v))
|
||||
}
|
||||
|
||||
// Window1dStartIsNil applies the IsNil predicate on the "window_1d_start" field.
|
||||
func Window1dStartIsNil() predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldIsNull(FieldWindow1dStart))
|
||||
}
|
||||
|
||||
// Window1dStartNotNil applies the NotNil predicate on the "window_1d_start" field.
|
||||
func Window1dStartNotNil() predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldNotNull(FieldWindow1dStart))
|
||||
}
|
||||
|
||||
// Window7dStartEQ applies the EQ predicate on the "window_7d_start" field.
|
||||
func Window7dStartEQ(v time.Time) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldEQ(FieldWindow7dStart, v))
|
||||
}
|
||||
|
||||
// Window7dStartNEQ applies the NEQ predicate on the "window_7d_start" field.
|
||||
func Window7dStartNEQ(v time.Time) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldNEQ(FieldWindow7dStart, v))
|
||||
}
|
||||
|
||||
// Window7dStartIn applies the In predicate on the "window_7d_start" field.
|
||||
func Window7dStartIn(vs ...time.Time) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldIn(FieldWindow7dStart, vs...))
|
||||
}
|
||||
|
||||
// Window7dStartNotIn applies the NotIn predicate on the "window_7d_start" field.
|
||||
func Window7dStartNotIn(vs ...time.Time) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldNotIn(FieldWindow7dStart, vs...))
|
||||
}
|
||||
|
||||
// Window7dStartGT applies the GT predicate on the "window_7d_start" field.
|
||||
func Window7dStartGT(v time.Time) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldGT(FieldWindow7dStart, v))
|
||||
}
|
||||
|
||||
// Window7dStartGTE applies the GTE predicate on the "window_7d_start" field.
|
||||
func Window7dStartGTE(v time.Time) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldGTE(FieldWindow7dStart, v))
|
||||
}
|
||||
|
||||
// Window7dStartLT applies the LT predicate on the "window_7d_start" field.
|
||||
func Window7dStartLT(v time.Time) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldLT(FieldWindow7dStart, v))
|
||||
}
|
||||
|
||||
// Window7dStartLTE applies the LTE predicate on the "window_7d_start" field.
|
||||
func Window7dStartLTE(v time.Time) predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldLTE(FieldWindow7dStart, v))
|
||||
}
|
||||
|
||||
// Window7dStartIsNil applies the IsNil predicate on the "window_7d_start" field.
|
||||
func Window7dStartIsNil() predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldIsNull(FieldWindow7dStart))
|
||||
}
|
||||
|
||||
// Window7dStartNotNil applies the NotNil predicate on the "window_7d_start" field.
|
||||
func Window7dStartNotNil() predicate.APIKey {
|
||||
return predicate.APIKey(sql.FieldNotNull(FieldWindow7dStart))
|
||||
}
|
||||
|
||||
// HasUser applies the HasEdge predicate on the "user" edge.
|
||||
func HasUser() predicate.APIKey {
|
||||
return predicate.APIKey(func(s *sql.Selector) {
|
||||
|
||||
@@ -181,6 +181,132 @@ func (_c *APIKeyCreate) SetNillableExpiresAt(v *time.Time) *APIKeyCreate {
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetRateLimit5h sets the "rate_limit_5h" field.
|
||||
func (_c *APIKeyCreate) SetRateLimit5h(v float64) *APIKeyCreate {
|
||||
_c.mutation.SetRateLimit5h(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableRateLimit5h sets the "rate_limit_5h" field if the given value is not nil.
|
||||
func (_c *APIKeyCreate) SetNillableRateLimit5h(v *float64) *APIKeyCreate {
|
||||
if v != nil {
|
||||
_c.SetRateLimit5h(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetRateLimit1d sets the "rate_limit_1d" field.
|
||||
func (_c *APIKeyCreate) SetRateLimit1d(v float64) *APIKeyCreate {
|
||||
_c.mutation.SetRateLimit1d(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableRateLimit1d sets the "rate_limit_1d" field if the given value is not nil.
|
||||
func (_c *APIKeyCreate) SetNillableRateLimit1d(v *float64) *APIKeyCreate {
|
||||
if v != nil {
|
||||
_c.SetRateLimit1d(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetRateLimit7d sets the "rate_limit_7d" field.
|
||||
func (_c *APIKeyCreate) SetRateLimit7d(v float64) *APIKeyCreate {
|
||||
_c.mutation.SetRateLimit7d(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableRateLimit7d sets the "rate_limit_7d" field if the given value is not nil.
|
||||
func (_c *APIKeyCreate) SetNillableRateLimit7d(v *float64) *APIKeyCreate {
|
||||
if v != nil {
|
||||
_c.SetRateLimit7d(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetUsage5h sets the "usage_5h" field.
|
||||
func (_c *APIKeyCreate) SetUsage5h(v float64) *APIKeyCreate {
|
||||
_c.mutation.SetUsage5h(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableUsage5h sets the "usage_5h" field if the given value is not nil.
|
||||
func (_c *APIKeyCreate) SetNillableUsage5h(v *float64) *APIKeyCreate {
|
||||
if v != nil {
|
||||
_c.SetUsage5h(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetUsage1d sets the "usage_1d" field.
|
||||
func (_c *APIKeyCreate) SetUsage1d(v float64) *APIKeyCreate {
|
||||
_c.mutation.SetUsage1d(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableUsage1d sets the "usage_1d" field if the given value is not nil.
|
||||
func (_c *APIKeyCreate) SetNillableUsage1d(v *float64) *APIKeyCreate {
|
||||
if v != nil {
|
||||
_c.SetUsage1d(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetUsage7d sets the "usage_7d" field.
|
||||
func (_c *APIKeyCreate) SetUsage7d(v float64) *APIKeyCreate {
|
||||
_c.mutation.SetUsage7d(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableUsage7d sets the "usage_7d" field if the given value is not nil.
|
||||
func (_c *APIKeyCreate) SetNillableUsage7d(v *float64) *APIKeyCreate {
|
||||
if v != nil {
|
||||
_c.SetUsage7d(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetWindow5hStart sets the "window_5h_start" field.
|
||||
func (_c *APIKeyCreate) SetWindow5hStart(v time.Time) *APIKeyCreate {
|
||||
_c.mutation.SetWindow5hStart(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableWindow5hStart sets the "window_5h_start" field if the given value is not nil.
|
||||
func (_c *APIKeyCreate) SetNillableWindow5hStart(v *time.Time) *APIKeyCreate {
|
||||
if v != nil {
|
||||
_c.SetWindow5hStart(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetWindow1dStart sets the "window_1d_start" field.
|
||||
func (_c *APIKeyCreate) SetWindow1dStart(v time.Time) *APIKeyCreate {
|
||||
_c.mutation.SetWindow1dStart(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableWindow1dStart sets the "window_1d_start" field if the given value is not nil.
|
||||
func (_c *APIKeyCreate) SetNillableWindow1dStart(v *time.Time) *APIKeyCreate {
|
||||
if v != nil {
|
||||
_c.SetWindow1dStart(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetWindow7dStart sets the "window_7d_start" field.
|
||||
func (_c *APIKeyCreate) SetWindow7dStart(v time.Time) *APIKeyCreate {
|
||||
_c.mutation.SetWindow7dStart(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableWindow7dStart sets the "window_7d_start" field if the given value is not nil.
|
||||
func (_c *APIKeyCreate) SetNillableWindow7dStart(v *time.Time) *APIKeyCreate {
|
||||
if v != nil {
|
||||
_c.SetWindow7dStart(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetUser sets the "user" edge to the User entity.
|
||||
func (_c *APIKeyCreate) SetUser(v *User) *APIKeyCreate {
|
||||
return _c.SetUserID(v.ID)
|
||||
@@ -269,6 +395,30 @@ func (_c *APIKeyCreate) defaults() error {
|
||||
v := apikey.DefaultQuotaUsed
|
||||
_c.mutation.SetQuotaUsed(v)
|
||||
}
|
||||
if _, ok := _c.mutation.RateLimit5h(); !ok {
|
||||
v := apikey.DefaultRateLimit5h
|
||||
_c.mutation.SetRateLimit5h(v)
|
||||
}
|
||||
if _, ok := _c.mutation.RateLimit1d(); !ok {
|
||||
v := apikey.DefaultRateLimit1d
|
||||
_c.mutation.SetRateLimit1d(v)
|
||||
}
|
||||
if _, ok := _c.mutation.RateLimit7d(); !ok {
|
||||
v := apikey.DefaultRateLimit7d
|
||||
_c.mutation.SetRateLimit7d(v)
|
||||
}
|
||||
if _, ok := _c.mutation.Usage5h(); !ok {
|
||||
v := apikey.DefaultUsage5h
|
||||
_c.mutation.SetUsage5h(v)
|
||||
}
|
||||
if _, ok := _c.mutation.Usage1d(); !ok {
|
||||
v := apikey.DefaultUsage1d
|
||||
_c.mutation.SetUsage1d(v)
|
||||
}
|
||||
if _, ok := _c.mutation.Usage7d(); !ok {
|
||||
v := apikey.DefaultUsage7d
|
||||
_c.mutation.SetUsage7d(v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -313,6 +463,24 @@ func (_c *APIKeyCreate) check() error {
|
||||
if _, ok := _c.mutation.QuotaUsed(); !ok {
|
||||
return &ValidationError{Name: "quota_used", err: errors.New(`ent: missing required field "APIKey.quota_used"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.RateLimit5h(); !ok {
|
||||
return &ValidationError{Name: "rate_limit_5h", err: errors.New(`ent: missing required field "APIKey.rate_limit_5h"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.RateLimit1d(); !ok {
|
||||
return &ValidationError{Name: "rate_limit_1d", err: errors.New(`ent: missing required field "APIKey.rate_limit_1d"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.RateLimit7d(); !ok {
|
||||
return &ValidationError{Name: "rate_limit_7d", err: errors.New(`ent: missing required field "APIKey.rate_limit_7d"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.Usage5h(); !ok {
|
||||
return &ValidationError{Name: "usage_5h", err: errors.New(`ent: missing required field "APIKey.usage_5h"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.Usage1d(); !ok {
|
||||
return &ValidationError{Name: "usage_1d", err: errors.New(`ent: missing required field "APIKey.usage_1d"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.Usage7d(); !ok {
|
||||
return &ValidationError{Name: "usage_7d", err: errors.New(`ent: missing required field "APIKey.usage_7d"`)}
|
||||
}
|
||||
if len(_c.mutation.UserIDs()) == 0 {
|
||||
return &ValidationError{Name: "user", err: errors.New(`ent: missing required edge "APIKey.user"`)}
|
||||
}
|
||||
@@ -391,6 +559,42 @@ func (_c *APIKeyCreate) createSpec() (*APIKey, *sqlgraph.CreateSpec) {
|
||||
_spec.SetField(apikey.FieldExpiresAt, field.TypeTime, value)
|
||||
_node.ExpiresAt = &value
|
||||
}
|
||||
if value, ok := _c.mutation.RateLimit5h(); ok {
|
||||
_spec.SetField(apikey.FieldRateLimit5h, field.TypeFloat64, value)
|
||||
_node.RateLimit5h = value
|
||||
}
|
||||
if value, ok := _c.mutation.RateLimit1d(); ok {
|
||||
_spec.SetField(apikey.FieldRateLimit1d, field.TypeFloat64, value)
|
||||
_node.RateLimit1d = value
|
||||
}
|
||||
if value, ok := _c.mutation.RateLimit7d(); ok {
|
||||
_spec.SetField(apikey.FieldRateLimit7d, field.TypeFloat64, value)
|
||||
_node.RateLimit7d = value
|
||||
}
|
||||
if value, ok := _c.mutation.Usage5h(); ok {
|
||||
_spec.SetField(apikey.FieldUsage5h, field.TypeFloat64, value)
|
||||
_node.Usage5h = value
|
||||
}
|
||||
if value, ok := _c.mutation.Usage1d(); ok {
|
||||
_spec.SetField(apikey.FieldUsage1d, field.TypeFloat64, value)
|
||||
_node.Usage1d = value
|
||||
}
|
||||
if value, ok := _c.mutation.Usage7d(); ok {
|
||||
_spec.SetField(apikey.FieldUsage7d, field.TypeFloat64, value)
|
||||
_node.Usage7d = value
|
||||
}
|
||||
if value, ok := _c.mutation.Window5hStart(); ok {
|
||||
_spec.SetField(apikey.FieldWindow5hStart, field.TypeTime, value)
|
||||
_node.Window5hStart = &value
|
||||
}
|
||||
if value, ok := _c.mutation.Window1dStart(); ok {
|
||||
_spec.SetField(apikey.FieldWindow1dStart, field.TypeTime, value)
|
||||
_node.Window1dStart = &value
|
||||
}
|
||||
if value, ok := _c.mutation.Window7dStart(); ok {
|
||||
_spec.SetField(apikey.FieldWindow7dStart, field.TypeTime, value)
|
||||
_node.Window7dStart = &value
|
||||
}
|
||||
if nodes := _c.mutation.UserIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
@@ -697,6 +901,168 @@ func (u *APIKeyUpsert) ClearExpiresAt() *APIKeyUpsert {
|
||||
return u
|
||||
}
|
||||
|
||||
// SetRateLimit5h sets the "rate_limit_5h" field.
|
||||
func (u *APIKeyUpsert) SetRateLimit5h(v float64) *APIKeyUpsert {
|
||||
u.Set(apikey.FieldRateLimit5h, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateRateLimit5h sets the "rate_limit_5h" field to the value that was provided on create.
|
||||
func (u *APIKeyUpsert) UpdateRateLimit5h() *APIKeyUpsert {
|
||||
u.SetExcluded(apikey.FieldRateLimit5h)
|
||||
return u
|
||||
}
|
||||
|
||||
// AddRateLimit5h adds v to the "rate_limit_5h" field.
|
||||
func (u *APIKeyUpsert) AddRateLimit5h(v float64) *APIKeyUpsert {
|
||||
u.Add(apikey.FieldRateLimit5h, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetRateLimit1d sets the "rate_limit_1d" field.
|
||||
func (u *APIKeyUpsert) SetRateLimit1d(v float64) *APIKeyUpsert {
|
||||
u.Set(apikey.FieldRateLimit1d, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateRateLimit1d sets the "rate_limit_1d" field to the value that was provided on create.
|
||||
func (u *APIKeyUpsert) UpdateRateLimit1d() *APIKeyUpsert {
|
||||
u.SetExcluded(apikey.FieldRateLimit1d)
|
||||
return u
|
||||
}
|
||||
|
||||
// AddRateLimit1d adds v to the "rate_limit_1d" field.
|
||||
func (u *APIKeyUpsert) AddRateLimit1d(v float64) *APIKeyUpsert {
|
||||
u.Add(apikey.FieldRateLimit1d, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetRateLimit7d sets the "rate_limit_7d" field.
|
||||
func (u *APIKeyUpsert) SetRateLimit7d(v float64) *APIKeyUpsert {
|
||||
u.Set(apikey.FieldRateLimit7d, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateRateLimit7d sets the "rate_limit_7d" field to the value that was provided on create.
|
||||
func (u *APIKeyUpsert) UpdateRateLimit7d() *APIKeyUpsert {
|
||||
u.SetExcluded(apikey.FieldRateLimit7d)
|
||||
return u
|
||||
}
|
||||
|
||||
// AddRateLimit7d adds v to the "rate_limit_7d" field.
|
||||
func (u *APIKeyUpsert) AddRateLimit7d(v float64) *APIKeyUpsert {
|
||||
u.Add(apikey.FieldRateLimit7d, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetUsage5h sets the "usage_5h" field.
|
||||
func (u *APIKeyUpsert) SetUsage5h(v float64) *APIKeyUpsert {
|
||||
u.Set(apikey.FieldUsage5h, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateUsage5h sets the "usage_5h" field to the value that was provided on create.
|
||||
func (u *APIKeyUpsert) UpdateUsage5h() *APIKeyUpsert {
|
||||
u.SetExcluded(apikey.FieldUsage5h)
|
||||
return u
|
||||
}
|
||||
|
||||
// AddUsage5h adds v to the "usage_5h" field.
|
||||
func (u *APIKeyUpsert) AddUsage5h(v float64) *APIKeyUpsert {
|
||||
u.Add(apikey.FieldUsage5h, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetUsage1d sets the "usage_1d" field.
|
||||
func (u *APIKeyUpsert) SetUsage1d(v float64) *APIKeyUpsert {
|
||||
u.Set(apikey.FieldUsage1d, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateUsage1d sets the "usage_1d" field to the value that was provided on create.
|
||||
func (u *APIKeyUpsert) UpdateUsage1d() *APIKeyUpsert {
|
||||
u.SetExcluded(apikey.FieldUsage1d)
|
||||
return u
|
||||
}
|
||||
|
||||
// AddUsage1d adds v to the "usage_1d" field.
|
||||
func (u *APIKeyUpsert) AddUsage1d(v float64) *APIKeyUpsert {
|
||||
u.Add(apikey.FieldUsage1d, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetUsage7d sets the "usage_7d" field.
|
||||
func (u *APIKeyUpsert) SetUsage7d(v float64) *APIKeyUpsert {
|
||||
u.Set(apikey.FieldUsage7d, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateUsage7d sets the "usage_7d" field to the value that was provided on create.
|
||||
func (u *APIKeyUpsert) UpdateUsage7d() *APIKeyUpsert {
|
||||
u.SetExcluded(apikey.FieldUsage7d)
|
||||
return u
|
||||
}
|
||||
|
||||
// AddUsage7d adds v to the "usage_7d" field.
|
||||
func (u *APIKeyUpsert) AddUsage7d(v float64) *APIKeyUpsert {
|
||||
u.Add(apikey.FieldUsage7d, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetWindow5hStart sets the "window_5h_start" field.
|
||||
func (u *APIKeyUpsert) SetWindow5hStart(v time.Time) *APIKeyUpsert {
|
||||
u.Set(apikey.FieldWindow5hStart, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateWindow5hStart sets the "window_5h_start" field to the value that was provided on create.
|
||||
func (u *APIKeyUpsert) UpdateWindow5hStart() *APIKeyUpsert {
|
||||
u.SetExcluded(apikey.FieldWindow5hStart)
|
||||
return u
|
||||
}
|
||||
|
||||
// ClearWindow5hStart clears the value of the "window_5h_start" field.
|
||||
func (u *APIKeyUpsert) ClearWindow5hStart() *APIKeyUpsert {
|
||||
u.SetNull(apikey.FieldWindow5hStart)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetWindow1dStart sets the "window_1d_start" field.
|
||||
func (u *APIKeyUpsert) SetWindow1dStart(v time.Time) *APIKeyUpsert {
|
||||
u.Set(apikey.FieldWindow1dStart, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateWindow1dStart sets the "window_1d_start" field to the value that was provided on create.
|
||||
func (u *APIKeyUpsert) UpdateWindow1dStart() *APIKeyUpsert {
|
||||
u.SetExcluded(apikey.FieldWindow1dStart)
|
||||
return u
|
||||
}
|
||||
|
||||
// ClearWindow1dStart clears the value of the "window_1d_start" field.
|
||||
func (u *APIKeyUpsert) ClearWindow1dStart() *APIKeyUpsert {
|
||||
u.SetNull(apikey.FieldWindow1dStart)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetWindow7dStart sets the "window_7d_start" field.
|
||||
func (u *APIKeyUpsert) SetWindow7dStart(v time.Time) *APIKeyUpsert {
|
||||
u.Set(apikey.FieldWindow7dStart, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateWindow7dStart sets the "window_7d_start" field to the value that was provided on create.
|
||||
func (u *APIKeyUpsert) UpdateWindow7dStart() *APIKeyUpsert {
|
||||
u.SetExcluded(apikey.FieldWindow7dStart)
|
||||
return u
|
||||
}
|
||||
|
||||
// ClearWindow7dStart clears the value of the "window_7d_start" field.
|
||||
func (u *APIKeyUpsert) ClearWindow7dStart() *APIKeyUpsert {
|
||||
u.SetNull(apikey.FieldWindow7dStart)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateNewValues updates the mutable fields using the new values that were set on create.
|
||||
// Using this option is equivalent to using:
|
||||
//
|
||||
@@ -980,6 +1346,195 @@ func (u *APIKeyUpsertOne) ClearExpiresAt() *APIKeyUpsertOne {
|
||||
})
|
||||
}
|
||||
|
||||
// SetRateLimit5h sets the "rate_limit_5h" field.
|
||||
func (u *APIKeyUpsertOne) SetRateLimit5h(v float64) *APIKeyUpsertOne {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.SetRateLimit5h(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddRateLimit5h adds v to the "rate_limit_5h" field.
|
||||
func (u *APIKeyUpsertOne) AddRateLimit5h(v float64) *APIKeyUpsertOne {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.AddRateLimit5h(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateRateLimit5h sets the "rate_limit_5h" field to the value that was provided on create.
|
||||
func (u *APIKeyUpsertOne) UpdateRateLimit5h() *APIKeyUpsertOne {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.UpdateRateLimit5h()
|
||||
})
|
||||
}
|
||||
|
||||
// SetRateLimit1d sets the "rate_limit_1d" field.
|
||||
func (u *APIKeyUpsertOne) SetRateLimit1d(v float64) *APIKeyUpsertOne {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.SetRateLimit1d(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddRateLimit1d adds v to the "rate_limit_1d" field.
|
||||
func (u *APIKeyUpsertOne) AddRateLimit1d(v float64) *APIKeyUpsertOne {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.AddRateLimit1d(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateRateLimit1d sets the "rate_limit_1d" field to the value that was provided on create.
|
||||
func (u *APIKeyUpsertOne) UpdateRateLimit1d() *APIKeyUpsertOne {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.UpdateRateLimit1d()
|
||||
})
|
||||
}
|
||||
|
||||
// SetRateLimit7d sets the "rate_limit_7d" field.
|
||||
func (u *APIKeyUpsertOne) SetRateLimit7d(v float64) *APIKeyUpsertOne {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.SetRateLimit7d(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddRateLimit7d adds v to the "rate_limit_7d" field.
|
||||
func (u *APIKeyUpsertOne) AddRateLimit7d(v float64) *APIKeyUpsertOne {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.AddRateLimit7d(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateRateLimit7d sets the "rate_limit_7d" field to the value that was provided on create.
|
||||
func (u *APIKeyUpsertOne) UpdateRateLimit7d() *APIKeyUpsertOne {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.UpdateRateLimit7d()
|
||||
})
|
||||
}
|
||||
|
||||
// SetUsage5h sets the "usage_5h" field.
|
||||
func (u *APIKeyUpsertOne) SetUsage5h(v float64) *APIKeyUpsertOne {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.SetUsage5h(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddUsage5h adds v to the "usage_5h" field.
|
||||
func (u *APIKeyUpsertOne) AddUsage5h(v float64) *APIKeyUpsertOne {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.AddUsage5h(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateUsage5h sets the "usage_5h" field to the value that was provided on create.
|
||||
func (u *APIKeyUpsertOne) UpdateUsage5h() *APIKeyUpsertOne {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.UpdateUsage5h()
|
||||
})
|
||||
}
|
||||
|
||||
// SetUsage1d sets the "usage_1d" field.
|
||||
func (u *APIKeyUpsertOne) SetUsage1d(v float64) *APIKeyUpsertOne {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.SetUsage1d(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddUsage1d adds v to the "usage_1d" field.
|
||||
func (u *APIKeyUpsertOne) AddUsage1d(v float64) *APIKeyUpsertOne {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.AddUsage1d(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateUsage1d sets the "usage_1d" field to the value that was provided on create.
|
||||
func (u *APIKeyUpsertOne) UpdateUsage1d() *APIKeyUpsertOne {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.UpdateUsage1d()
|
||||
})
|
||||
}
|
||||
|
||||
// SetUsage7d sets the "usage_7d" field.
|
||||
func (u *APIKeyUpsertOne) SetUsage7d(v float64) *APIKeyUpsertOne {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.SetUsage7d(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddUsage7d adds v to the "usage_7d" field.
|
||||
func (u *APIKeyUpsertOne) AddUsage7d(v float64) *APIKeyUpsertOne {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.AddUsage7d(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateUsage7d sets the "usage_7d" field to the value that was provided on create.
|
||||
func (u *APIKeyUpsertOne) UpdateUsage7d() *APIKeyUpsertOne {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.UpdateUsage7d()
|
||||
})
|
||||
}
|
||||
|
||||
// SetWindow5hStart sets the "window_5h_start" field.
|
||||
func (u *APIKeyUpsertOne) SetWindow5hStart(v time.Time) *APIKeyUpsertOne {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.SetWindow5hStart(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateWindow5hStart sets the "window_5h_start" field to the value that was provided on create.
|
||||
func (u *APIKeyUpsertOne) UpdateWindow5hStart() *APIKeyUpsertOne {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.UpdateWindow5hStart()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearWindow5hStart clears the value of the "window_5h_start" field.
|
||||
func (u *APIKeyUpsertOne) ClearWindow5hStart() *APIKeyUpsertOne {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.ClearWindow5hStart()
|
||||
})
|
||||
}
|
||||
|
||||
// SetWindow1dStart sets the "window_1d_start" field.
|
||||
func (u *APIKeyUpsertOne) SetWindow1dStart(v time.Time) *APIKeyUpsertOne {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.SetWindow1dStart(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateWindow1dStart sets the "window_1d_start" field to the value that was provided on create.
|
||||
func (u *APIKeyUpsertOne) UpdateWindow1dStart() *APIKeyUpsertOne {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.UpdateWindow1dStart()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearWindow1dStart clears the value of the "window_1d_start" field.
|
||||
func (u *APIKeyUpsertOne) ClearWindow1dStart() *APIKeyUpsertOne {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.ClearWindow1dStart()
|
||||
})
|
||||
}
|
||||
|
||||
// SetWindow7dStart sets the "window_7d_start" field.
|
||||
func (u *APIKeyUpsertOne) SetWindow7dStart(v time.Time) *APIKeyUpsertOne {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.SetWindow7dStart(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateWindow7dStart sets the "window_7d_start" field to the value that was provided on create.
|
||||
func (u *APIKeyUpsertOne) UpdateWindow7dStart() *APIKeyUpsertOne {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.UpdateWindow7dStart()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearWindow7dStart clears the value of the "window_7d_start" field.
|
||||
func (u *APIKeyUpsertOne) ClearWindow7dStart() *APIKeyUpsertOne {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.ClearWindow7dStart()
|
||||
})
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (u *APIKeyUpsertOne) Exec(ctx context.Context) error {
|
||||
if len(u.create.conflict) == 0 {
|
||||
@@ -1429,6 +1984,195 @@ func (u *APIKeyUpsertBulk) ClearExpiresAt() *APIKeyUpsertBulk {
|
||||
})
|
||||
}
|
||||
|
||||
// SetRateLimit5h sets the "rate_limit_5h" field.
|
||||
func (u *APIKeyUpsertBulk) SetRateLimit5h(v float64) *APIKeyUpsertBulk {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.SetRateLimit5h(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddRateLimit5h adds v to the "rate_limit_5h" field.
|
||||
func (u *APIKeyUpsertBulk) AddRateLimit5h(v float64) *APIKeyUpsertBulk {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.AddRateLimit5h(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateRateLimit5h sets the "rate_limit_5h" field to the value that was provided on create.
|
||||
func (u *APIKeyUpsertBulk) UpdateRateLimit5h() *APIKeyUpsertBulk {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.UpdateRateLimit5h()
|
||||
})
|
||||
}
|
||||
|
||||
// SetRateLimit1d sets the "rate_limit_1d" field.
|
||||
func (u *APIKeyUpsertBulk) SetRateLimit1d(v float64) *APIKeyUpsertBulk {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.SetRateLimit1d(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddRateLimit1d adds v to the "rate_limit_1d" field.
|
||||
func (u *APIKeyUpsertBulk) AddRateLimit1d(v float64) *APIKeyUpsertBulk {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.AddRateLimit1d(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateRateLimit1d sets the "rate_limit_1d" field to the value that was provided on create.
|
||||
func (u *APIKeyUpsertBulk) UpdateRateLimit1d() *APIKeyUpsertBulk {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.UpdateRateLimit1d()
|
||||
})
|
||||
}
|
||||
|
||||
// SetRateLimit7d sets the "rate_limit_7d" field.
|
||||
func (u *APIKeyUpsertBulk) SetRateLimit7d(v float64) *APIKeyUpsertBulk {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.SetRateLimit7d(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddRateLimit7d adds v to the "rate_limit_7d" field.
|
||||
func (u *APIKeyUpsertBulk) AddRateLimit7d(v float64) *APIKeyUpsertBulk {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.AddRateLimit7d(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateRateLimit7d sets the "rate_limit_7d" field to the value that was provided on create.
|
||||
func (u *APIKeyUpsertBulk) UpdateRateLimit7d() *APIKeyUpsertBulk {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.UpdateRateLimit7d()
|
||||
})
|
||||
}
|
||||
|
||||
// SetUsage5h sets the "usage_5h" field.
|
||||
func (u *APIKeyUpsertBulk) SetUsage5h(v float64) *APIKeyUpsertBulk {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.SetUsage5h(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddUsage5h adds v to the "usage_5h" field.
|
||||
func (u *APIKeyUpsertBulk) AddUsage5h(v float64) *APIKeyUpsertBulk {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.AddUsage5h(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateUsage5h sets the "usage_5h" field to the value that was provided on create.
|
||||
func (u *APIKeyUpsertBulk) UpdateUsage5h() *APIKeyUpsertBulk {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.UpdateUsage5h()
|
||||
})
|
||||
}
|
||||
|
||||
// SetUsage1d sets the "usage_1d" field.
|
||||
func (u *APIKeyUpsertBulk) SetUsage1d(v float64) *APIKeyUpsertBulk {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.SetUsage1d(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddUsage1d adds v to the "usage_1d" field.
|
||||
func (u *APIKeyUpsertBulk) AddUsage1d(v float64) *APIKeyUpsertBulk {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.AddUsage1d(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateUsage1d sets the "usage_1d" field to the value that was provided on create.
|
||||
func (u *APIKeyUpsertBulk) UpdateUsage1d() *APIKeyUpsertBulk {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.UpdateUsage1d()
|
||||
})
|
||||
}
|
||||
|
||||
// SetUsage7d sets the "usage_7d" field.
|
||||
func (u *APIKeyUpsertBulk) SetUsage7d(v float64) *APIKeyUpsertBulk {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.SetUsage7d(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddUsage7d adds v to the "usage_7d" field.
|
||||
func (u *APIKeyUpsertBulk) AddUsage7d(v float64) *APIKeyUpsertBulk {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.AddUsage7d(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateUsage7d sets the "usage_7d" field to the value that was provided on create.
|
||||
func (u *APIKeyUpsertBulk) UpdateUsage7d() *APIKeyUpsertBulk {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.UpdateUsage7d()
|
||||
})
|
||||
}
|
||||
|
||||
// SetWindow5hStart sets the "window_5h_start" field.
|
||||
func (u *APIKeyUpsertBulk) SetWindow5hStart(v time.Time) *APIKeyUpsertBulk {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.SetWindow5hStart(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateWindow5hStart sets the "window_5h_start" field to the value that was provided on create.
|
||||
func (u *APIKeyUpsertBulk) UpdateWindow5hStart() *APIKeyUpsertBulk {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.UpdateWindow5hStart()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearWindow5hStart clears the value of the "window_5h_start" field.
|
||||
func (u *APIKeyUpsertBulk) ClearWindow5hStart() *APIKeyUpsertBulk {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.ClearWindow5hStart()
|
||||
})
|
||||
}
|
||||
|
||||
// SetWindow1dStart sets the "window_1d_start" field.
|
||||
func (u *APIKeyUpsertBulk) SetWindow1dStart(v time.Time) *APIKeyUpsertBulk {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.SetWindow1dStart(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateWindow1dStart sets the "window_1d_start" field to the value that was provided on create.
|
||||
func (u *APIKeyUpsertBulk) UpdateWindow1dStart() *APIKeyUpsertBulk {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.UpdateWindow1dStart()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearWindow1dStart clears the value of the "window_1d_start" field.
|
||||
func (u *APIKeyUpsertBulk) ClearWindow1dStart() *APIKeyUpsertBulk {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.ClearWindow1dStart()
|
||||
})
|
||||
}
|
||||
|
||||
// SetWindow7dStart sets the "window_7d_start" field.
|
||||
func (u *APIKeyUpsertBulk) SetWindow7dStart(v time.Time) *APIKeyUpsertBulk {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.SetWindow7dStart(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateWindow7dStart sets the "window_7d_start" field to the value that was provided on create.
|
||||
func (u *APIKeyUpsertBulk) UpdateWindow7dStart() *APIKeyUpsertBulk {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.UpdateWindow7dStart()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearWindow7dStart clears the value of the "window_7d_start" field.
|
||||
func (u *APIKeyUpsertBulk) ClearWindow7dStart() *APIKeyUpsertBulk {
|
||||
return u.Update(func(s *APIKeyUpsert) {
|
||||
s.ClearWindow7dStart()
|
||||
})
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (u *APIKeyUpsertBulk) Exec(ctx context.Context) error {
|
||||
if u.create.err != nil {
|
||||
|
||||
@@ -252,6 +252,192 @@ func (_u *APIKeyUpdate) ClearExpiresAt() *APIKeyUpdate {
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetRateLimit5h sets the "rate_limit_5h" field.
|
||||
func (_u *APIKeyUpdate) SetRateLimit5h(v float64) *APIKeyUpdate {
|
||||
_u.mutation.ResetRateLimit5h()
|
||||
_u.mutation.SetRateLimit5h(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableRateLimit5h sets the "rate_limit_5h" field if the given value is not nil.
|
||||
func (_u *APIKeyUpdate) SetNillableRateLimit5h(v *float64) *APIKeyUpdate {
|
||||
if v != nil {
|
||||
_u.SetRateLimit5h(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddRateLimit5h adds value to the "rate_limit_5h" field.
|
||||
func (_u *APIKeyUpdate) AddRateLimit5h(v float64) *APIKeyUpdate {
|
||||
_u.mutation.AddRateLimit5h(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetRateLimit1d sets the "rate_limit_1d" field.
|
||||
func (_u *APIKeyUpdate) SetRateLimit1d(v float64) *APIKeyUpdate {
|
||||
_u.mutation.ResetRateLimit1d()
|
||||
_u.mutation.SetRateLimit1d(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableRateLimit1d sets the "rate_limit_1d" field if the given value is not nil.
|
||||
func (_u *APIKeyUpdate) SetNillableRateLimit1d(v *float64) *APIKeyUpdate {
|
||||
if v != nil {
|
||||
_u.SetRateLimit1d(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddRateLimit1d adds value to the "rate_limit_1d" field.
|
||||
func (_u *APIKeyUpdate) AddRateLimit1d(v float64) *APIKeyUpdate {
|
||||
_u.mutation.AddRateLimit1d(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetRateLimit7d sets the "rate_limit_7d" field.
|
||||
func (_u *APIKeyUpdate) SetRateLimit7d(v float64) *APIKeyUpdate {
|
||||
_u.mutation.ResetRateLimit7d()
|
||||
_u.mutation.SetRateLimit7d(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableRateLimit7d sets the "rate_limit_7d" field if the given value is not nil.
|
||||
func (_u *APIKeyUpdate) SetNillableRateLimit7d(v *float64) *APIKeyUpdate {
|
||||
if v != nil {
|
||||
_u.SetRateLimit7d(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddRateLimit7d adds value to the "rate_limit_7d" field.
|
||||
func (_u *APIKeyUpdate) AddRateLimit7d(v float64) *APIKeyUpdate {
|
||||
_u.mutation.AddRateLimit7d(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetUsage5h sets the "usage_5h" field.
|
||||
func (_u *APIKeyUpdate) SetUsage5h(v float64) *APIKeyUpdate {
|
||||
_u.mutation.ResetUsage5h()
|
||||
_u.mutation.SetUsage5h(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableUsage5h sets the "usage_5h" field if the given value is not nil.
|
||||
func (_u *APIKeyUpdate) SetNillableUsage5h(v *float64) *APIKeyUpdate {
|
||||
if v != nil {
|
||||
_u.SetUsage5h(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddUsage5h adds value to the "usage_5h" field.
|
||||
func (_u *APIKeyUpdate) AddUsage5h(v float64) *APIKeyUpdate {
|
||||
_u.mutation.AddUsage5h(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetUsage1d sets the "usage_1d" field.
|
||||
func (_u *APIKeyUpdate) SetUsage1d(v float64) *APIKeyUpdate {
|
||||
_u.mutation.ResetUsage1d()
|
||||
_u.mutation.SetUsage1d(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableUsage1d sets the "usage_1d" field if the given value is not nil.
|
||||
func (_u *APIKeyUpdate) SetNillableUsage1d(v *float64) *APIKeyUpdate {
|
||||
if v != nil {
|
||||
_u.SetUsage1d(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddUsage1d adds value to the "usage_1d" field.
|
||||
func (_u *APIKeyUpdate) AddUsage1d(v float64) *APIKeyUpdate {
|
||||
_u.mutation.AddUsage1d(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetUsage7d sets the "usage_7d" field.
|
||||
func (_u *APIKeyUpdate) SetUsage7d(v float64) *APIKeyUpdate {
|
||||
_u.mutation.ResetUsage7d()
|
||||
_u.mutation.SetUsage7d(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableUsage7d sets the "usage_7d" field if the given value is not nil.
|
||||
func (_u *APIKeyUpdate) SetNillableUsage7d(v *float64) *APIKeyUpdate {
|
||||
if v != nil {
|
||||
_u.SetUsage7d(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddUsage7d adds value to the "usage_7d" field.
|
||||
func (_u *APIKeyUpdate) AddUsage7d(v float64) *APIKeyUpdate {
|
||||
_u.mutation.AddUsage7d(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetWindow5hStart sets the "window_5h_start" field.
|
||||
func (_u *APIKeyUpdate) SetWindow5hStart(v time.Time) *APIKeyUpdate {
|
||||
_u.mutation.SetWindow5hStart(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableWindow5hStart sets the "window_5h_start" field if the given value is not nil.
|
||||
func (_u *APIKeyUpdate) SetNillableWindow5hStart(v *time.Time) *APIKeyUpdate {
|
||||
if v != nil {
|
||||
_u.SetWindow5hStart(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// ClearWindow5hStart clears the value of the "window_5h_start" field.
|
||||
func (_u *APIKeyUpdate) ClearWindow5hStart() *APIKeyUpdate {
|
||||
_u.mutation.ClearWindow5hStart()
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetWindow1dStart sets the "window_1d_start" field.
|
||||
func (_u *APIKeyUpdate) SetWindow1dStart(v time.Time) *APIKeyUpdate {
|
||||
_u.mutation.SetWindow1dStart(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableWindow1dStart sets the "window_1d_start" field if the given value is not nil.
|
||||
func (_u *APIKeyUpdate) SetNillableWindow1dStart(v *time.Time) *APIKeyUpdate {
|
||||
if v != nil {
|
||||
_u.SetWindow1dStart(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// ClearWindow1dStart clears the value of the "window_1d_start" field.
|
||||
func (_u *APIKeyUpdate) ClearWindow1dStart() *APIKeyUpdate {
|
||||
_u.mutation.ClearWindow1dStart()
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetWindow7dStart sets the "window_7d_start" field.
|
||||
func (_u *APIKeyUpdate) SetWindow7dStart(v time.Time) *APIKeyUpdate {
|
||||
_u.mutation.SetWindow7dStart(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableWindow7dStart sets the "window_7d_start" field if the given value is not nil.
|
||||
func (_u *APIKeyUpdate) SetNillableWindow7dStart(v *time.Time) *APIKeyUpdate {
|
||||
if v != nil {
|
||||
_u.SetWindow7dStart(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// ClearWindow7dStart clears the value of the "window_7d_start" field.
|
||||
func (_u *APIKeyUpdate) ClearWindow7dStart() *APIKeyUpdate {
|
||||
_u.mutation.ClearWindow7dStart()
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetUser sets the "user" edge to the User entity.
|
||||
func (_u *APIKeyUpdate) SetUser(v *User) *APIKeyUpdate {
|
||||
return _u.SetUserID(v.ID)
|
||||
@@ -456,6 +642,60 @@ func (_u *APIKeyUpdate) sqlSave(ctx context.Context) (_node int, err error) {
|
||||
if _u.mutation.ExpiresAtCleared() {
|
||||
_spec.ClearField(apikey.FieldExpiresAt, field.TypeTime)
|
||||
}
|
||||
if value, ok := _u.mutation.RateLimit5h(); ok {
|
||||
_spec.SetField(apikey.FieldRateLimit5h, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedRateLimit5h(); ok {
|
||||
_spec.AddField(apikey.FieldRateLimit5h, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.RateLimit1d(); ok {
|
||||
_spec.SetField(apikey.FieldRateLimit1d, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedRateLimit1d(); ok {
|
||||
_spec.AddField(apikey.FieldRateLimit1d, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.RateLimit7d(); ok {
|
||||
_spec.SetField(apikey.FieldRateLimit7d, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedRateLimit7d(); ok {
|
||||
_spec.AddField(apikey.FieldRateLimit7d, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.Usage5h(); ok {
|
||||
_spec.SetField(apikey.FieldUsage5h, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedUsage5h(); ok {
|
||||
_spec.AddField(apikey.FieldUsage5h, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.Usage1d(); ok {
|
||||
_spec.SetField(apikey.FieldUsage1d, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedUsage1d(); ok {
|
||||
_spec.AddField(apikey.FieldUsage1d, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.Usage7d(); ok {
|
||||
_spec.SetField(apikey.FieldUsage7d, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedUsage7d(); ok {
|
||||
_spec.AddField(apikey.FieldUsage7d, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.Window5hStart(); ok {
|
||||
_spec.SetField(apikey.FieldWindow5hStart, field.TypeTime, value)
|
||||
}
|
||||
if _u.mutation.Window5hStartCleared() {
|
||||
_spec.ClearField(apikey.FieldWindow5hStart, field.TypeTime)
|
||||
}
|
||||
if value, ok := _u.mutation.Window1dStart(); ok {
|
||||
_spec.SetField(apikey.FieldWindow1dStart, field.TypeTime, value)
|
||||
}
|
||||
if _u.mutation.Window1dStartCleared() {
|
||||
_spec.ClearField(apikey.FieldWindow1dStart, field.TypeTime)
|
||||
}
|
||||
if value, ok := _u.mutation.Window7dStart(); ok {
|
||||
_spec.SetField(apikey.FieldWindow7dStart, field.TypeTime, value)
|
||||
}
|
||||
if _u.mutation.Window7dStartCleared() {
|
||||
_spec.ClearField(apikey.FieldWindow7dStart, field.TypeTime)
|
||||
}
|
||||
if _u.mutation.UserCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
@@ -799,6 +1039,192 @@ func (_u *APIKeyUpdateOne) ClearExpiresAt() *APIKeyUpdateOne {
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetRateLimit5h sets the "rate_limit_5h" field.
|
||||
func (_u *APIKeyUpdateOne) SetRateLimit5h(v float64) *APIKeyUpdateOne {
|
||||
_u.mutation.ResetRateLimit5h()
|
||||
_u.mutation.SetRateLimit5h(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableRateLimit5h sets the "rate_limit_5h" field if the given value is not nil.
|
||||
func (_u *APIKeyUpdateOne) SetNillableRateLimit5h(v *float64) *APIKeyUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetRateLimit5h(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddRateLimit5h adds value to the "rate_limit_5h" field.
|
||||
func (_u *APIKeyUpdateOne) AddRateLimit5h(v float64) *APIKeyUpdateOne {
|
||||
_u.mutation.AddRateLimit5h(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetRateLimit1d sets the "rate_limit_1d" field.
|
||||
func (_u *APIKeyUpdateOne) SetRateLimit1d(v float64) *APIKeyUpdateOne {
|
||||
_u.mutation.ResetRateLimit1d()
|
||||
_u.mutation.SetRateLimit1d(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableRateLimit1d sets the "rate_limit_1d" field if the given value is not nil.
|
||||
func (_u *APIKeyUpdateOne) SetNillableRateLimit1d(v *float64) *APIKeyUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetRateLimit1d(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddRateLimit1d adds value to the "rate_limit_1d" field.
|
||||
func (_u *APIKeyUpdateOne) AddRateLimit1d(v float64) *APIKeyUpdateOne {
|
||||
_u.mutation.AddRateLimit1d(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetRateLimit7d sets the "rate_limit_7d" field.
|
||||
func (_u *APIKeyUpdateOne) SetRateLimit7d(v float64) *APIKeyUpdateOne {
|
||||
_u.mutation.ResetRateLimit7d()
|
||||
_u.mutation.SetRateLimit7d(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableRateLimit7d sets the "rate_limit_7d" field if the given value is not nil.
|
||||
func (_u *APIKeyUpdateOne) SetNillableRateLimit7d(v *float64) *APIKeyUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetRateLimit7d(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddRateLimit7d adds value to the "rate_limit_7d" field.
|
||||
func (_u *APIKeyUpdateOne) AddRateLimit7d(v float64) *APIKeyUpdateOne {
|
||||
_u.mutation.AddRateLimit7d(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetUsage5h sets the "usage_5h" field.
|
||||
func (_u *APIKeyUpdateOne) SetUsage5h(v float64) *APIKeyUpdateOne {
|
||||
_u.mutation.ResetUsage5h()
|
||||
_u.mutation.SetUsage5h(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableUsage5h sets the "usage_5h" field if the given value is not nil.
|
||||
func (_u *APIKeyUpdateOne) SetNillableUsage5h(v *float64) *APIKeyUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetUsage5h(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddUsage5h adds value to the "usage_5h" field.
|
||||
func (_u *APIKeyUpdateOne) AddUsage5h(v float64) *APIKeyUpdateOne {
|
||||
_u.mutation.AddUsage5h(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetUsage1d sets the "usage_1d" field.
|
||||
func (_u *APIKeyUpdateOne) SetUsage1d(v float64) *APIKeyUpdateOne {
|
||||
_u.mutation.ResetUsage1d()
|
||||
_u.mutation.SetUsage1d(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableUsage1d sets the "usage_1d" field if the given value is not nil.
|
||||
func (_u *APIKeyUpdateOne) SetNillableUsage1d(v *float64) *APIKeyUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetUsage1d(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddUsage1d adds value to the "usage_1d" field.
|
||||
func (_u *APIKeyUpdateOne) AddUsage1d(v float64) *APIKeyUpdateOne {
|
||||
_u.mutation.AddUsage1d(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetUsage7d sets the "usage_7d" field.
|
||||
func (_u *APIKeyUpdateOne) SetUsage7d(v float64) *APIKeyUpdateOne {
|
||||
_u.mutation.ResetUsage7d()
|
||||
_u.mutation.SetUsage7d(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableUsage7d sets the "usage_7d" field if the given value is not nil.
|
||||
func (_u *APIKeyUpdateOne) SetNillableUsage7d(v *float64) *APIKeyUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetUsage7d(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddUsage7d adds value to the "usage_7d" field.
|
||||
func (_u *APIKeyUpdateOne) AddUsage7d(v float64) *APIKeyUpdateOne {
|
||||
_u.mutation.AddUsage7d(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetWindow5hStart sets the "window_5h_start" field.
|
||||
func (_u *APIKeyUpdateOne) SetWindow5hStart(v time.Time) *APIKeyUpdateOne {
|
||||
_u.mutation.SetWindow5hStart(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableWindow5hStart sets the "window_5h_start" field if the given value is not nil.
|
||||
func (_u *APIKeyUpdateOne) SetNillableWindow5hStart(v *time.Time) *APIKeyUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetWindow5hStart(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// ClearWindow5hStart clears the value of the "window_5h_start" field.
|
||||
func (_u *APIKeyUpdateOne) ClearWindow5hStart() *APIKeyUpdateOne {
|
||||
_u.mutation.ClearWindow5hStart()
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetWindow1dStart sets the "window_1d_start" field.
|
||||
func (_u *APIKeyUpdateOne) SetWindow1dStart(v time.Time) *APIKeyUpdateOne {
|
||||
_u.mutation.SetWindow1dStart(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableWindow1dStart sets the "window_1d_start" field if the given value is not nil.
|
||||
func (_u *APIKeyUpdateOne) SetNillableWindow1dStart(v *time.Time) *APIKeyUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetWindow1dStart(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// ClearWindow1dStart clears the value of the "window_1d_start" field.
|
||||
func (_u *APIKeyUpdateOne) ClearWindow1dStart() *APIKeyUpdateOne {
|
||||
_u.mutation.ClearWindow1dStart()
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetWindow7dStart sets the "window_7d_start" field.
|
||||
func (_u *APIKeyUpdateOne) SetWindow7dStart(v time.Time) *APIKeyUpdateOne {
|
||||
_u.mutation.SetWindow7dStart(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableWindow7dStart sets the "window_7d_start" field if the given value is not nil.
|
||||
func (_u *APIKeyUpdateOne) SetNillableWindow7dStart(v *time.Time) *APIKeyUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetWindow7dStart(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// ClearWindow7dStart clears the value of the "window_7d_start" field.
|
||||
func (_u *APIKeyUpdateOne) ClearWindow7dStart() *APIKeyUpdateOne {
|
||||
_u.mutation.ClearWindow7dStart()
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetUser sets the "user" edge to the User entity.
|
||||
func (_u *APIKeyUpdateOne) SetUser(v *User) *APIKeyUpdateOne {
|
||||
return _u.SetUserID(v.ID)
|
||||
@@ -1033,6 +1459,60 @@ func (_u *APIKeyUpdateOne) sqlSave(ctx context.Context) (_node *APIKey, err erro
|
||||
if _u.mutation.ExpiresAtCleared() {
|
||||
_spec.ClearField(apikey.FieldExpiresAt, field.TypeTime)
|
||||
}
|
||||
if value, ok := _u.mutation.RateLimit5h(); ok {
|
||||
_spec.SetField(apikey.FieldRateLimit5h, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedRateLimit5h(); ok {
|
||||
_spec.AddField(apikey.FieldRateLimit5h, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.RateLimit1d(); ok {
|
||||
_spec.SetField(apikey.FieldRateLimit1d, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedRateLimit1d(); ok {
|
||||
_spec.AddField(apikey.FieldRateLimit1d, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.RateLimit7d(); ok {
|
||||
_spec.SetField(apikey.FieldRateLimit7d, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedRateLimit7d(); ok {
|
||||
_spec.AddField(apikey.FieldRateLimit7d, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.Usage5h(); ok {
|
||||
_spec.SetField(apikey.FieldUsage5h, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedUsage5h(); ok {
|
||||
_spec.AddField(apikey.FieldUsage5h, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.Usage1d(); ok {
|
||||
_spec.SetField(apikey.FieldUsage1d, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedUsage1d(); ok {
|
||||
_spec.AddField(apikey.FieldUsage1d, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.Usage7d(); ok {
|
||||
_spec.SetField(apikey.FieldUsage7d, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedUsage7d(); ok {
|
||||
_spec.AddField(apikey.FieldUsage7d, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.Window5hStart(); ok {
|
||||
_spec.SetField(apikey.FieldWindow5hStart, field.TypeTime, value)
|
||||
}
|
||||
if _u.mutation.Window5hStartCleared() {
|
||||
_spec.ClearField(apikey.FieldWindow5hStart, field.TypeTime)
|
||||
}
|
||||
if value, ok := _u.mutation.Window1dStart(); ok {
|
||||
_spec.SetField(apikey.FieldWindow1dStart, field.TypeTime, value)
|
||||
}
|
||||
if _u.mutation.Window1dStartCleared() {
|
||||
_spec.ClearField(apikey.FieldWindow1dStart, field.TypeTime)
|
||||
}
|
||||
if value, ok := _u.mutation.Window7dStart(); ok {
|
||||
_spec.SetField(apikey.FieldWindow7dStart, field.TypeTime, value)
|
||||
}
|
||||
if _u.mutation.Window7dStartCleared() {
|
||||
_spec.ClearField(apikey.FieldWindow7dStart, field.TypeTime)
|
||||
}
|
||||
if _u.mutation.UserCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
|
||||
@@ -60,6 +60,8 @@ type Group struct {
|
||||
SoraVideoPricePerRequest *float64 `json:"sora_video_price_per_request,omitempty"`
|
||||
// SoraVideoPricePerRequestHd holds the value of the "sora_video_price_per_request_hd" field.
|
||||
SoraVideoPricePerRequestHd *float64 `json:"sora_video_price_per_request_hd,omitempty"`
|
||||
// SoraStorageQuotaBytes holds the value of the "sora_storage_quota_bytes" field.
|
||||
SoraStorageQuotaBytes int64 `json:"sora_storage_quota_bytes,omitempty"`
|
||||
// allow Claude Code client only
|
||||
ClaudeCodeOnly bool `json:"claude_code_only,omitempty"`
|
||||
// fallback group for non-Claude-Code requests
|
||||
@@ -190,7 +192,7 @@ func (*Group) scanValues(columns []string) ([]any, error) {
|
||||
values[i] = new(sql.NullBool)
|
||||
case group.FieldRateMultiplier, group.FieldDailyLimitUsd, group.FieldWeeklyLimitUsd, group.FieldMonthlyLimitUsd, group.FieldImagePrice1k, group.FieldImagePrice2k, group.FieldImagePrice4k, group.FieldSoraImagePrice360, group.FieldSoraImagePrice540, group.FieldSoraVideoPricePerRequest, group.FieldSoraVideoPricePerRequestHd:
|
||||
values[i] = new(sql.NullFloat64)
|
||||
case group.FieldID, group.FieldDefaultValidityDays, group.FieldFallbackGroupID, group.FieldFallbackGroupIDOnInvalidRequest, group.FieldSortOrder:
|
||||
case group.FieldID, group.FieldDefaultValidityDays, group.FieldSoraStorageQuotaBytes, group.FieldFallbackGroupID, group.FieldFallbackGroupIDOnInvalidRequest, group.FieldSortOrder:
|
||||
values[i] = new(sql.NullInt64)
|
||||
case group.FieldName, group.FieldDescription, group.FieldStatus, group.FieldPlatform, group.FieldSubscriptionType:
|
||||
values[i] = new(sql.NullString)
|
||||
@@ -355,6 +357,12 @@ func (_m *Group) assignValues(columns []string, values []any) error {
|
||||
_m.SoraVideoPricePerRequestHd = new(float64)
|
||||
*_m.SoraVideoPricePerRequestHd = value.Float64
|
||||
}
|
||||
case group.FieldSoraStorageQuotaBytes:
|
||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field sora_storage_quota_bytes", values[i])
|
||||
} else if value.Valid {
|
||||
_m.SoraStorageQuotaBytes = value.Int64
|
||||
}
|
||||
case group.FieldClaudeCodeOnly:
|
||||
if value, ok := values[i].(*sql.NullBool); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field claude_code_only", values[i])
|
||||
@@ -578,6 +586,9 @@ func (_m *Group) String() string {
|
||||
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||
}
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("sora_storage_quota_bytes=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.SoraStorageQuotaBytes))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("claude_code_only=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.ClaudeCodeOnly))
|
||||
builder.WriteString(", ")
|
||||
|
||||
@@ -57,6 +57,8 @@ const (
|
||||
FieldSoraVideoPricePerRequest = "sora_video_price_per_request"
|
||||
// FieldSoraVideoPricePerRequestHd holds the string denoting the sora_video_price_per_request_hd field in the database.
|
||||
FieldSoraVideoPricePerRequestHd = "sora_video_price_per_request_hd"
|
||||
// FieldSoraStorageQuotaBytes holds the string denoting the sora_storage_quota_bytes field in the database.
|
||||
FieldSoraStorageQuotaBytes = "sora_storage_quota_bytes"
|
||||
// FieldClaudeCodeOnly holds the string denoting the claude_code_only field in the database.
|
||||
FieldClaudeCodeOnly = "claude_code_only"
|
||||
// FieldFallbackGroupID holds the string denoting the fallback_group_id field in the database.
|
||||
@@ -171,6 +173,7 @@ var Columns = []string{
|
||||
FieldSoraImagePrice540,
|
||||
FieldSoraVideoPricePerRequest,
|
||||
FieldSoraVideoPricePerRequestHd,
|
||||
FieldSoraStorageQuotaBytes,
|
||||
FieldClaudeCodeOnly,
|
||||
FieldFallbackGroupID,
|
||||
FieldFallbackGroupIDOnInvalidRequest,
|
||||
@@ -235,6 +238,8 @@ var (
|
||||
SubscriptionTypeValidator func(string) error
|
||||
// DefaultDefaultValidityDays holds the default value on creation for the "default_validity_days" field.
|
||||
DefaultDefaultValidityDays int
|
||||
// DefaultSoraStorageQuotaBytes holds the default value on creation for the "sora_storage_quota_bytes" field.
|
||||
DefaultSoraStorageQuotaBytes int64
|
||||
// DefaultClaudeCodeOnly holds the default value on creation for the "claude_code_only" field.
|
||||
DefaultClaudeCodeOnly bool
|
||||
// DefaultModelRoutingEnabled holds the default value on creation for the "model_routing_enabled" field.
|
||||
@@ -362,6 +367,11 @@ func BySoraVideoPricePerRequestHd(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldSoraVideoPricePerRequestHd, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// BySoraStorageQuotaBytes orders the results by the sora_storage_quota_bytes field.
|
||||
func BySoraStorageQuotaBytes(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldSoraStorageQuotaBytes, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByClaudeCodeOnly orders the results by the claude_code_only field.
|
||||
func ByClaudeCodeOnly(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldClaudeCodeOnly, opts...).ToFunc()
|
||||
|
||||
@@ -160,6 +160,11 @@ func SoraVideoPricePerRequestHd(v float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldEQ(FieldSoraVideoPricePerRequestHd, v))
|
||||
}
|
||||
|
||||
// SoraStorageQuotaBytes applies equality check predicate on the "sora_storage_quota_bytes" field. It's identical to SoraStorageQuotaBytesEQ.
|
||||
func SoraStorageQuotaBytes(v int64) predicate.Group {
|
||||
return predicate.Group(sql.FieldEQ(FieldSoraStorageQuotaBytes, v))
|
||||
}
|
||||
|
||||
// ClaudeCodeOnly applies equality check predicate on the "claude_code_only" field. It's identical to ClaudeCodeOnlyEQ.
|
||||
func ClaudeCodeOnly(v bool) predicate.Group {
|
||||
return predicate.Group(sql.FieldEQ(FieldClaudeCodeOnly, v))
|
||||
@@ -1250,6 +1255,46 @@ func SoraVideoPricePerRequestHdNotNil() predicate.Group {
|
||||
return predicate.Group(sql.FieldNotNull(FieldSoraVideoPricePerRequestHd))
|
||||
}
|
||||
|
||||
// SoraStorageQuotaBytesEQ applies the EQ predicate on the "sora_storage_quota_bytes" field.
|
||||
func SoraStorageQuotaBytesEQ(v int64) predicate.Group {
|
||||
return predicate.Group(sql.FieldEQ(FieldSoraStorageQuotaBytes, v))
|
||||
}
|
||||
|
||||
// SoraStorageQuotaBytesNEQ applies the NEQ predicate on the "sora_storage_quota_bytes" field.
|
||||
func SoraStorageQuotaBytesNEQ(v int64) predicate.Group {
|
||||
return predicate.Group(sql.FieldNEQ(FieldSoraStorageQuotaBytes, v))
|
||||
}
|
||||
|
||||
// SoraStorageQuotaBytesIn applies the In predicate on the "sora_storage_quota_bytes" field.
|
||||
func SoraStorageQuotaBytesIn(vs ...int64) predicate.Group {
|
||||
return predicate.Group(sql.FieldIn(FieldSoraStorageQuotaBytes, vs...))
|
||||
}
|
||||
|
||||
// SoraStorageQuotaBytesNotIn applies the NotIn predicate on the "sora_storage_quota_bytes" field.
|
||||
func SoraStorageQuotaBytesNotIn(vs ...int64) predicate.Group {
|
||||
return predicate.Group(sql.FieldNotIn(FieldSoraStorageQuotaBytes, vs...))
|
||||
}
|
||||
|
||||
// SoraStorageQuotaBytesGT applies the GT predicate on the "sora_storage_quota_bytes" field.
|
||||
func SoraStorageQuotaBytesGT(v int64) predicate.Group {
|
||||
return predicate.Group(sql.FieldGT(FieldSoraStorageQuotaBytes, v))
|
||||
}
|
||||
|
||||
// SoraStorageQuotaBytesGTE applies the GTE predicate on the "sora_storage_quota_bytes" field.
|
||||
func SoraStorageQuotaBytesGTE(v int64) predicate.Group {
|
||||
return predicate.Group(sql.FieldGTE(FieldSoraStorageQuotaBytes, v))
|
||||
}
|
||||
|
||||
// SoraStorageQuotaBytesLT applies the LT predicate on the "sora_storage_quota_bytes" field.
|
||||
func SoraStorageQuotaBytesLT(v int64) predicate.Group {
|
||||
return predicate.Group(sql.FieldLT(FieldSoraStorageQuotaBytes, v))
|
||||
}
|
||||
|
||||
// SoraStorageQuotaBytesLTE applies the LTE predicate on the "sora_storage_quota_bytes" field.
|
||||
func SoraStorageQuotaBytesLTE(v int64) predicate.Group {
|
||||
return predicate.Group(sql.FieldLTE(FieldSoraStorageQuotaBytes, v))
|
||||
}
|
||||
|
||||
// ClaudeCodeOnlyEQ applies the EQ predicate on the "claude_code_only" field.
|
||||
func ClaudeCodeOnlyEQ(v bool) predicate.Group {
|
||||
return predicate.Group(sql.FieldEQ(FieldClaudeCodeOnly, v))
|
||||
|
||||
@@ -314,6 +314,20 @@ func (_c *GroupCreate) SetNillableSoraVideoPricePerRequestHd(v *float64) *GroupC
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field.
|
||||
func (_c *GroupCreate) SetSoraStorageQuotaBytes(v int64) *GroupCreate {
|
||||
_c.mutation.SetSoraStorageQuotaBytes(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field if the given value is not nil.
|
||||
func (_c *GroupCreate) SetNillableSoraStorageQuotaBytes(v *int64) *GroupCreate {
|
||||
if v != nil {
|
||||
_c.SetSoraStorageQuotaBytes(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetClaudeCodeOnly sets the "claude_code_only" field.
|
||||
func (_c *GroupCreate) SetClaudeCodeOnly(v bool) *GroupCreate {
|
||||
_c.mutation.SetClaudeCodeOnly(v)
|
||||
@@ -589,6 +603,10 @@ func (_c *GroupCreate) defaults() error {
|
||||
v := group.DefaultDefaultValidityDays
|
||||
_c.mutation.SetDefaultValidityDays(v)
|
||||
}
|
||||
if _, ok := _c.mutation.SoraStorageQuotaBytes(); !ok {
|
||||
v := group.DefaultSoraStorageQuotaBytes
|
||||
_c.mutation.SetSoraStorageQuotaBytes(v)
|
||||
}
|
||||
if _, ok := _c.mutation.ClaudeCodeOnly(); !ok {
|
||||
v := group.DefaultClaudeCodeOnly
|
||||
_c.mutation.SetClaudeCodeOnly(v)
|
||||
@@ -665,6 +683,9 @@ func (_c *GroupCreate) check() error {
|
||||
if _, ok := _c.mutation.DefaultValidityDays(); !ok {
|
||||
return &ValidationError{Name: "default_validity_days", err: errors.New(`ent: missing required field "Group.default_validity_days"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.SoraStorageQuotaBytes(); !ok {
|
||||
return &ValidationError{Name: "sora_storage_quota_bytes", err: errors.New(`ent: missing required field "Group.sora_storage_quota_bytes"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.ClaudeCodeOnly(); !ok {
|
||||
return &ValidationError{Name: "claude_code_only", err: errors.New(`ent: missing required field "Group.claude_code_only"`)}
|
||||
}
|
||||
@@ -794,6 +815,10 @@ func (_c *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) {
|
||||
_spec.SetField(group.FieldSoraVideoPricePerRequestHd, field.TypeFloat64, value)
|
||||
_node.SoraVideoPricePerRequestHd = &value
|
||||
}
|
||||
if value, ok := _c.mutation.SoraStorageQuotaBytes(); ok {
|
||||
_spec.SetField(group.FieldSoraStorageQuotaBytes, field.TypeInt64, value)
|
||||
_node.SoraStorageQuotaBytes = value
|
||||
}
|
||||
if value, ok := _c.mutation.ClaudeCodeOnly(); ok {
|
||||
_spec.SetField(group.FieldClaudeCodeOnly, field.TypeBool, value)
|
||||
_node.ClaudeCodeOnly = value
|
||||
@@ -1370,6 +1395,24 @@ func (u *GroupUpsert) ClearSoraVideoPricePerRequestHd() *GroupUpsert {
|
||||
return u
|
||||
}
|
||||
|
||||
// SetSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field.
|
||||
func (u *GroupUpsert) SetSoraStorageQuotaBytes(v int64) *GroupUpsert {
|
||||
u.Set(group.FieldSoraStorageQuotaBytes, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field to the value that was provided on create.
|
||||
func (u *GroupUpsert) UpdateSoraStorageQuotaBytes() *GroupUpsert {
|
||||
u.SetExcluded(group.FieldSoraStorageQuotaBytes)
|
||||
return u
|
||||
}
|
||||
|
||||
// AddSoraStorageQuotaBytes adds v to the "sora_storage_quota_bytes" field.
|
||||
func (u *GroupUpsert) AddSoraStorageQuotaBytes(v int64) *GroupUpsert {
|
||||
u.Add(group.FieldSoraStorageQuotaBytes, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetClaudeCodeOnly sets the "claude_code_only" field.
|
||||
func (u *GroupUpsert) SetClaudeCodeOnly(v bool) *GroupUpsert {
|
||||
u.Set(group.FieldClaudeCodeOnly, v)
|
||||
@@ -2007,6 +2050,27 @@ func (u *GroupUpsertOne) ClearSoraVideoPricePerRequestHd() *GroupUpsertOne {
|
||||
})
|
||||
}
|
||||
|
||||
// SetSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field.
|
||||
func (u *GroupUpsertOne) SetSoraStorageQuotaBytes(v int64) *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.SetSoraStorageQuotaBytes(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddSoraStorageQuotaBytes adds v to the "sora_storage_quota_bytes" field.
|
||||
func (u *GroupUpsertOne) AddSoraStorageQuotaBytes(v int64) *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.AddSoraStorageQuotaBytes(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field to the value that was provided on create.
|
||||
func (u *GroupUpsertOne) UpdateSoraStorageQuotaBytes() *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.UpdateSoraStorageQuotaBytes()
|
||||
})
|
||||
}
|
||||
|
||||
// SetClaudeCodeOnly sets the "claude_code_only" field.
|
||||
func (u *GroupUpsertOne) SetClaudeCodeOnly(v bool) *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
@@ -2834,6 +2898,27 @@ func (u *GroupUpsertBulk) ClearSoraVideoPricePerRequestHd() *GroupUpsertBulk {
|
||||
})
|
||||
}
|
||||
|
||||
// SetSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field.
|
||||
func (u *GroupUpsertBulk) SetSoraStorageQuotaBytes(v int64) *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.SetSoraStorageQuotaBytes(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddSoraStorageQuotaBytes adds v to the "sora_storage_quota_bytes" field.
|
||||
func (u *GroupUpsertBulk) AddSoraStorageQuotaBytes(v int64) *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.AddSoraStorageQuotaBytes(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field to the value that was provided on create.
|
||||
func (u *GroupUpsertBulk) UpdateSoraStorageQuotaBytes() *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.UpdateSoraStorageQuotaBytes()
|
||||
})
|
||||
}
|
||||
|
||||
// SetClaudeCodeOnly sets the "claude_code_only" field.
|
||||
func (u *GroupUpsertBulk) SetClaudeCodeOnly(v bool) *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
|
||||
@@ -463,6 +463,27 @@ func (_u *GroupUpdate) ClearSoraVideoPricePerRequestHd() *GroupUpdate {
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field.
|
||||
func (_u *GroupUpdate) SetSoraStorageQuotaBytes(v int64) *GroupUpdate {
|
||||
_u.mutation.ResetSoraStorageQuotaBytes()
|
||||
_u.mutation.SetSoraStorageQuotaBytes(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field if the given value is not nil.
|
||||
func (_u *GroupUpdate) SetNillableSoraStorageQuotaBytes(v *int64) *GroupUpdate {
|
||||
if v != nil {
|
||||
_u.SetSoraStorageQuotaBytes(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddSoraStorageQuotaBytes adds value to the "sora_storage_quota_bytes" field.
|
||||
func (_u *GroupUpdate) AddSoraStorageQuotaBytes(v int64) *GroupUpdate {
|
||||
_u.mutation.AddSoraStorageQuotaBytes(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetClaudeCodeOnly sets the "claude_code_only" field.
|
||||
func (_u *GroupUpdate) SetClaudeCodeOnly(v bool) *GroupUpdate {
|
||||
_u.mutation.SetClaudeCodeOnly(v)
|
||||
@@ -1050,6 +1071,12 @@ func (_u *GroupUpdate) sqlSave(ctx context.Context) (_node int, err error) {
|
||||
if _u.mutation.SoraVideoPricePerRequestHdCleared() {
|
||||
_spec.ClearField(group.FieldSoraVideoPricePerRequestHd, field.TypeFloat64)
|
||||
}
|
||||
if value, ok := _u.mutation.SoraStorageQuotaBytes(); ok {
|
||||
_spec.SetField(group.FieldSoraStorageQuotaBytes, field.TypeInt64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedSoraStorageQuotaBytes(); ok {
|
||||
_spec.AddField(group.FieldSoraStorageQuotaBytes, field.TypeInt64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.ClaudeCodeOnly(); ok {
|
||||
_spec.SetField(group.FieldClaudeCodeOnly, field.TypeBool, value)
|
||||
}
|
||||
@@ -1842,6 +1869,27 @@ func (_u *GroupUpdateOne) ClearSoraVideoPricePerRequestHd() *GroupUpdateOne {
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field.
|
||||
func (_u *GroupUpdateOne) SetSoraStorageQuotaBytes(v int64) *GroupUpdateOne {
|
||||
_u.mutation.ResetSoraStorageQuotaBytes()
|
||||
_u.mutation.SetSoraStorageQuotaBytes(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field if the given value is not nil.
|
||||
func (_u *GroupUpdateOne) SetNillableSoraStorageQuotaBytes(v *int64) *GroupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetSoraStorageQuotaBytes(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddSoraStorageQuotaBytes adds value to the "sora_storage_quota_bytes" field.
|
||||
func (_u *GroupUpdateOne) AddSoraStorageQuotaBytes(v int64) *GroupUpdateOne {
|
||||
_u.mutation.AddSoraStorageQuotaBytes(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetClaudeCodeOnly sets the "claude_code_only" field.
|
||||
func (_u *GroupUpdateOne) SetClaudeCodeOnly(v bool) *GroupUpdateOne {
|
||||
_u.mutation.SetClaudeCodeOnly(v)
|
||||
@@ -2459,6 +2507,12 @@ func (_u *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error)
|
||||
if _u.mutation.SoraVideoPricePerRequestHdCleared() {
|
||||
_spec.ClearField(group.FieldSoraVideoPricePerRequestHd, field.TypeFloat64)
|
||||
}
|
||||
if value, ok := _u.mutation.SoraStorageQuotaBytes(); ok {
|
||||
_spec.SetField(group.FieldSoraStorageQuotaBytes, field.TypeInt64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedSoraStorageQuotaBytes(); ok {
|
||||
_spec.AddField(group.FieldSoraStorageQuotaBytes, field.TypeInt64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.ClaudeCodeOnly(); ok {
|
||||
_spec.SetField(group.FieldClaudeCodeOnly, field.TypeBool, value)
|
||||
}
|
||||
|
||||
@@ -24,6 +24,15 @@ var (
|
||||
{Name: "quota", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||
{Name: "quota_used", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||
{Name: "expires_at", Type: field.TypeTime, Nullable: true},
|
||||
{Name: "rate_limit_5h", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||
{Name: "rate_limit_1d", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||
{Name: "rate_limit_7d", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||
{Name: "usage_5h", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||
{Name: "usage_1d", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||
{Name: "usage_7d", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||
{Name: "window_5h_start", Type: field.TypeTime, Nullable: true},
|
||||
{Name: "window_1d_start", Type: field.TypeTime, Nullable: true},
|
||||
{Name: "window_7d_start", Type: field.TypeTime, Nullable: true},
|
||||
{Name: "group_id", Type: field.TypeInt64, Nullable: true},
|
||||
{Name: "user_id", Type: field.TypeInt64},
|
||||
}
|
||||
@@ -35,13 +44,13 @@ var (
|
||||
ForeignKeys: []*schema.ForeignKey{
|
||||
{
|
||||
Symbol: "api_keys_groups_api_keys",
|
||||
Columns: []*schema.Column{APIKeysColumns[13]},
|
||||
Columns: []*schema.Column{APIKeysColumns[22]},
|
||||
RefColumns: []*schema.Column{GroupsColumns[0]},
|
||||
OnDelete: schema.SetNull,
|
||||
},
|
||||
{
|
||||
Symbol: "api_keys_users_api_keys",
|
||||
Columns: []*schema.Column{APIKeysColumns[14]},
|
||||
Columns: []*schema.Column{APIKeysColumns[23]},
|
||||
RefColumns: []*schema.Column{UsersColumns[0]},
|
||||
OnDelete: schema.NoAction,
|
||||
},
|
||||
@@ -50,12 +59,12 @@ var (
|
||||
{
|
||||
Name: "apikey_user_id",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{APIKeysColumns[14]},
|
||||
Columns: []*schema.Column{APIKeysColumns[23]},
|
||||
},
|
||||
{
|
||||
Name: "apikey_group_id",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{APIKeysColumns[13]},
|
||||
Columns: []*schema.Column{APIKeysColumns[22]},
|
||||
},
|
||||
{
|
||||
Name: "apikey_status",
|
||||
@@ -108,6 +117,8 @@ var (
|
||||
{Name: "rate_limited_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "rate_limit_reset_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "overload_until", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "temp_unschedulable_until", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "temp_unschedulable_reason", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}},
|
||||
{Name: "session_window_start", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "session_window_end", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "session_window_status", Type: field.TypeString, Nullable: true, Size: 20},
|
||||
@@ -121,7 +132,7 @@ var (
|
||||
ForeignKeys: []*schema.ForeignKey{
|
||||
{
|
||||
Symbol: "accounts_proxies_proxy",
|
||||
Columns: []*schema.Column{AccountsColumns[25]},
|
||||
Columns: []*schema.Column{AccountsColumns[27]},
|
||||
RefColumns: []*schema.Column{ProxiesColumns[0]},
|
||||
OnDelete: schema.SetNull,
|
||||
},
|
||||
@@ -145,7 +156,7 @@ var (
|
||||
{
|
||||
Name: "account_proxy_id",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{AccountsColumns[25]},
|
||||
Columns: []*schema.Column{AccountsColumns[27]},
|
||||
},
|
||||
{
|
||||
Name: "account_priority",
|
||||
@@ -177,6 +188,16 @@ var (
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{AccountsColumns[21]},
|
||||
},
|
||||
{
|
||||
Name: "account_platform_priority",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{AccountsColumns[6], AccountsColumns[11]},
|
||||
},
|
||||
{
|
||||
Name: "account_priority_status",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{AccountsColumns[11], AccountsColumns[13]},
|
||||
},
|
||||
{
|
||||
Name: "account_deleted_at",
|
||||
Unique: false,
|
||||
@@ -376,6 +397,7 @@ var (
|
||||
{Name: "sora_image_price_540", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||
{Name: "sora_video_price_per_request", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||
{Name: "sora_video_price_per_request_hd", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||
{Name: "sora_storage_quota_bytes", Type: field.TypeInt64, Default: 0},
|
||||
{Name: "claude_code_only", Type: field.TypeBool, Default: false},
|
||||
{Name: "fallback_group_id", Type: field.TypeInt64, Nullable: true},
|
||||
{Name: "fallback_group_id_on_invalid_request", Type: field.TypeInt64, Nullable: true},
|
||||
@@ -420,7 +442,45 @@ var (
|
||||
{
|
||||
Name: "group_sort_order",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{GroupsColumns[29]},
|
||||
Columns: []*schema.Column{GroupsColumns[30]},
|
||||
},
|
||||
},
|
||||
}
|
||||
// IdempotencyRecordsColumns holds the columns for the "idempotency_records" table.
|
||||
IdempotencyRecordsColumns = []*schema.Column{
|
||||
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "scope", Type: field.TypeString, Size: 128},
|
||||
{Name: "idempotency_key_hash", Type: field.TypeString, Size: 64},
|
||||
{Name: "request_fingerprint", Type: field.TypeString, Size: 64},
|
||||
{Name: "status", Type: field.TypeString, Size: 32},
|
||||
{Name: "response_status", Type: field.TypeInt, Nullable: true},
|
||||
{Name: "response_body", Type: field.TypeString, Nullable: true},
|
||||
{Name: "error_reason", Type: field.TypeString, Nullable: true, Size: 128},
|
||||
{Name: "locked_until", Type: field.TypeTime, Nullable: true},
|
||||
{Name: "expires_at", Type: field.TypeTime},
|
||||
}
|
||||
// IdempotencyRecordsTable holds the schema information for the "idempotency_records" table.
|
||||
IdempotencyRecordsTable = &schema.Table{
|
||||
Name: "idempotency_records",
|
||||
Columns: IdempotencyRecordsColumns,
|
||||
PrimaryKey: []*schema.Column{IdempotencyRecordsColumns[0]},
|
||||
Indexes: []*schema.Index{
|
||||
{
|
||||
Name: "idempotencyrecord_scope_idempotency_key_hash",
|
||||
Unique: true,
|
||||
Columns: []*schema.Column{IdempotencyRecordsColumns[3], IdempotencyRecordsColumns[4]},
|
||||
},
|
||||
{
|
||||
Name: "idempotencyrecord_expires_at",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{IdempotencyRecordsColumns[11]},
|
||||
},
|
||||
{
|
||||
Name: "idempotencyrecord_status_locked_until",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{IdempotencyRecordsColumns[6], IdempotencyRecordsColumns[10]},
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -810,6 +870,11 @@ var (
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{UsageLogsColumns[28], UsageLogsColumns[27]},
|
||||
},
|
||||
{
|
||||
Name: "usagelog_group_id_created_at",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{UsageLogsColumns[30], UsageLogsColumns[27]},
|
||||
},
|
||||
},
|
||||
}
|
||||
// UsersColumns holds the columns for the "users" table.
|
||||
@@ -829,6 +894,8 @@ var (
|
||||
{Name: "totp_secret_encrypted", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}},
|
||||
{Name: "totp_enabled", Type: field.TypeBool, Default: false},
|
||||
{Name: "totp_enabled_at", Type: field.TypeTime, Nullable: true},
|
||||
{Name: "sora_storage_quota_bytes", Type: field.TypeInt64, Default: 0},
|
||||
{Name: "sora_storage_used_bytes", Type: field.TypeInt64, Default: 0},
|
||||
}
|
||||
// UsersTable holds the schema information for the "users" table.
|
||||
UsersTable = &schema.Table{
|
||||
@@ -1034,6 +1101,11 @@ var (
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{UserSubscriptionsColumns[5]},
|
||||
},
|
||||
{
|
||||
Name: "usersubscription_user_id_status_expires_at",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{UserSubscriptionsColumns[16], UserSubscriptionsColumns[6], UserSubscriptionsColumns[5]},
|
||||
},
|
||||
{
|
||||
Name: "usersubscription_assigned_by",
|
||||
Unique: false,
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -102,6 +102,30 @@ func init() {
|
||||
apikeyDescQuotaUsed := apikeyFields[9].Descriptor()
|
||||
// apikey.DefaultQuotaUsed holds the default value on creation for the quota_used field.
|
||||
apikey.DefaultQuotaUsed = apikeyDescQuotaUsed.Default.(float64)
|
||||
// apikeyDescRateLimit5h is the schema descriptor for rate_limit_5h field.
|
||||
apikeyDescRateLimit5h := apikeyFields[11].Descriptor()
|
||||
// apikey.DefaultRateLimit5h holds the default value on creation for the rate_limit_5h field.
|
||||
apikey.DefaultRateLimit5h = apikeyDescRateLimit5h.Default.(float64)
|
||||
// apikeyDescRateLimit1d is the schema descriptor for rate_limit_1d field.
|
||||
apikeyDescRateLimit1d := apikeyFields[12].Descriptor()
|
||||
// apikey.DefaultRateLimit1d holds the default value on creation for the rate_limit_1d field.
|
||||
apikey.DefaultRateLimit1d = apikeyDescRateLimit1d.Default.(float64)
|
||||
// apikeyDescRateLimit7d is the schema descriptor for rate_limit_7d field.
|
||||
apikeyDescRateLimit7d := apikeyFields[13].Descriptor()
|
||||
// apikey.DefaultRateLimit7d holds the default value on creation for the rate_limit_7d field.
|
||||
apikey.DefaultRateLimit7d = apikeyDescRateLimit7d.Default.(float64)
|
||||
// apikeyDescUsage5h is the schema descriptor for usage_5h field.
|
||||
apikeyDescUsage5h := apikeyFields[14].Descriptor()
|
||||
// apikey.DefaultUsage5h holds the default value on creation for the usage_5h field.
|
||||
apikey.DefaultUsage5h = apikeyDescUsage5h.Default.(float64)
|
||||
// apikeyDescUsage1d is the schema descriptor for usage_1d field.
|
||||
apikeyDescUsage1d := apikeyFields[15].Descriptor()
|
||||
// apikey.DefaultUsage1d holds the default value on creation for the usage_1d field.
|
||||
apikey.DefaultUsage1d = apikeyDescUsage1d.Default.(float64)
|
||||
// apikeyDescUsage7d is the schema descriptor for usage_7d field.
|
||||
apikeyDescUsage7d := apikeyFields[16].Descriptor()
|
||||
// apikey.DefaultUsage7d holds the default value on creation for the usage_7d field.
|
||||
apikey.DefaultUsage7d = apikeyDescUsage7d.Default.(float64)
|
||||
accountMixin := schema.Account{}.Mixin()
|
||||
accountMixinHooks1 := accountMixin[1].Hooks()
|
||||
account.Hooks[0] = accountMixinHooks1[0]
|
||||
@@ -210,7 +234,7 @@ func init() {
|
||||
// account.DefaultSchedulable holds the default value on creation for the schedulable field.
|
||||
account.DefaultSchedulable = accountDescSchedulable.Default.(bool)
|
||||
// accountDescSessionWindowStatus is the schema descriptor for session_window_status field.
|
||||
accountDescSessionWindowStatus := accountFields[21].Descriptor()
|
||||
accountDescSessionWindowStatus := accountFields[23].Descriptor()
|
||||
// account.SessionWindowStatusValidator is a validator for the "session_window_status" field. It is called by the builders before save.
|
||||
account.SessionWindowStatusValidator = accountDescSessionWindowStatus.Validators[0].(func(string) error)
|
||||
accountgroupFields := schema.AccountGroup{}.Fields()
|
||||
@@ -399,28 +423,32 @@ func init() {
|
||||
groupDescDefaultValidityDays := groupFields[10].Descriptor()
|
||||
// group.DefaultDefaultValidityDays holds the default value on creation for the default_validity_days field.
|
||||
group.DefaultDefaultValidityDays = groupDescDefaultValidityDays.Default.(int)
|
||||
// groupDescSoraStorageQuotaBytes is the schema descriptor for sora_storage_quota_bytes field.
|
||||
groupDescSoraStorageQuotaBytes := groupFields[18].Descriptor()
|
||||
// group.DefaultSoraStorageQuotaBytes holds the default value on creation for the sora_storage_quota_bytes field.
|
||||
group.DefaultSoraStorageQuotaBytes = groupDescSoraStorageQuotaBytes.Default.(int64)
|
||||
// groupDescClaudeCodeOnly is the schema descriptor for claude_code_only field.
|
||||
groupDescClaudeCodeOnly := groupFields[18].Descriptor()
|
||||
groupDescClaudeCodeOnly := groupFields[19].Descriptor()
|
||||
// group.DefaultClaudeCodeOnly holds the default value on creation for the claude_code_only field.
|
||||
group.DefaultClaudeCodeOnly = groupDescClaudeCodeOnly.Default.(bool)
|
||||
// groupDescModelRoutingEnabled is the schema descriptor for model_routing_enabled field.
|
||||
groupDescModelRoutingEnabled := groupFields[22].Descriptor()
|
||||
groupDescModelRoutingEnabled := groupFields[23].Descriptor()
|
||||
// group.DefaultModelRoutingEnabled holds the default value on creation for the model_routing_enabled field.
|
||||
group.DefaultModelRoutingEnabled = groupDescModelRoutingEnabled.Default.(bool)
|
||||
// groupDescMcpXMLInject is the schema descriptor for mcp_xml_inject field.
|
||||
groupDescMcpXMLInject := groupFields[23].Descriptor()
|
||||
groupDescMcpXMLInject := groupFields[24].Descriptor()
|
||||
// group.DefaultMcpXMLInject holds the default value on creation for the mcp_xml_inject field.
|
||||
group.DefaultMcpXMLInject = groupDescMcpXMLInject.Default.(bool)
|
||||
// groupDescSupportedModelScopes is the schema descriptor for supported_model_scopes field.
|
||||
groupDescSupportedModelScopes := groupFields[24].Descriptor()
|
||||
groupDescSupportedModelScopes := groupFields[25].Descriptor()
|
||||
// group.DefaultSupportedModelScopes holds the default value on creation for the supported_model_scopes field.
|
||||
group.DefaultSupportedModelScopes = groupDescSupportedModelScopes.Default.([]string)
|
||||
// groupDescSortOrder is the schema descriptor for sort_order field.
|
||||
groupDescSortOrder := groupFields[25].Descriptor()
|
||||
groupDescSortOrder := groupFields[26].Descriptor()
|
||||
// group.DefaultSortOrder holds the default value on creation for the sort_order field.
|
||||
group.DefaultSortOrder = groupDescSortOrder.Default.(int)
|
||||
// groupDescSimulateClaudeMaxEnabled is the schema descriptor for simulate_claude_max_enabled field.
|
||||
groupDescSimulateClaudeMaxEnabled := groupFields[26].Descriptor()
|
||||
groupDescSimulateClaudeMaxEnabled := groupFields[27].Descriptor()
|
||||
// group.DefaultSimulateClaudeMaxEnabled holds the default value on creation for the simulate_claude_max_enabled field.
|
||||
group.DefaultSimulateClaudeMaxEnabled = groupDescSimulateClaudeMaxEnabled.Default.(bool)
|
||||
idempotencyrecordMixin := schema.IdempotencyRecord{}.Mixin()
|
||||
@@ -958,6 +986,14 @@ func init() {
|
||||
userDescTotpEnabled := userFields[9].Descriptor()
|
||||
// user.DefaultTotpEnabled holds the default value on creation for the totp_enabled field.
|
||||
user.DefaultTotpEnabled = userDescTotpEnabled.Default.(bool)
|
||||
// userDescSoraStorageQuotaBytes is the schema descriptor for sora_storage_quota_bytes field.
|
||||
userDescSoraStorageQuotaBytes := userFields[11].Descriptor()
|
||||
// user.DefaultSoraStorageQuotaBytes holds the default value on creation for the sora_storage_quota_bytes field.
|
||||
user.DefaultSoraStorageQuotaBytes = userDescSoraStorageQuotaBytes.Default.(int64)
|
||||
// userDescSoraStorageUsedBytes is the schema descriptor for sora_storage_used_bytes field.
|
||||
userDescSoraStorageUsedBytes := userFields[12].Descriptor()
|
||||
// user.DefaultSoraStorageUsedBytes holds the default value on creation for the sora_storage_used_bytes field.
|
||||
user.DefaultSoraStorageUsedBytes = userDescSoraStorageUsedBytes.Default.(int64)
|
||||
userallowedgroupFields := schema.UserAllowedGroup{}.Fields()
|
||||
_ = userallowedgroupFields
|
||||
// userallowedgroupDescCreatedAt is the schema descriptor for created_at field.
|
||||
|
||||
@@ -164,6 +164,19 @@ func (Account) Fields() []ent.Field {
|
||||
Nillable().
|
||||
SchemaType(map[string]string{dialect.Postgres: "timestamptz"}),
|
||||
|
||||
// temp_unschedulable_until: 临时不可调度状态解除时间
|
||||
// 当命中临时不可调度规则时设置,在此时间前调度器应跳过该账号
|
||||
field.Time("temp_unschedulable_until").
|
||||
Optional().
|
||||
Nillable().
|
||||
SchemaType(map[string]string{dialect.Postgres: "timestamptz"}),
|
||||
|
||||
// temp_unschedulable_reason: 临时不可调度原因,便于排障审计
|
||||
field.String("temp_unschedulable_reason").
|
||||
Optional().
|
||||
Nillable().
|
||||
SchemaType(map[string]string{dialect.Postgres: "text"}),
|
||||
|
||||
// session_window_*: 会话窗口相关字段
|
||||
// 用于管理某些需要会话时间窗口的 API(如 Claude Pro)
|
||||
field.Time("session_window_start").
|
||||
@@ -213,6 +226,9 @@ func (Account) Indexes() []ent.Index {
|
||||
index.Fields("rate_limited_at"), // 筛选速率限制账户
|
||||
index.Fields("rate_limit_reset_at"), // 筛选速率限制解除时间
|
||||
index.Fields("overload_until"), // 筛选过载账户
|
||||
index.Fields("deleted_at"), // 软删除查询优化
|
||||
// 调度热路径复合索引(线上由 SQL 迁移创建部分索引,schema 仅用于模型可读性对齐)
|
||||
index.Fields("platform", "priority"),
|
||||
index.Fields("priority", "status"),
|
||||
index.Fields("deleted_at"), // 软删除查询优化
|
||||
}
|
||||
}
|
||||
|
||||
@@ -74,6 +74,47 @@ func (APIKey) Fields() []ent.Field {
|
||||
Optional().
|
||||
Nillable().
|
||||
Comment("Expiration time for this API key (null = never expires)"),
|
||||
|
||||
// ========== Rate limit fields ==========
|
||||
// Rate limit configuration (0 = unlimited)
|
||||
field.Float("rate_limit_5h").
|
||||
SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}).
|
||||
Default(0).
|
||||
Comment("Rate limit in USD per 5 hours (0 = unlimited)"),
|
||||
field.Float("rate_limit_1d").
|
||||
SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}).
|
||||
Default(0).
|
||||
Comment("Rate limit in USD per day (0 = unlimited)"),
|
||||
field.Float("rate_limit_7d").
|
||||
SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}).
|
||||
Default(0).
|
||||
Comment("Rate limit in USD per 7 days (0 = unlimited)"),
|
||||
// Rate limit usage tracking
|
||||
field.Float("usage_5h").
|
||||
SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}).
|
||||
Default(0).
|
||||
Comment("Used amount in USD for the current 5h window"),
|
||||
field.Float("usage_1d").
|
||||
SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}).
|
||||
Default(0).
|
||||
Comment("Used amount in USD for the current 1d window"),
|
||||
field.Float("usage_7d").
|
||||
SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}).
|
||||
Default(0).
|
||||
Comment("Used amount in USD for the current 7d window"),
|
||||
// Window start times
|
||||
field.Time("window_5h_start").
|
||||
Optional().
|
||||
Nillable().
|
||||
Comment("Start time of the current 5h rate limit window"),
|
||||
field.Time("window_1d_start").
|
||||
Optional().
|
||||
Nillable().
|
||||
Comment("Start time of the current 1d rate limit window"),
|
||||
field.Time("window_7d_start").
|
||||
Optional().
|
||||
Nillable().
|
||||
Comment("Start time of the current 7d rate limit window"),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -100,6 +100,10 @@ func (Group) Fields() []ent.Field {
|
||||
Nillable().
|
||||
SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}),
|
||||
|
||||
// Sora 存储配额
|
||||
field.Int64("sora_storage_quota_bytes").
|
||||
Default(0),
|
||||
|
||||
field.Bool("claude_code_only").
|
||||
Default(false).
|
||||
Comment("allow Claude Code client only"),
|
||||
|
||||
@@ -179,5 +179,7 @@ func (UsageLog) Indexes() []ent.Index {
|
||||
// 复合索引用于时间范围查询
|
||||
index.Fields("user_id", "created_at"),
|
||||
index.Fields("api_key_id", "created_at"),
|
||||
// 分组维度时间范围查询(线上由 SQL 迁移创建 group_id IS NOT NULL 的部分索引)
|
||||
index.Fields("group_id", "created_at"),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -72,6 +72,12 @@ func (User) Fields() []ent.Field {
|
||||
field.Time("totp_enabled_at").
|
||||
Optional().
|
||||
Nillable(),
|
||||
|
||||
// Sora 存储配额
|
||||
field.Int64("sora_storage_quota_bytes").
|
||||
Default(0),
|
||||
field.Int64("sora_storage_used_bytes").
|
||||
Default(0),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -108,6 +108,8 @@ func (UserSubscription) Indexes() []ent.Index {
|
||||
index.Fields("group_id"),
|
||||
index.Fields("status"),
|
||||
index.Fields("expires_at"),
|
||||
// 活跃订阅查询复合索引(线上由 SQL 迁移创建部分索引,schema 仅用于模型可读性对齐)
|
||||
index.Fields("user_id", "status", "expires_at"),
|
||||
index.Fields("assigned_by"),
|
||||
// 唯一约束通过部分索引实现(WHERE deleted_at IS NULL),支持软删除后重新订阅
|
||||
// 见迁移文件 016_soft_delete_partial_unique_indexes.sql
|
||||
|
||||
@@ -45,6 +45,10 @@ type User struct {
|
||||
TotpEnabled bool `json:"totp_enabled,omitempty"`
|
||||
// TotpEnabledAt holds the value of the "totp_enabled_at" field.
|
||||
TotpEnabledAt *time.Time `json:"totp_enabled_at,omitempty"`
|
||||
// SoraStorageQuotaBytes holds the value of the "sora_storage_quota_bytes" field.
|
||||
SoraStorageQuotaBytes int64 `json:"sora_storage_quota_bytes,omitempty"`
|
||||
// SoraStorageUsedBytes holds the value of the "sora_storage_used_bytes" field.
|
||||
SoraStorageUsedBytes int64 `json:"sora_storage_used_bytes,omitempty"`
|
||||
// Edges holds the relations/edges for other nodes in the graph.
|
||||
// The values are being populated by the UserQuery when eager-loading is set.
|
||||
Edges UserEdges `json:"edges"`
|
||||
@@ -177,7 +181,7 @@ func (*User) scanValues(columns []string) ([]any, error) {
|
||||
values[i] = new(sql.NullBool)
|
||||
case user.FieldBalance:
|
||||
values[i] = new(sql.NullFloat64)
|
||||
case user.FieldID, user.FieldConcurrency:
|
||||
case user.FieldID, user.FieldConcurrency, user.FieldSoraStorageQuotaBytes, user.FieldSoraStorageUsedBytes:
|
||||
values[i] = new(sql.NullInt64)
|
||||
case user.FieldEmail, user.FieldPasswordHash, user.FieldRole, user.FieldStatus, user.FieldUsername, user.FieldNotes, user.FieldTotpSecretEncrypted:
|
||||
values[i] = new(sql.NullString)
|
||||
@@ -291,6 +295,18 @@ func (_m *User) assignValues(columns []string, values []any) error {
|
||||
_m.TotpEnabledAt = new(time.Time)
|
||||
*_m.TotpEnabledAt = value.Time
|
||||
}
|
||||
case user.FieldSoraStorageQuotaBytes:
|
||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field sora_storage_quota_bytes", values[i])
|
||||
} else if value.Valid {
|
||||
_m.SoraStorageQuotaBytes = value.Int64
|
||||
}
|
||||
case user.FieldSoraStorageUsedBytes:
|
||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field sora_storage_used_bytes", values[i])
|
||||
} else if value.Valid {
|
||||
_m.SoraStorageUsedBytes = value.Int64
|
||||
}
|
||||
default:
|
||||
_m.selectValues.Set(columns[i], values[i])
|
||||
}
|
||||
@@ -424,6 +440,12 @@ func (_m *User) String() string {
|
||||
builder.WriteString("totp_enabled_at=")
|
||||
builder.WriteString(v.Format(time.ANSIC))
|
||||
}
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("sora_storage_quota_bytes=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.SoraStorageQuotaBytes))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("sora_storage_used_bytes=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.SoraStorageUsedBytes))
|
||||
builder.WriteByte(')')
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
@@ -43,6 +43,10 @@ const (
|
||||
FieldTotpEnabled = "totp_enabled"
|
||||
// FieldTotpEnabledAt holds the string denoting the totp_enabled_at field in the database.
|
||||
FieldTotpEnabledAt = "totp_enabled_at"
|
||||
// FieldSoraStorageQuotaBytes holds the string denoting the sora_storage_quota_bytes field in the database.
|
||||
FieldSoraStorageQuotaBytes = "sora_storage_quota_bytes"
|
||||
// FieldSoraStorageUsedBytes holds the string denoting the sora_storage_used_bytes field in the database.
|
||||
FieldSoraStorageUsedBytes = "sora_storage_used_bytes"
|
||||
// EdgeAPIKeys holds the string denoting the api_keys edge name in mutations.
|
||||
EdgeAPIKeys = "api_keys"
|
||||
// EdgeRedeemCodes holds the string denoting the redeem_codes edge name in mutations.
|
||||
@@ -152,6 +156,8 @@ var Columns = []string{
|
||||
FieldTotpSecretEncrypted,
|
||||
FieldTotpEnabled,
|
||||
FieldTotpEnabledAt,
|
||||
FieldSoraStorageQuotaBytes,
|
||||
FieldSoraStorageUsedBytes,
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -208,6 +214,10 @@ var (
|
||||
DefaultNotes string
|
||||
// DefaultTotpEnabled holds the default value on creation for the "totp_enabled" field.
|
||||
DefaultTotpEnabled bool
|
||||
// DefaultSoraStorageQuotaBytes holds the default value on creation for the "sora_storage_quota_bytes" field.
|
||||
DefaultSoraStorageQuotaBytes int64
|
||||
// DefaultSoraStorageUsedBytes holds the default value on creation for the "sora_storage_used_bytes" field.
|
||||
DefaultSoraStorageUsedBytes int64
|
||||
)
|
||||
|
||||
// OrderOption defines the ordering options for the User queries.
|
||||
@@ -288,6 +298,16 @@ func ByTotpEnabledAt(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldTotpEnabledAt, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// BySoraStorageQuotaBytes orders the results by the sora_storage_quota_bytes field.
|
||||
func BySoraStorageQuotaBytes(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldSoraStorageQuotaBytes, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// BySoraStorageUsedBytes orders the results by the sora_storage_used_bytes field.
|
||||
func BySoraStorageUsedBytes(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldSoraStorageUsedBytes, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByAPIKeysCount orders the results by api_keys count.
|
||||
func ByAPIKeysCount(opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
|
||||
@@ -125,6 +125,16 @@ func TotpEnabledAt(v time.Time) predicate.User {
|
||||
return predicate.User(sql.FieldEQ(FieldTotpEnabledAt, v))
|
||||
}
|
||||
|
||||
// SoraStorageQuotaBytes applies equality check predicate on the "sora_storage_quota_bytes" field. It's identical to SoraStorageQuotaBytesEQ.
|
||||
func SoraStorageQuotaBytes(v int64) predicate.User {
|
||||
return predicate.User(sql.FieldEQ(FieldSoraStorageQuotaBytes, v))
|
||||
}
|
||||
|
||||
// SoraStorageUsedBytes applies equality check predicate on the "sora_storage_used_bytes" field. It's identical to SoraStorageUsedBytesEQ.
|
||||
func SoraStorageUsedBytes(v int64) predicate.User {
|
||||
return predicate.User(sql.FieldEQ(FieldSoraStorageUsedBytes, v))
|
||||
}
|
||||
|
||||
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
|
||||
func CreatedAtEQ(v time.Time) predicate.User {
|
||||
return predicate.User(sql.FieldEQ(FieldCreatedAt, v))
|
||||
@@ -860,6 +870,86 @@ func TotpEnabledAtNotNil() predicate.User {
|
||||
return predicate.User(sql.FieldNotNull(FieldTotpEnabledAt))
|
||||
}
|
||||
|
||||
// SoraStorageQuotaBytesEQ applies the EQ predicate on the "sora_storage_quota_bytes" field.
|
||||
func SoraStorageQuotaBytesEQ(v int64) predicate.User {
|
||||
return predicate.User(sql.FieldEQ(FieldSoraStorageQuotaBytes, v))
|
||||
}
|
||||
|
||||
// SoraStorageQuotaBytesNEQ applies the NEQ predicate on the "sora_storage_quota_bytes" field.
|
||||
func SoraStorageQuotaBytesNEQ(v int64) predicate.User {
|
||||
return predicate.User(sql.FieldNEQ(FieldSoraStorageQuotaBytes, v))
|
||||
}
|
||||
|
||||
// SoraStorageQuotaBytesIn applies the In predicate on the "sora_storage_quota_bytes" field.
|
||||
func SoraStorageQuotaBytesIn(vs ...int64) predicate.User {
|
||||
return predicate.User(sql.FieldIn(FieldSoraStorageQuotaBytes, vs...))
|
||||
}
|
||||
|
||||
// SoraStorageQuotaBytesNotIn applies the NotIn predicate on the "sora_storage_quota_bytes" field.
|
||||
func SoraStorageQuotaBytesNotIn(vs ...int64) predicate.User {
|
||||
return predicate.User(sql.FieldNotIn(FieldSoraStorageQuotaBytes, vs...))
|
||||
}
|
||||
|
||||
// SoraStorageQuotaBytesGT applies the GT predicate on the "sora_storage_quota_bytes" field.
|
||||
func SoraStorageQuotaBytesGT(v int64) predicate.User {
|
||||
return predicate.User(sql.FieldGT(FieldSoraStorageQuotaBytes, v))
|
||||
}
|
||||
|
||||
// SoraStorageQuotaBytesGTE applies the GTE predicate on the "sora_storage_quota_bytes" field.
|
||||
func SoraStorageQuotaBytesGTE(v int64) predicate.User {
|
||||
return predicate.User(sql.FieldGTE(FieldSoraStorageQuotaBytes, v))
|
||||
}
|
||||
|
||||
// SoraStorageQuotaBytesLT applies the LT predicate on the "sora_storage_quota_bytes" field.
|
||||
func SoraStorageQuotaBytesLT(v int64) predicate.User {
|
||||
return predicate.User(sql.FieldLT(FieldSoraStorageQuotaBytes, v))
|
||||
}
|
||||
|
||||
// SoraStorageQuotaBytesLTE applies the LTE predicate on the "sora_storage_quota_bytes" field.
|
||||
func SoraStorageQuotaBytesLTE(v int64) predicate.User {
|
||||
return predicate.User(sql.FieldLTE(FieldSoraStorageQuotaBytes, v))
|
||||
}
|
||||
|
||||
// SoraStorageUsedBytesEQ applies the EQ predicate on the "sora_storage_used_bytes" field.
|
||||
func SoraStorageUsedBytesEQ(v int64) predicate.User {
|
||||
return predicate.User(sql.FieldEQ(FieldSoraStorageUsedBytes, v))
|
||||
}
|
||||
|
||||
// SoraStorageUsedBytesNEQ applies the NEQ predicate on the "sora_storage_used_bytes" field.
|
||||
func SoraStorageUsedBytesNEQ(v int64) predicate.User {
|
||||
return predicate.User(sql.FieldNEQ(FieldSoraStorageUsedBytes, v))
|
||||
}
|
||||
|
||||
// SoraStorageUsedBytesIn applies the In predicate on the "sora_storage_used_bytes" field.
|
||||
func SoraStorageUsedBytesIn(vs ...int64) predicate.User {
|
||||
return predicate.User(sql.FieldIn(FieldSoraStorageUsedBytes, vs...))
|
||||
}
|
||||
|
||||
// SoraStorageUsedBytesNotIn applies the NotIn predicate on the "sora_storage_used_bytes" field.
|
||||
func SoraStorageUsedBytesNotIn(vs ...int64) predicate.User {
|
||||
return predicate.User(sql.FieldNotIn(FieldSoraStorageUsedBytes, vs...))
|
||||
}
|
||||
|
||||
// SoraStorageUsedBytesGT applies the GT predicate on the "sora_storage_used_bytes" field.
|
||||
func SoraStorageUsedBytesGT(v int64) predicate.User {
|
||||
return predicate.User(sql.FieldGT(FieldSoraStorageUsedBytes, v))
|
||||
}
|
||||
|
||||
// SoraStorageUsedBytesGTE applies the GTE predicate on the "sora_storage_used_bytes" field.
|
||||
func SoraStorageUsedBytesGTE(v int64) predicate.User {
|
||||
return predicate.User(sql.FieldGTE(FieldSoraStorageUsedBytes, v))
|
||||
}
|
||||
|
||||
// SoraStorageUsedBytesLT applies the LT predicate on the "sora_storage_used_bytes" field.
|
||||
func SoraStorageUsedBytesLT(v int64) predicate.User {
|
||||
return predicate.User(sql.FieldLT(FieldSoraStorageUsedBytes, v))
|
||||
}
|
||||
|
||||
// SoraStorageUsedBytesLTE applies the LTE predicate on the "sora_storage_used_bytes" field.
|
||||
func SoraStorageUsedBytesLTE(v int64) predicate.User {
|
||||
return predicate.User(sql.FieldLTE(FieldSoraStorageUsedBytes, v))
|
||||
}
|
||||
|
||||
// HasAPIKeys applies the HasEdge predicate on the "api_keys" edge.
|
||||
func HasAPIKeys() predicate.User {
|
||||
return predicate.User(func(s *sql.Selector) {
|
||||
|
||||
@@ -210,6 +210,34 @@ func (_c *UserCreate) SetNillableTotpEnabledAt(v *time.Time) *UserCreate {
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field.
|
||||
func (_c *UserCreate) SetSoraStorageQuotaBytes(v int64) *UserCreate {
|
||||
_c.mutation.SetSoraStorageQuotaBytes(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field if the given value is not nil.
|
||||
func (_c *UserCreate) SetNillableSoraStorageQuotaBytes(v *int64) *UserCreate {
|
||||
if v != nil {
|
||||
_c.SetSoraStorageQuotaBytes(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetSoraStorageUsedBytes sets the "sora_storage_used_bytes" field.
|
||||
func (_c *UserCreate) SetSoraStorageUsedBytes(v int64) *UserCreate {
|
||||
_c.mutation.SetSoraStorageUsedBytes(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableSoraStorageUsedBytes sets the "sora_storage_used_bytes" field if the given value is not nil.
|
||||
func (_c *UserCreate) SetNillableSoraStorageUsedBytes(v *int64) *UserCreate {
|
||||
if v != nil {
|
||||
_c.SetSoraStorageUsedBytes(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs.
|
||||
func (_c *UserCreate) AddAPIKeyIDs(ids ...int64) *UserCreate {
|
||||
_c.mutation.AddAPIKeyIDs(ids...)
|
||||
@@ -424,6 +452,14 @@ func (_c *UserCreate) defaults() error {
|
||||
v := user.DefaultTotpEnabled
|
||||
_c.mutation.SetTotpEnabled(v)
|
||||
}
|
||||
if _, ok := _c.mutation.SoraStorageQuotaBytes(); !ok {
|
||||
v := user.DefaultSoraStorageQuotaBytes
|
||||
_c.mutation.SetSoraStorageQuotaBytes(v)
|
||||
}
|
||||
if _, ok := _c.mutation.SoraStorageUsedBytes(); !ok {
|
||||
v := user.DefaultSoraStorageUsedBytes
|
||||
_c.mutation.SetSoraStorageUsedBytes(v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -487,6 +523,12 @@ func (_c *UserCreate) check() error {
|
||||
if _, ok := _c.mutation.TotpEnabled(); !ok {
|
||||
return &ValidationError{Name: "totp_enabled", err: errors.New(`ent: missing required field "User.totp_enabled"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.SoraStorageQuotaBytes(); !ok {
|
||||
return &ValidationError{Name: "sora_storage_quota_bytes", err: errors.New(`ent: missing required field "User.sora_storage_quota_bytes"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.SoraStorageUsedBytes(); !ok {
|
||||
return &ValidationError{Name: "sora_storage_used_bytes", err: errors.New(`ent: missing required field "User.sora_storage_used_bytes"`)}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -570,6 +612,14 @@ func (_c *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) {
|
||||
_spec.SetField(user.FieldTotpEnabledAt, field.TypeTime, value)
|
||||
_node.TotpEnabledAt = &value
|
||||
}
|
||||
if value, ok := _c.mutation.SoraStorageQuotaBytes(); ok {
|
||||
_spec.SetField(user.FieldSoraStorageQuotaBytes, field.TypeInt64, value)
|
||||
_node.SoraStorageQuotaBytes = value
|
||||
}
|
||||
if value, ok := _c.mutation.SoraStorageUsedBytes(); ok {
|
||||
_spec.SetField(user.FieldSoraStorageUsedBytes, field.TypeInt64, value)
|
||||
_node.SoraStorageUsedBytes = value
|
||||
}
|
||||
if nodes := _c.mutation.APIKeysIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.O2M,
|
||||
@@ -956,6 +1006,42 @@ func (u *UserUpsert) ClearTotpEnabledAt() *UserUpsert {
|
||||
return u
|
||||
}
|
||||
|
||||
// SetSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field.
|
||||
func (u *UserUpsert) SetSoraStorageQuotaBytes(v int64) *UserUpsert {
|
||||
u.Set(user.FieldSoraStorageQuotaBytes, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field to the value that was provided on create.
|
||||
func (u *UserUpsert) UpdateSoraStorageQuotaBytes() *UserUpsert {
|
||||
u.SetExcluded(user.FieldSoraStorageQuotaBytes)
|
||||
return u
|
||||
}
|
||||
|
||||
// AddSoraStorageQuotaBytes adds v to the "sora_storage_quota_bytes" field.
|
||||
func (u *UserUpsert) AddSoraStorageQuotaBytes(v int64) *UserUpsert {
|
||||
u.Add(user.FieldSoraStorageQuotaBytes, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetSoraStorageUsedBytes sets the "sora_storage_used_bytes" field.
|
||||
func (u *UserUpsert) SetSoraStorageUsedBytes(v int64) *UserUpsert {
|
||||
u.Set(user.FieldSoraStorageUsedBytes, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateSoraStorageUsedBytes sets the "sora_storage_used_bytes" field to the value that was provided on create.
|
||||
func (u *UserUpsert) UpdateSoraStorageUsedBytes() *UserUpsert {
|
||||
u.SetExcluded(user.FieldSoraStorageUsedBytes)
|
||||
return u
|
||||
}
|
||||
|
||||
// AddSoraStorageUsedBytes adds v to the "sora_storage_used_bytes" field.
|
||||
func (u *UserUpsert) AddSoraStorageUsedBytes(v int64) *UserUpsert {
|
||||
u.Add(user.FieldSoraStorageUsedBytes, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateNewValues updates the mutable fields using the new values that were set on create.
|
||||
// Using this option is equivalent to using:
|
||||
//
|
||||
@@ -1218,6 +1304,48 @@ func (u *UserUpsertOne) ClearTotpEnabledAt() *UserUpsertOne {
|
||||
})
|
||||
}
|
||||
|
||||
// SetSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field.
|
||||
func (u *UserUpsertOne) SetSoraStorageQuotaBytes(v int64) *UserUpsertOne {
|
||||
return u.Update(func(s *UserUpsert) {
|
||||
s.SetSoraStorageQuotaBytes(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddSoraStorageQuotaBytes adds v to the "sora_storage_quota_bytes" field.
|
||||
func (u *UserUpsertOne) AddSoraStorageQuotaBytes(v int64) *UserUpsertOne {
|
||||
return u.Update(func(s *UserUpsert) {
|
||||
s.AddSoraStorageQuotaBytes(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field to the value that was provided on create.
|
||||
func (u *UserUpsertOne) UpdateSoraStorageQuotaBytes() *UserUpsertOne {
|
||||
return u.Update(func(s *UserUpsert) {
|
||||
s.UpdateSoraStorageQuotaBytes()
|
||||
})
|
||||
}
|
||||
|
||||
// SetSoraStorageUsedBytes sets the "sora_storage_used_bytes" field.
|
||||
func (u *UserUpsertOne) SetSoraStorageUsedBytes(v int64) *UserUpsertOne {
|
||||
return u.Update(func(s *UserUpsert) {
|
||||
s.SetSoraStorageUsedBytes(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddSoraStorageUsedBytes adds v to the "sora_storage_used_bytes" field.
|
||||
func (u *UserUpsertOne) AddSoraStorageUsedBytes(v int64) *UserUpsertOne {
|
||||
return u.Update(func(s *UserUpsert) {
|
||||
s.AddSoraStorageUsedBytes(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateSoraStorageUsedBytes sets the "sora_storage_used_bytes" field to the value that was provided on create.
|
||||
func (u *UserUpsertOne) UpdateSoraStorageUsedBytes() *UserUpsertOne {
|
||||
return u.Update(func(s *UserUpsert) {
|
||||
s.UpdateSoraStorageUsedBytes()
|
||||
})
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (u *UserUpsertOne) Exec(ctx context.Context) error {
|
||||
if len(u.create.conflict) == 0 {
|
||||
@@ -1646,6 +1774,48 @@ func (u *UserUpsertBulk) ClearTotpEnabledAt() *UserUpsertBulk {
|
||||
})
|
||||
}
|
||||
|
||||
// SetSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field.
|
||||
func (u *UserUpsertBulk) SetSoraStorageQuotaBytes(v int64) *UserUpsertBulk {
|
||||
return u.Update(func(s *UserUpsert) {
|
||||
s.SetSoraStorageQuotaBytes(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddSoraStorageQuotaBytes adds v to the "sora_storage_quota_bytes" field.
|
||||
func (u *UserUpsertBulk) AddSoraStorageQuotaBytes(v int64) *UserUpsertBulk {
|
||||
return u.Update(func(s *UserUpsert) {
|
||||
s.AddSoraStorageQuotaBytes(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field to the value that was provided on create.
|
||||
func (u *UserUpsertBulk) UpdateSoraStorageQuotaBytes() *UserUpsertBulk {
|
||||
return u.Update(func(s *UserUpsert) {
|
||||
s.UpdateSoraStorageQuotaBytes()
|
||||
})
|
||||
}
|
||||
|
||||
// SetSoraStorageUsedBytes sets the "sora_storage_used_bytes" field.
|
||||
func (u *UserUpsertBulk) SetSoraStorageUsedBytes(v int64) *UserUpsertBulk {
|
||||
return u.Update(func(s *UserUpsert) {
|
||||
s.SetSoraStorageUsedBytes(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddSoraStorageUsedBytes adds v to the "sora_storage_used_bytes" field.
|
||||
func (u *UserUpsertBulk) AddSoraStorageUsedBytes(v int64) *UserUpsertBulk {
|
||||
return u.Update(func(s *UserUpsert) {
|
||||
s.AddSoraStorageUsedBytes(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateSoraStorageUsedBytes sets the "sora_storage_used_bytes" field to the value that was provided on create.
|
||||
func (u *UserUpsertBulk) UpdateSoraStorageUsedBytes() *UserUpsertBulk {
|
||||
return u.Update(func(s *UserUpsert) {
|
||||
s.UpdateSoraStorageUsedBytes()
|
||||
})
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (u *UserUpsertBulk) Exec(ctx context.Context) error {
|
||||
if u.create.err != nil {
|
||||
|
||||
@@ -242,6 +242,48 @@ func (_u *UserUpdate) ClearTotpEnabledAt() *UserUpdate {
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field.
|
||||
func (_u *UserUpdate) SetSoraStorageQuotaBytes(v int64) *UserUpdate {
|
||||
_u.mutation.ResetSoraStorageQuotaBytes()
|
||||
_u.mutation.SetSoraStorageQuotaBytes(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field if the given value is not nil.
|
||||
func (_u *UserUpdate) SetNillableSoraStorageQuotaBytes(v *int64) *UserUpdate {
|
||||
if v != nil {
|
||||
_u.SetSoraStorageQuotaBytes(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddSoraStorageQuotaBytes adds value to the "sora_storage_quota_bytes" field.
|
||||
func (_u *UserUpdate) AddSoraStorageQuotaBytes(v int64) *UserUpdate {
|
||||
_u.mutation.AddSoraStorageQuotaBytes(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetSoraStorageUsedBytes sets the "sora_storage_used_bytes" field.
|
||||
func (_u *UserUpdate) SetSoraStorageUsedBytes(v int64) *UserUpdate {
|
||||
_u.mutation.ResetSoraStorageUsedBytes()
|
||||
_u.mutation.SetSoraStorageUsedBytes(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableSoraStorageUsedBytes sets the "sora_storage_used_bytes" field if the given value is not nil.
|
||||
func (_u *UserUpdate) SetNillableSoraStorageUsedBytes(v *int64) *UserUpdate {
|
||||
if v != nil {
|
||||
_u.SetSoraStorageUsedBytes(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddSoraStorageUsedBytes adds value to the "sora_storage_used_bytes" field.
|
||||
func (_u *UserUpdate) AddSoraStorageUsedBytes(v int64) *UserUpdate {
|
||||
_u.mutation.AddSoraStorageUsedBytes(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs.
|
||||
func (_u *UserUpdate) AddAPIKeyIDs(ids ...int64) *UserUpdate {
|
||||
_u.mutation.AddAPIKeyIDs(ids...)
|
||||
@@ -709,6 +751,18 @@ func (_u *UserUpdate) sqlSave(ctx context.Context) (_node int, err error) {
|
||||
if _u.mutation.TotpEnabledAtCleared() {
|
||||
_spec.ClearField(user.FieldTotpEnabledAt, field.TypeTime)
|
||||
}
|
||||
if value, ok := _u.mutation.SoraStorageQuotaBytes(); ok {
|
||||
_spec.SetField(user.FieldSoraStorageQuotaBytes, field.TypeInt64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedSoraStorageQuotaBytes(); ok {
|
||||
_spec.AddField(user.FieldSoraStorageQuotaBytes, field.TypeInt64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.SoraStorageUsedBytes(); ok {
|
||||
_spec.SetField(user.FieldSoraStorageUsedBytes, field.TypeInt64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedSoraStorageUsedBytes(); ok {
|
||||
_spec.AddField(user.FieldSoraStorageUsedBytes, field.TypeInt64, value)
|
||||
}
|
||||
if _u.mutation.APIKeysCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.O2M,
|
||||
@@ -1352,6 +1406,48 @@ func (_u *UserUpdateOne) ClearTotpEnabledAt() *UserUpdateOne {
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field.
|
||||
func (_u *UserUpdateOne) SetSoraStorageQuotaBytes(v int64) *UserUpdateOne {
|
||||
_u.mutation.ResetSoraStorageQuotaBytes()
|
||||
_u.mutation.SetSoraStorageQuotaBytes(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field if the given value is not nil.
|
||||
func (_u *UserUpdateOne) SetNillableSoraStorageQuotaBytes(v *int64) *UserUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetSoraStorageQuotaBytes(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddSoraStorageQuotaBytes adds value to the "sora_storage_quota_bytes" field.
|
||||
func (_u *UserUpdateOne) AddSoraStorageQuotaBytes(v int64) *UserUpdateOne {
|
||||
_u.mutation.AddSoraStorageQuotaBytes(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetSoraStorageUsedBytes sets the "sora_storage_used_bytes" field.
|
||||
func (_u *UserUpdateOne) SetSoraStorageUsedBytes(v int64) *UserUpdateOne {
|
||||
_u.mutation.ResetSoraStorageUsedBytes()
|
||||
_u.mutation.SetSoraStorageUsedBytes(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableSoraStorageUsedBytes sets the "sora_storage_used_bytes" field if the given value is not nil.
|
||||
func (_u *UserUpdateOne) SetNillableSoraStorageUsedBytes(v *int64) *UserUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetSoraStorageUsedBytes(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddSoraStorageUsedBytes adds value to the "sora_storage_used_bytes" field.
|
||||
func (_u *UserUpdateOne) AddSoraStorageUsedBytes(v int64) *UserUpdateOne {
|
||||
_u.mutation.AddSoraStorageUsedBytes(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs.
|
||||
func (_u *UserUpdateOne) AddAPIKeyIDs(ids ...int64) *UserUpdateOne {
|
||||
_u.mutation.AddAPIKeyIDs(ids...)
|
||||
@@ -1849,6 +1945,18 @@ func (_u *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) {
|
||||
if _u.mutation.TotpEnabledAtCleared() {
|
||||
_spec.ClearField(user.FieldTotpEnabledAt, field.TypeTime)
|
||||
}
|
||||
if value, ok := _u.mutation.SoraStorageQuotaBytes(); ok {
|
||||
_spec.SetField(user.FieldSoraStorageQuotaBytes, field.TypeInt64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedSoraStorageQuotaBytes(); ok {
|
||||
_spec.AddField(user.FieldSoraStorageQuotaBytes, field.TypeInt64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.SoraStorageUsedBytes(); ok {
|
||||
_spec.SetField(user.FieldSoraStorageUsedBytes, field.TypeInt64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedSoraStorageUsedBytes(); ok {
|
||||
_spec.AddField(user.FieldSoraStorageUsedBytes, field.TypeInt64, value)
|
||||
}
|
||||
if _u.mutation.APIKeysCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.O2M,
|
||||
|
||||
@@ -7,7 +7,11 @@ require (
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.2
|
||||
github.com/DouDOU-start/go-sora2api v1.1.0
|
||||
github.com/alitto/pond/v2 v2.6.2
|
||||
github.com/aws/aws-sdk-go-v2/config v1.32.10
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.19.10
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.2
|
||||
github.com/cespare/xxhash/v2 v2.3.0
|
||||
github.com/coder/websocket v1.8.14
|
||||
github.com/dgraph-io/ristretto v0.2.0
|
||||
github.com/gin-gonic/gin v1.9.1
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2
|
||||
@@ -34,6 +38,8 @@ require (
|
||||
golang.org/x/net v0.49.0
|
||||
golang.org/x/sync v0.19.0
|
||||
golang.org/x/term v0.40.0
|
||||
google.golang.org/grpc v1.75.1
|
||||
google.golang.org/protobuf v1.36.10
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
modernc.org/sqlite v1.44.3
|
||||
@@ -47,6 +53,22 @@ require (
|
||||
github.com/agext/levenshtein v1.2.3 // indirect
|
||||
github.com/andybalholm/brotli v1.2.0 // indirect
|
||||
github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.18 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.18 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.18 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.18 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.10 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.18 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.18 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/signin v1.0.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.11 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.15 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.41.7 // indirect
|
||||
github.com/aws/smithy-go v1.24.1 // indirect
|
||||
github.com/bdandy/go-errors v1.2.2 // indirect
|
||||
github.com/bdandy/go-socks4 v1.2.3 // indirect
|
||||
github.com/bmatcuk/doublestar v1.3.4 // indirect
|
||||
@@ -88,6 +110,7 @@ require (
|
||||
github.com/goccy/go-json v0.10.2 // indirect
|
||||
github.com/google/go-cmp v0.7.0 // indirect
|
||||
github.com/google/go-querystring v1.1.0 // indirect
|
||||
github.com/google/subcommands v1.2.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/hashicorp/hcl/v2 v2.18.1 // indirect
|
||||
@@ -149,7 +172,6 @@ require (
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
|
||||
go.opentelemetry.io/otel v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.37.0 // indirect
|
||||
go.uber.org/atomic v1.10.0 // indirect
|
||||
go.uber.org/automaxprocs v1.6.0 // indirect
|
||||
@@ -159,8 +181,8 @@ require (
|
||||
golang.org/x/mod v0.32.0 // indirect
|
||||
golang.org/x/sys v0.41.0 // indirect
|
||||
golang.org/x/text v0.34.0 // indirect
|
||||
google.golang.org/grpc v1.75.1 // indirect
|
||||
google.golang.org/protobuf v1.36.10 // indirect
|
||||
golang.org/x/tools v0.41.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
modernc.org/libc v1.67.6 // indirect
|
||||
modernc.org/mathutil v1.7.1 // indirect
|
||||
|
||||
@@ -22,6 +22,44 @@ github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwTo
|
||||
github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY=
|
||||
github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY=
|
||||
github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4=
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.2 h1:LuT2rzqNQsauaGkPK/7813XxcZ3o3yePY0Iy891T2ls=
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.2/go.mod h1:IvvlAZQXvTXznUPfRVfryiG1fbzE2NGK6m9u39YQ+S4=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.5 h1:zWFmPmgw4sveAYi1mRqG+E/g0461cJ5M4bJ8/nc6d3Q=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.5/go.mod h1:nVUlMLVV8ycXSb7mSkcNu9e3v/1TJq2RTlrPwhYWr5c=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.32.10 h1:9DMthfO6XWZYLfzZglAgW5Fyou2nRI5CuV44sTedKBI=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.32.10/go.mod h1:2rUIOnA2JaiqYmSKYmRJlcMWy6qTj1vuRFscppSBMcw=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.19.10 h1:EEhmEUFCE1Yhl7vDhNOI5OCL/iKMdkkYFTRpZXNw7m8=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.19.10/go.mod h1:RnnlFCAlxQCkN2Q379B67USkBMu1PipEEiibzYN5UTE=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.18 h1:Ii4s+Sq3yDfaMLpjrJsqD6SmG/Wq/P5L/hw2qa78UAY=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.18/go.mod h1:6x81qnY++ovptLE6nWQeWrpXxbnlIex+4H4eYYGcqfc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.18 h1:F43zk1vemYIqPAwhjTjYIz0irU2EY7sOb/F5eJ3HuyM=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.18/go.mod h1:w1jdlZXrGKaJcNoL+Nnrj+k5wlpGXqnNrKoP22HvAug=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.18 h1:xCeWVjj0ki0l3nruoyP2slHsGArMxeiiaoPN5QZH6YQ=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.18/go.mod h1:r/eLGuGCBw6l36ZRWiw6PaZwPXb6YOj+i/7MizNl5/k=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.18 h1:eZioDaZGJ0tMM4gzmkNIO2aAoQd+je7Ug7TkvAzlmkU=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.18/go.mod h1:CCXwUKAJdoWr6/NcxZ+zsiPr6oH/Q5aTooRGYieAyj4=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.5 h1:CeY9LUdur+Dxoeldqoun6y4WtJ3RQtzk0JMP2gfUay0=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.5/go.mod h1:AZLZf2fMaahW5s/wMRciu1sYbdsikT/UHwbUjOdEVTc=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.10 h1:fJvQ5mIBVfKtiyx0AHY6HeWcRX5LGANLpq8SVR+Uazs=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.10/go.mod h1:Kzm5e6OmNH8VMkgK9t+ry5jEih4Y8whqs+1hrkxim1I=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.18 h1:LTRCYFlnnKFlKsyIQxKhJuDuA3ZkrDQMRYm6rXiHlLY=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.18/go.mod h1:XhwkgGG6bHSd00nO/mexWTcTjgd6PjuvWQMqSn2UaEk=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.18 h1:/A/xDuZAVD2BpsS2fftFRo/NoEKQJ8YTnJDEHBy2Gtg=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.18/go.mod h1:hWe9b4f+djUQGmyiGEeOnZv69dtMSgpDRIvNMvuvzvY=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.2 h1:M1A9AjcFwlxTLuf0Faj88L8Iqw0n/AJHjpZTQzMMsSc=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.2/go.mod h1:KsdTV6Q9WKUZm2mNJnUFmIoXfZux91M3sr/a4REX8e0=
|
||||
github.com/aws/aws-sdk-go-v2/service/signin v1.0.6 h1:MzORe+J94I+hYu2a6XmV5yC9huoTv8NRcCrUNedDypQ=
|
||||
github.com/aws/aws-sdk-go-v2/service/signin v1.0.6/go.mod h1:hXzcHLARD7GeWnifd8j9RWqtfIgxj4/cAtIVIK7hg8g=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.11 h1:7oGD8KPfBOJGXiCoRKrrrQkbvCp8N++u36hrLMPey6o=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.11/go.mod h1:0DO9B5EUJQlIDif+XJRWCljZRKsAFKh3gpFz7UnDtOo=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.15 h1:edCcNp9eGIUDUCrzoCu1jWAXLGFIizeqkdkKgRlJwWc=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.15/go.mod h1:lyRQKED9xWfgkYC/wmmYfv7iVIM68Z5OQ88ZdcV1QbU=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.41.7 h1:NITQpgo9A5NrDZ57uOWj+abvXSb83BbyggcUBVksN7c=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.41.7/go.mod h1:sks5UWBhEuWYDPdwlnRFn1w7xWdH29Jcpe+/PJQefEs=
|
||||
github.com/aws/smithy-go v1.24.1 h1:VbyeNfmYkWoxMVpGUAbQumkODcYmfMRfZ8yQiH30SK0=
|
||||
github.com/aws/smithy-go v1.24.1/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
|
||||
github.com/bdandy/go-errors v1.2.2 h1:WdFv/oukjTJCLa79UfkGmwX7ZxONAihKu4V0mLIs11Q=
|
||||
github.com/bdandy/go-errors v1.2.2/go.mod h1:NkYHl4Fey9oRRdbB1CoC6e84tuqQHiqrOcZpqFEkBxM=
|
||||
github.com/bdandy/go-socks4 v1.2.3 h1:Q6Y2heY1GRjCtHbmlKfnwrKVU/k81LS8mRGLRlmDlic=
|
||||
@@ -56,6 +94,12 @@ github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
|
||||
github.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfatpWHKCs=
|
||||
github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA=
|
||||
github.com/clipperhouse/uax29/v2 v2.5.0 h1:x7T0T4eTHDONxFJsL94uKNKPHrclyFI0lm7+w94cO8U=
|
||||
github.com/clipperhouse/uax29/v2 v2.5.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g=
|
||||
github.com/coder/websocket v1.8.14 h1:9L0p0iKiNOibykf283eHkKUHHrpG7f65OE3BhhO7v9g=
|
||||
github.com/coder/websocket v1.8.14/go.mod h1:NX3SzP+inril6yawo5CQXx8+fk145lPDC6pumgx0mVg=
|
||||
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
|
||||
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
|
||||
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
|
||||
@@ -80,8 +124,6 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/dlclark/regexp2 v1.10.0 h1:+/GIL799phkJqYW+3YbOd8LCcbHzT0Pbo8zl70MHsq0=
|
||||
github.com/dlclark/regexp2 v1.10.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
|
||||
github.com/docker/docker v28.5.1+incompatible h1:Bm8DchhSD2J6PsFzxC35TZo4TLGR2PdW/E69rU45NhM=
|
||||
github.com/docker/docker v28.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
|
||||
@@ -129,6 +171,8 @@ github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
|
||||
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
@@ -138,6 +182,8 @@ github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs=
|
||||
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
|
||||
github.com/google/subcommands v1.2.0 h1:vWQspBTo2nEqTUFita5/KeEWlUL8kQObDFbub/EN9oE=
|
||||
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/wire v0.7.0 h1:JxUKI6+CVBgCO2WToKy/nQk0sS+amI9z9EjVmdaocj4=
|
||||
@@ -157,8 +203,6 @@ github.com/icholy/digest v1.1.0 h1:HfGg9Irj7i+IX1o1QAmPfIBNu/Q5A5Tu3n/MED9k9H4=
|
||||
github.com/icholy/digest v1.1.0/go.mod h1:QNrsSGQ5v7v9cReDI0+eyjsXGUoRSUZQHeQ5C4XLa0Y=
|
||||
github.com/imroc/req/v3 v3.57.0 h1:LMTUjNRUybUkTPn8oJDq8Kg3JRBOBTcnDhKu7mzupKI=
|
||||
github.com/imroc/req/v3 v3.57.0/go.mod h1:JL62ey1nvSLq81HORNcosvlf7SxZStONNqOprg0Pz00=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
|
||||
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
|
||||
@@ -194,8 +238,8 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
|
||||
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw=
|
||||
github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs=
|
||||
github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM=
|
||||
github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
|
||||
github.com/mdelapenya/tlscert v0.2.0 h1:7H81W6Z/4weDvZBNOfQte5GpIMo0lGYEeWbkGp5LJHI=
|
||||
@@ -241,10 +285,6 @@ github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6
|
||||
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkoukk/tiktoken-go v0.1.8 h1:85ENo+3FpWgAACBaEUVp+lctuTcYUO7BtmfhlN/QTRo=
|
||||
github.com/pkoukk/tiktoken-go v0.1.8/go.mod h1:9NiV+i9mJKGj1rYOT+njbv+ZwA/zJxYdewGl6qVatpg=
|
||||
github.com/pkoukk/tiktoken-go-loader v0.0.2 h1:LUKws63GV3pVHwH1srkBplBv+7URgmOmhSkRxsIvsK4=
|
||||
github.com/pkoukk/tiktoken-go-loader v0.0.2/go.mod h1:4mIkYyZooFlnenDlormIo6cd5wrlUKNr97wp9nGgEKo=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
@@ -264,8 +304,6 @@ github.com/refraction-networking/utls v1.8.2 h1:j4Q1gJj0xngdeH+Ox/qND11aEfhpgoEv
|
||||
github.com/refraction-networking/utls v1.8.2/go.mod h1:jkSOEkLqn+S/jtpEHPOsVv/4V4EVnelwbMQl4vCWXAM=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
||||
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
@@ -360,6 +398,8 @@ go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/Wgbsd
|
||||
go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
|
||||
go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
|
||||
go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
|
||||
go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
|
||||
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
|
||||
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
|
||||
@@ -415,6 +455,8 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm
|
||||
golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
|
||||
golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
|
||||
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
|
||||
google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 h1:8XJ4pajGwOlasW+L13MnEGA8W4115jJySQtVfS2/IBU=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4/go.mod h1:NnuHhy+bxcg30o7FnVAZbXsPHUDQ9qKWAQKCD7VxFtk=
|
||||
|
||||
@@ -30,6 +30,14 @@ const (
|
||||
// __CSP_NONCE__ will be replaced with actual nonce at request time by the SecurityHeaders middleware
|
||||
const DefaultCSPPolicy = "default-src 'self'; script-src 'self' __CSP_NONCE__ https://challenges.cloudflare.com https://static.cloudflareinsights.com; style-src 'self' 'unsafe-inline' https://fonts.googleapis.com; img-src 'self' data: https:; font-src 'self' data: https://fonts.gstatic.com; connect-src 'self' https:; frame-src https://challenges.cloudflare.com; frame-ancestors 'none'; base-uri 'self'; form-action 'self'"
|
||||
|
||||
// UMQ(用户消息队列)模式常量
|
||||
const (
|
||||
// UMQModeSerialize: 账号级串行锁 + RPM 自适应延迟
|
||||
UMQModeSerialize = "serialize"
|
||||
// UMQModeThrottle: 仅 RPM 自适应前置延迟,不阻塞并发
|
||||
UMQModeThrottle = "throttle"
|
||||
)
|
||||
|
||||
// 连接池隔离策略常量
|
||||
// 用于控制上游 HTTP 连接池的隔离粒度,影响连接复用和资源消耗
|
||||
const (
|
||||
@@ -265,8 +273,13 @@ type CSPConfig struct {
|
||||
}
|
||||
|
||||
type ProxyFallbackConfig struct {
|
||||
// AllowDirectOnError 当代理初始化失败时是否允许回退直连。
|
||||
// 默认 false:避免因代理配置错误导致 IP 泄露/关联。
|
||||
// AllowDirectOnError 当辅助服务的代理初始化失败时是否允许回退直连。
|
||||
// 仅影响以下非 AI 账号连接的辅助服务:
|
||||
// - GitHub Release 更新检查
|
||||
// - 定价数据拉取
|
||||
// 不影响 AI 账号网关连接(Claude/OpenAI/Gemini/Antigravity),
|
||||
// 这些关键路径的代理失败始终返回错误,不会回退直连。
|
||||
// 默认 false:避免因代理配置错误导致服务器真实 IP 泄露。
|
||||
AllowDirectOnError bool `mapstructure:"allow_direct_on_error"`
|
||||
}
|
||||
|
||||
@@ -364,6 +377,8 @@ type GatewayConfig struct {
|
||||
// OpenAIPassthroughAllowTimeoutHeaders: OpenAI 透传模式是否放行客户端超时头
|
||||
// 关闭(默认)可避免 x-stainless-timeout 等头导致上游提前断流。
|
||||
OpenAIPassthroughAllowTimeoutHeaders bool `mapstructure:"openai_passthrough_allow_timeout_headers"`
|
||||
// OpenAIWS: OpenAI Responses WebSocket 配置(默认开启,可按需回滚到 HTTP)
|
||||
OpenAIWS GatewayOpenAIWSConfig `mapstructure:"openai_ws"`
|
||||
|
||||
// HTTP 上游连接池配置(性能优化:支持高并发场景调优)
|
||||
// MaxIdleConns: 所有主机的最大空闲连接总数
|
||||
@@ -448,6 +463,147 @@ type GatewayConfig struct {
|
||||
UserGroupRateCacheTTLSeconds int `mapstructure:"user_group_rate_cache_ttl_seconds"`
|
||||
// ModelsListCacheTTLSeconds: /v1/models 模型列表短缓存 TTL(秒)
|
||||
ModelsListCacheTTLSeconds int `mapstructure:"models_list_cache_ttl_seconds"`
|
||||
|
||||
// UserMessageQueue: 用户消息串行队列配置
|
||||
// 对 role:"user" 的真实用户消息实施账号级串行化 + RPM 自适应延迟
|
||||
UserMessageQueue UserMessageQueueConfig `mapstructure:"user_message_queue"`
|
||||
}
|
||||
|
||||
// UserMessageQueueConfig 用户消息串行队列配置
|
||||
// 用于 Anthropic OAuth/SetupToken 账号的用户消息串行化发送
|
||||
type UserMessageQueueConfig struct {
|
||||
// Mode: 模式选择
|
||||
// "serialize" = 账号级串行锁 + RPM 自适应延迟
|
||||
// "throttle" = 仅 RPM 自适应前置延迟,不阻塞并发
|
||||
// "" = 禁用(默认)
|
||||
Mode string `mapstructure:"mode"`
|
||||
// Enabled: 已废弃,仅向后兼容(等同于 mode: "serialize")
|
||||
Enabled bool `mapstructure:"enabled"`
|
||||
// LockTTLMs: 串行锁 TTL(毫秒),应大于最长请求时间
|
||||
LockTTLMs int `mapstructure:"lock_ttl_ms"`
|
||||
// WaitTimeoutMs: 等待获取锁的超时时间(毫秒)
|
||||
WaitTimeoutMs int `mapstructure:"wait_timeout_ms"`
|
||||
// MinDelayMs: RPM 自适应延迟下限(毫秒)
|
||||
MinDelayMs int `mapstructure:"min_delay_ms"`
|
||||
// MaxDelayMs: RPM 自适应延迟上限(毫秒)
|
||||
MaxDelayMs int `mapstructure:"max_delay_ms"`
|
||||
// CleanupIntervalSeconds: 孤儿锁清理间隔(秒),0 表示禁用
|
||||
CleanupIntervalSeconds int `mapstructure:"cleanup_interval_seconds"`
|
||||
}
|
||||
|
||||
// WaitTimeout 返回等待超时的 time.Duration
|
||||
func (c *UserMessageQueueConfig) WaitTimeout() time.Duration {
|
||||
if c.WaitTimeoutMs <= 0 {
|
||||
return 30 * time.Second
|
||||
}
|
||||
return time.Duration(c.WaitTimeoutMs) * time.Millisecond
|
||||
}
|
||||
|
||||
// GetEffectiveMode 返回生效的模式
|
||||
// 注意:Mode 字段已在 load() 中做过白名单校验和规范化,此处无需重复验证
|
||||
func (c *UserMessageQueueConfig) GetEffectiveMode() string {
|
||||
if c.Mode == UMQModeSerialize || c.Mode == UMQModeThrottle {
|
||||
return c.Mode
|
||||
}
|
||||
if c.Enabled {
|
||||
return UMQModeSerialize // 向后兼容
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// GatewayOpenAIWSConfig OpenAI Responses WebSocket 配置。
|
||||
// 注意:默认全局开启;如需回滚可使用 force_http 或关闭 enabled。
|
||||
type GatewayOpenAIWSConfig struct {
|
||||
// ModeRouterV2Enabled: 新版 WS mode 路由开关(默认 false;关闭时保持 legacy 行为)
|
||||
ModeRouterV2Enabled bool `mapstructure:"mode_router_v2_enabled"`
|
||||
// IngressModeDefault: ingress 默认模式(off/shared/dedicated)
|
||||
IngressModeDefault string `mapstructure:"ingress_mode_default"`
|
||||
// Enabled: 全局总开关(默认 true)
|
||||
Enabled bool `mapstructure:"enabled"`
|
||||
// OAuthEnabled: 是否允许 OpenAI OAuth 账号使用 WS
|
||||
OAuthEnabled bool `mapstructure:"oauth_enabled"`
|
||||
// APIKeyEnabled: 是否允许 OpenAI API Key 账号使用 WS
|
||||
APIKeyEnabled bool `mapstructure:"apikey_enabled"`
|
||||
// ForceHTTP: 全局强制 HTTP(用于紧急回滚)
|
||||
ForceHTTP bool `mapstructure:"force_http"`
|
||||
// AllowStoreRecovery: 允许在 WSv2 下按策略恢复 store=true(默认 false)
|
||||
AllowStoreRecovery bool `mapstructure:"allow_store_recovery"`
|
||||
// IngressPreviousResponseRecoveryEnabled: ingress 模式收到 previous_response_not_found 时,是否允许自动去掉 previous_response_id 重试一次(默认 true)
|
||||
IngressPreviousResponseRecoveryEnabled bool `mapstructure:"ingress_previous_response_recovery_enabled"`
|
||||
// StoreDisabledConnMode: store=false 且无可复用会话连接时的建连策略(strict/adaptive/off)
|
||||
// - strict: 强制新建连接(隔离优先)
|
||||
// - adaptive: 仅在高风险失败后强制新建连接(性能与隔离折中)
|
||||
// - off: 不强制新建连接(复用优先)
|
||||
StoreDisabledConnMode string `mapstructure:"store_disabled_conn_mode"`
|
||||
// StoreDisabledForceNewConn: store=false 且无可复用粘连连接时是否强制新建连接(默认 true,保障会话隔离)
|
||||
// 兼容旧配置;当 StoreDisabledConnMode 为空时才生效。
|
||||
StoreDisabledForceNewConn bool `mapstructure:"store_disabled_force_new_conn"`
|
||||
// PrewarmGenerateEnabled: 是否启用 WSv2 generate=false 预热(默认 false)
|
||||
PrewarmGenerateEnabled bool `mapstructure:"prewarm_generate_enabled"`
|
||||
|
||||
// Feature 开关:v2 优先于 v1
|
||||
ResponsesWebsockets bool `mapstructure:"responses_websockets"`
|
||||
ResponsesWebsocketsV2 bool `mapstructure:"responses_websockets_v2"`
|
||||
|
||||
// 连接池参数
|
||||
MaxConnsPerAccount int `mapstructure:"max_conns_per_account"`
|
||||
MinIdlePerAccount int `mapstructure:"min_idle_per_account"`
|
||||
MaxIdlePerAccount int `mapstructure:"max_idle_per_account"`
|
||||
// DynamicMaxConnsByAccountConcurrencyEnabled: 是否按账号并发动态计算连接池上限
|
||||
DynamicMaxConnsByAccountConcurrencyEnabled bool `mapstructure:"dynamic_max_conns_by_account_concurrency_enabled"`
|
||||
// OAuthMaxConnsFactor: OAuth 账号连接池系数(effective=ceil(concurrency*factor))
|
||||
OAuthMaxConnsFactor float64 `mapstructure:"oauth_max_conns_factor"`
|
||||
// APIKeyMaxConnsFactor: API Key 账号连接池系数(effective=ceil(concurrency*factor))
|
||||
APIKeyMaxConnsFactor float64 `mapstructure:"apikey_max_conns_factor"`
|
||||
DialTimeoutSeconds int `mapstructure:"dial_timeout_seconds"`
|
||||
ReadTimeoutSeconds int `mapstructure:"read_timeout_seconds"`
|
||||
WriteTimeoutSeconds int `mapstructure:"write_timeout_seconds"`
|
||||
PoolTargetUtilization float64 `mapstructure:"pool_target_utilization"`
|
||||
QueueLimitPerConn int `mapstructure:"queue_limit_per_conn"`
|
||||
// EventFlushBatchSize: WS 流式写出批量 flush 阈值(事件条数)
|
||||
EventFlushBatchSize int `mapstructure:"event_flush_batch_size"`
|
||||
// EventFlushIntervalMS: WS 流式写出最大等待时间(毫秒);0 表示仅按 batch 触发
|
||||
EventFlushIntervalMS int `mapstructure:"event_flush_interval_ms"`
|
||||
// PrewarmCooldownMS: 连接池预热触发冷却时间(毫秒)
|
||||
PrewarmCooldownMS int `mapstructure:"prewarm_cooldown_ms"`
|
||||
// FallbackCooldownSeconds: WS 回退冷却窗口,避免 WS/HTTP 抖动;0 表示关闭冷却
|
||||
FallbackCooldownSeconds int `mapstructure:"fallback_cooldown_seconds"`
|
||||
// RetryBackoffInitialMS: WS 重试初始退避(毫秒);<=0 表示关闭退避
|
||||
RetryBackoffInitialMS int `mapstructure:"retry_backoff_initial_ms"`
|
||||
// RetryBackoffMaxMS: WS 重试最大退避(毫秒)
|
||||
RetryBackoffMaxMS int `mapstructure:"retry_backoff_max_ms"`
|
||||
// RetryJitterRatio: WS 重试退避抖动比例(0-1)
|
||||
RetryJitterRatio float64 `mapstructure:"retry_jitter_ratio"`
|
||||
// RetryTotalBudgetMS: WS 单次请求重试总预算(毫秒);0 表示关闭预算限制
|
||||
RetryTotalBudgetMS int `mapstructure:"retry_total_budget_ms"`
|
||||
// PayloadLogSampleRate: payload_schema 日志采样率(0-1)
|
||||
PayloadLogSampleRate float64 `mapstructure:"payload_log_sample_rate"`
|
||||
|
||||
// 账号调度与粘连参数
|
||||
LBTopK int `mapstructure:"lb_top_k"`
|
||||
// StickySessionTTLSeconds: session_hash -> account_id 粘连 TTL
|
||||
StickySessionTTLSeconds int `mapstructure:"sticky_session_ttl_seconds"`
|
||||
// SessionHashReadOldFallback: 会话哈希迁移期是否允许“新 key 未命中时回退读旧 SHA-256 key”
|
||||
SessionHashReadOldFallback bool `mapstructure:"session_hash_read_old_fallback"`
|
||||
// SessionHashDualWriteOld: 会话哈希迁移期是否双写旧 SHA-256 key(短 TTL)
|
||||
SessionHashDualWriteOld bool `mapstructure:"session_hash_dual_write_old"`
|
||||
// MetadataBridgeEnabled: RequestMetadata 迁移期是否保留旧 ctxkey.* 兼容桥接
|
||||
MetadataBridgeEnabled bool `mapstructure:"metadata_bridge_enabled"`
|
||||
// StickyResponseIDTTLSeconds: response_id -> account_id 粘连 TTL
|
||||
StickyResponseIDTTLSeconds int `mapstructure:"sticky_response_id_ttl_seconds"`
|
||||
// StickyPreviousResponseTTLSeconds: 兼容旧键(当新键未设置时回退)
|
||||
StickyPreviousResponseTTLSeconds int `mapstructure:"sticky_previous_response_ttl_seconds"`
|
||||
|
||||
SchedulerScoreWeights GatewayOpenAIWSSchedulerScoreWeights `mapstructure:"scheduler_score_weights"`
|
||||
}
|
||||
|
||||
// GatewayOpenAIWSSchedulerScoreWeights 账号调度打分权重。
|
||||
type GatewayOpenAIWSSchedulerScoreWeights struct {
|
||||
Priority float64 `mapstructure:"priority"`
|
||||
Load float64 `mapstructure:"load"`
|
||||
Queue float64 `mapstructure:"queue"`
|
||||
ErrorRate float64 `mapstructure:"error_rate"`
|
||||
TTFT float64 `mapstructure:"ttft"`
|
||||
}
|
||||
|
||||
// GatewayUsageRecordConfig 使用量记录异步队列配置
|
||||
@@ -716,7 +872,8 @@ type DefaultConfig struct {
|
||||
}
|
||||
|
||||
type RateLimitConfig struct {
|
||||
OverloadCooldownMinutes int `mapstructure:"overload_cooldown_minutes"` // 529过载冷却时间(分钟)
|
||||
OverloadCooldownMinutes int `mapstructure:"overload_cooldown_minutes"` // 529过载冷却时间(分钟)
|
||||
OAuth401CooldownMinutes int `mapstructure:"oauth_401_cooldown_minutes"` // OAuth 401临时不可调度冷却(分钟)
|
||||
}
|
||||
|
||||
// APIKeyAuthCacheConfig API Key 认证缓存配置
|
||||
@@ -886,6 +1043,20 @@ func load(allowMissingJWTSecret bool) (*Config, error) {
|
||||
cfg.Log.StacktraceLevel = strings.ToLower(strings.TrimSpace(cfg.Log.StacktraceLevel))
|
||||
cfg.Log.Output.FilePath = strings.TrimSpace(cfg.Log.Output.FilePath)
|
||||
|
||||
// 兼容旧键 gateway.openai_ws.sticky_previous_response_ttl_seconds。
|
||||
// 新键未配置(<=0)时回退旧键;新键优先。
|
||||
if cfg.Gateway.OpenAIWS.StickyResponseIDTTLSeconds <= 0 && cfg.Gateway.OpenAIWS.StickyPreviousResponseTTLSeconds > 0 {
|
||||
cfg.Gateway.OpenAIWS.StickyResponseIDTTLSeconds = cfg.Gateway.OpenAIWS.StickyPreviousResponseTTLSeconds
|
||||
}
|
||||
|
||||
// Normalize UMQ mode: 白名单校验,非法值在加载时一次性 warn 并清空
|
||||
if m := cfg.Gateway.UserMessageQueue.Mode; m != "" && m != UMQModeSerialize && m != UMQModeThrottle {
|
||||
slog.Warn("invalid user_message_queue mode, disabling",
|
||||
"mode", m,
|
||||
"valid_modes", []string{UMQModeSerialize, UMQModeThrottle})
|
||||
cfg.Gateway.UserMessageQueue.Mode = ""
|
||||
}
|
||||
|
||||
// Auto-generate TOTP encryption key if not set (32 bytes = 64 hex chars for AES-256)
|
||||
cfg.Totp.EncryptionKey = strings.TrimSpace(cfg.Totp.EncryptionKey)
|
||||
if cfg.Totp.EncryptionKey == "" {
|
||||
@@ -945,7 +1116,7 @@ func setDefaults() {
|
||||
viper.SetDefault("server.read_header_timeout", 30) // 30秒读取请求头
|
||||
viper.SetDefault("server.idle_timeout", 120) // 120秒空闲超时
|
||||
viper.SetDefault("server.trusted_proxies", []string{})
|
||||
viper.SetDefault("server.max_request_body_size", int64(100*1024*1024))
|
||||
viper.SetDefault("server.max_request_body_size", int64(256*1024*1024))
|
||||
// H2C 默认配置
|
||||
viper.SetDefault("server.h2c.enabled", false)
|
||||
viper.SetDefault("server.h2c.max_concurrent_streams", uint32(50)) // 50 个并发流
|
||||
@@ -1002,6 +1173,9 @@ func setDefaults() {
|
||||
viper.SetDefault("security.csp.policy", DefaultCSPPolicy)
|
||||
viper.SetDefault("security.proxy_probe.insecure_skip_verify", false)
|
||||
|
||||
// Security - disable direct fallback on proxy error
|
||||
viper.SetDefault("security.proxy_fallback.allow_direct_on_error", false)
|
||||
|
||||
// Billing
|
||||
viper.SetDefault("billing.circuit_breaker.enabled", true)
|
||||
viper.SetDefault("billing.circuit_breaker.failure_threshold", 5)
|
||||
@@ -1053,7 +1227,7 @@ func setDefaults() {
|
||||
|
||||
// Ops (vNext)
|
||||
viper.SetDefault("ops.enabled", true)
|
||||
viper.SetDefault("ops.use_preaggregated_tables", false)
|
||||
viper.SetDefault("ops.use_preaggregated_tables", true)
|
||||
viper.SetDefault("ops.cleanup.enabled", true)
|
||||
viper.SetDefault("ops.cleanup.schedule", "0 2 * * *")
|
||||
// Retention days: vNext defaults to 30 days across ops datasets.
|
||||
@@ -1087,10 +1261,11 @@ func setDefaults() {
|
||||
|
||||
// RateLimit
|
||||
viper.SetDefault("rate_limit.overload_cooldown_minutes", 10)
|
||||
viper.SetDefault("rate_limit.oauth_401_cooldown_minutes", 10)
|
||||
|
||||
// Pricing - 从 model-price-repo 同步模型定价和上下文窗口数据的配置
|
||||
viper.SetDefault("pricing.remote_url", "https://github.com/Wei-Shaw/model-price-repo/raw/refs/heads/main/model_prices_and_context_window.json")
|
||||
viper.SetDefault("pricing.hash_url", "https://github.com/Wei-Shaw/model-price-repo/raw/refs/heads/main/model_prices_and_context_window.sha256")
|
||||
// Pricing - 从 model-price-repo 同步模型定价和上下文窗口数据(固定到 commit,避免分支漂移)
|
||||
viper.SetDefault("pricing.remote_url", "https://raw.githubusercontent.com/Wei-Shaw/model-price-repo/c7947e9871687e664180bc971d4837f1fc2784a9/model_prices_and_context_window.json")
|
||||
viper.SetDefault("pricing.hash_url", "https://raw.githubusercontent.com/Wei-Shaw/model-price-repo/c7947e9871687e664180bc971d4837f1fc2784a9/model_prices_and_context_window.sha256")
|
||||
viper.SetDefault("pricing.data_dir", "./data")
|
||||
viper.SetDefault("pricing.fallback_file", "./resources/model-pricing/model_prices_and_context_window.json")
|
||||
viper.SetDefault("pricing.update_interval_hours", 24)
|
||||
@@ -1157,9 +1332,55 @@ func setDefaults() {
|
||||
viper.SetDefault("gateway.max_account_switches_gemini", 3)
|
||||
viper.SetDefault("gateway.force_codex_cli", false)
|
||||
viper.SetDefault("gateway.openai_passthrough_allow_timeout_headers", false)
|
||||
// OpenAI Responses WebSocket(默认开启;可通过 force_http 紧急回滚)
|
||||
viper.SetDefault("gateway.openai_ws.enabled", true)
|
||||
viper.SetDefault("gateway.openai_ws.mode_router_v2_enabled", false)
|
||||
viper.SetDefault("gateway.openai_ws.ingress_mode_default", "shared")
|
||||
viper.SetDefault("gateway.openai_ws.oauth_enabled", true)
|
||||
viper.SetDefault("gateway.openai_ws.apikey_enabled", true)
|
||||
viper.SetDefault("gateway.openai_ws.force_http", false)
|
||||
viper.SetDefault("gateway.openai_ws.allow_store_recovery", false)
|
||||
viper.SetDefault("gateway.openai_ws.ingress_previous_response_recovery_enabled", true)
|
||||
viper.SetDefault("gateway.openai_ws.store_disabled_conn_mode", "strict")
|
||||
viper.SetDefault("gateway.openai_ws.store_disabled_force_new_conn", true)
|
||||
viper.SetDefault("gateway.openai_ws.prewarm_generate_enabled", false)
|
||||
viper.SetDefault("gateway.openai_ws.responses_websockets", false)
|
||||
viper.SetDefault("gateway.openai_ws.responses_websockets_v2", true)
|
||||
viper.SetDefault("gateway.openai_ws.max_conns_per_account", 128)
|
||||
viper.SetDefault("gateway.openai_ws.min_idle_per_account", 4)
|
||||
viper.SetDefault("gateway.openai_ws.max_idle_per_account", 12)
|
||||
viper.SetDefault("gateway.openai_ws.dynamic_max_conns_by_account_concurrency_enabled", true)
|
||||
viper.SetDefault("gateway.openai_ws.oauth_max_conns_factor", 1.0)
|
||||
viper.SetDefault("gateway.openai_ws.apikey_max_conns_factor", 1.0)
|
||||
viper.SetDefault("gateway.openai_ws.dial_timeout_seconds", 10)
|
||||
viper.SetDefault("gateway.openai_ws.read_timeout_seconds", 900)
|
||||
viper.SetDefault("gateway.openai_ws.write_timeout_seconds", 120)
|
||||
viper.SetDefault("gateway.openai_ws.pool_target_utilization", 0.7)
|
||||
viper.SetDefault("gateway.openai_ws.queue_limit_per_conn", 64)
|
||||
viper.SetDefault("gateway.openai_ws.event_flush_batch_size", 1)
|
||||
viper.SetDefault("gateway.openai_ws.event_flush_interval_ms", 10)
|
||||
viper.SetDefault("gateway.openai_ws.prewarm_cooldown_ms", 300)
|
||||
viper.SetDefault("gateway.openai_ws.fallback_cooldown_seconds", 30)
|
||||
viper.SetDefault("gateway.openai_ws.retry_backoff_initial_ms", 120)
|
||||
viper.SetDefault("gateway.openai_ws.retry_backoff_max_ms", 2000)
|
||||
viper.SetDefault("gateway.openai_ws.retry_jitter_ratio", 0.2)
|
||||
viper.SetDefault("gateway.openai_ws.retry_total_budget_ms", 5000)
|
||||
viper.SetDefault("gateway.openai_ws.payload_log_sample_rate", 0.2)
|
||||
viper.SetDefault("gateway.openai_ws.lb_top_k", 7)
|
||||
viper.SetDefault("gateway.openai_ws.sticky_session_ttl_seconds", 3600)
|
||||
viper.SetDefault("gateway.openai_ws.session_hash_read_old_fallback", true)
|
||||
viper.SetDefault("gateway.openai_ws.session_hash_dual_write_old", true)
|
||||
viper.SetDefault("gateway.openai_ws.metadata_bridge_enabled", true)
|
||||
viper.SetDefault("gateway.openai_ws.sticky_response_id_ttl_seconds", 3600)
|
||||
viper.SetDefault("gateway.openai_ws.sticky_previous_response_ttl_seconds", 3600)
|
||||
viper.SetDefault("gateway.openai_ws.scheduler_score_weights.priority", 1.0)
|
||||
viper.SetDefault("gateway.openai_ws.scheduler_score_weights.load", 1.0)
|
||||
viper.SetDefault("gateway.openai_ws.scheduler_score_weights.queue", 0.7)
|
||||
viper.SetDefault("gateway.openai_ws.scheduler_score_weights.error_rate", 0.8)
|
||||
viper.SetDefault("gateway.openai_ws.scheduler_score_weights.ttft", 0.5)
|
||||
viper.SetDefault("gateway.antigravity_fallback_cooldown_minutes", 1)
|
||||
viper.SetDefault("gateway.antigravity_extra_retries", 10)
|
||||
viper.SetDefault("gateway.max_body_size", int64(100*1024*1024))
|
||||
viper.SetDefault("gateway.max_body_size", int64(256*1024*1024))
|
||||
viper.SetDefault("gateway.upstream_response_read_max_bytes", int64(8*1024*1024))
|
||||
viper.SetDefault("gateway.proxy_probe_response_read_max_bytes", int64(1024*1024))
|
||||
viper.SetDefault("gateway.gemini_debug_response_headers", false)
|
||||
@@ -1215,6 +1436,14 @@ func setDefaults() {
|
||||
viper.SetDefault("gateway.user_group_rate_cache_ttl_seconds", 30)
|
||||
viper.SetDefault("gateway.models_list_cache_ttl_seconds", 15)
|
||||
// TLS指纹伪装配置(默认关闭,需要账号级别单独启用)
|
||||
// 用户消息串行队列默认值
|
||||
viper.SetDefault("gateway.user_message_queue.enabled", false)
|
||||
viper.SetDefault("gateway.user_message_queue.lock_ttl_ms", 120000)
|
||||
viper.SetDefault("gateway.user_message_queue.wait_timeout_ms", 30000)
|
||||
viper.SetDefault("gateway.user_message_queue.min_delay_ms", 200)
|
||||
viper.SetDefault("gateway.user_message_queue.max_delay_ms", 2000)
|
||||
viper.SetDefault("gateway.user_message_queue.cleanup_interval_seconds", 60)
|
||||
|
||||
viper.SetDefault("gateway.tls_fingerprint.enabled", true)
|
||||
viper.SetDefault("concurrency.ping_interval", 10)
|
||||
|
||||
@@ -1266,9 +1495,6 @@ func setDefaults() {
|
||||
viper.SetDefault("gemini.oauth.scopes", "")
|
||||
viper.SetDefault("gemini.quota.policy", "")
|
||||
|
||||
// Security - proxy fallback
|
||||
viper.SetDefault("security.proxy_fallback.allow_direct_on_error", false)
|
||||
|
||||
// Subscription Maintenance (bounded queue + worker pool)
|
||||
viper.SetDefault("subscription_maintenance.worker_count", 2)
|
||||
viper.SetDefault("subscription_maintenance.queue_size", 1024)
|
||||
@@ -1747,6 +1973,118 @@ func (c *Config) Validate() error {
|
||||
(c.Gateway.StreamKeepaliveInterval < 5 || c.Gateway.StreamKeepaliveInterval > 30) {
|
||||
return fmt.Errorf("gateway.stream_keepalive_interval must be 0 or between 5-30 seconds")
|
||||
}
|
||||
// 兼容旧键 sticky_previous_response_ttl_seconds
|
||||
if c.Gateway.OpenAIWS.StickyResponseIDTTLSeconds <= 0 && c.Gateway.OpenAIWS.StickyPreviousResponseTTLSeconds > 0 {
|
||||
c.Gateway.OpenAIWS.StickyResponseIDTTLSeconds = c.Gateway.OpenAIWS.StickyPreviousResponseTTLSeconds
|
||||
}
|
||||
if c.Gateway.OpenAIWS.MaxConnsPerAccount <= 0 {
|
||||
return fmt.Errorf("gateway.openai_ws.max_conns_per_account must be positive")
|
||||
}
|
||||
if c.Gateway.OpenAIWS.MinIdlePerAccount < 0 {
|
||||
return fmt.Errorf("gateway.openai_ws.min_idle_per_account must be non-negative")
|
||||
}
|
||||
if c.Gateway.OpenAIWS.MaxIdlePerAccount < 0 {
|
||||
return fmt.Errorf("gateway.openai_ws.max_idle_per_account must be non-negative")
|
||||
}
|
||||
if c.Gateway.OpenAIWS.MinIdlePerAccount > c.Gateway.OpenAIWS.MaxIdlePerAccount {
|
||||
return fmt.Errorf("gateway.openai_ws.min_idle_per_account must be <= max_idle_per_account")
|
||||
}
|
||||
if c.Gateway.OpenAIWS.MaxIdlePerAccount > c.Gateway.OpenAIWS.MaxConnsPerAccount {
|
||||
return fmt.Errorf("gateway.openai_ws.max_idle_per_account must be <= max_conns_per_account")
|
||||
}
|
||||
if c.Gateway.OpenAIWS.OAuthMaxConnsFactor <= 0 {
|
||||
return fmt.Errorf("gateway.openai_ws.oauth_max_conns_factor must be positive")
|
||||
}
|
||||
if c.Gateway.OpenAIWS.APIKeyMaxConnsFactor <= 0 {
|
||||
return fmt.Errorf("gateway.openai_ws.apikey_max_conns_factor must be positive")
|
||||
}
|
||||
if c.Gateway.OpenAIWS.DialTimeoutSeconds <= 0 {
|
||||
return fmt.Errorf("gateway.openai_ws.dial_timeout_seconds must be positive")
|
||||
}
|
||||
if c.Gateway.OpenAIWS.ReadTimeoutSeconds <= 0 {
|
||||
return fmt.Errorf("gateway.openai_ws.read_timeout_seconds must be positive")
|
||||
}
|
||||
if c.Gateway.OpenAIWS.WriteTimeoutSeconds <= 0 {
|
||||
return fmt.Errorf("gateway.openai_ws.write_timeout_seconds must be positive")
|
||||
}
|
||||
if c.Gateway.OpenAIWS.PoolTargetUtilization <= 0 || c.Gateway.OpenAIWS.PoolTargetUtilization > 1 {
|
||||
return fmt.Errorf("gateway.openai_ws.pool_target_utilization must be within (0,1]")
|
||||
}
|
||||
if c.Gateway.OpenAIWS.QueueLimitPerConn <= 0 {
|
||||
return fmt.Errorf("gateway.openai_ws.queue_limit_per_conn must be positive")
|
||||
}
|
||||
if c.Gateway.OpenAIWS.EventFlushBatchSize <= 0 {
|
||||
return fmt.Errorf("gateway.openai_ws.event_flush_batch_size must be positive")
|
||||
}
|
||||
if c.Gateway.OpenAIWS.EventFlushIntervalMS < 0 {
|
||||
return fmt.Errorf("gateway.openai_ws.event_flush_interval_ms must be non-negative")
|
||||
}
|
||||
if c.Gateway.OpenAIWS.PrewarmCooldownMS < 0 {
|
||||
return fmt.Errorf("gateway.openai_ws.prewarm_cooldown_ms must be non-negative")
|
||||
}
|
||||
if c.Gateway.OpenAIWS.FallbackCooldownSeconds < 0 {
|
||||
return fmt.Errorf("gateway.openai_ws.fallback_cooldown_seconds must be non-negative")
|
||||
}
|
||||
if c.Gateway.OpenAIWS.RetryBackoffInitialMS < 0 {
|
||||
return fmt.Errorf("gateway.openai_ws.retry_backoff_initial_ms must be non-negative")
|
||||
}
|
||||
if c.Gateway.OpenAIWS.RetryBackoffMaxMS < 0 {
|
||||
return fmt.Errorf("gateway.openai_ws.retry_backoff_max_ms must be non-negative")
|
||||
}
|
||||
if c.Gateway.OpenAIWS.RetryBackoffInitialMS > 0 && c.Gateway.OpenAIWS.RetryBackoffMaxMS > 0 &&
|
||||
c.Gateway.OpenAIWS.RetryBackoffMaxMS < c.Gateway.OpenAIWS.RetryBackoffInitialMS {
|
||||
return fmt.Errorf("gateway.openai_ws.retry_backoff_max_ms must be >= retry_backoff_initial_ms")
|
||||
}
|
||||
if c.Gateway.OpenAIWS.RetryJitterRatio < 0 || c.Gateway.OpenAIWS.RetryJitterRatio > 1 {
|
||||
return fmt.Errorf("gateway.openai_ws.retry_jitter_ratio must be within [0,1]")
|
||||
}
|
||||
if c.Gateway.OpenAIWS.RetryTotalBudgetMS < 0 {
|
||||
return fmt.Errorf("gateway.openai_ws.retry_total_budget_ms must be non-negative")
|
||||
}
|
||||
if mode := strings.ToLower(strings.TrimSpace(c.Gateway.OpenAIWS.IngressModeDefault)); mode != "" {
|
||||
switch mode {
|
||||
case "off", "shared", "dedicated":
|
||||
default:
|
||||
return fmt.Errorf("gateway.openai_ws.ingress_mode_default must be one of off|shared|dedicated")
|
||||
}
|
||||
}
|
||||
if mode := strings.ToLower(strings.TrimSpace(c.Gateway.OpenAIWS.StoreDisabledConnMode)); mode != "" {
|
||||
switch mode {
|
||||
case "strict", "adaptive", "off":
|
||||
default:
|
||||
return fmt.Errorf("gateway.openai_ws.store_disabled_conn_mode must be one of strict|adaptive|off")
|
||||
}
|
||||
}
|
||||
if c.Gateway.OpenAIWS.PayloadLogSampleRate < 0 || c.Gateway.OpenAIWS.PayloadLogSampleRate > 1 {
|
||||
return fmt.Errorf("gateway.openai_ws.payload_log_sample_rate must be within [0,1]")
|
||||
}
|
||||
if c.Gateway.OpenAIWS.LBTopK <= 0 {
|
||||
return fmt.Errorf("gateway.openai_ws.lb_top_k must be positive")
|
||||
}
|
||||
if c.Gateway.OpenAIWS.StickySessionTTLSeconds <= 0 {
|
||||
return fmt.Errorf("gateway.openai_ws.sticky_session_ttl_seconds must be positive")
|
||||
}
|
||||
if c.Gateway.OpenAIWS.StickyResponseIDTTLSeconds <= 0 {
|
||||
return fmt.Errorf("gateway.openai_ws.sticky_response_id_ttl_seconds must be positive")
|
||||
}
|
||||
if c.Gateway.OpenAIWS.StickyPreviousResponseTTLSeconds < 0 {
|
||||
return fmt.Errorf("gateway.openai_ws.sticky_previous_response_ttl_seconds must be non-negative")
|
||||
}
|
||||
if c.Gateway.OpenAIWS.SchedulerScoreWeights.Priority < 0 ||
|
||||
c.Gateway.OpenAIWS.SchedulerScoreWeights.Load < 0 ||
|
||||
c.Gateway.OpenAIWS.SchedulerScoreWeights.Queue < 0 ||
|
||||
c.Gateway.OpenAIWS.SchedulerScoreWeights.ErrorRate < 0 ||
|
||||
c.Gateway.OpenAIWS.SchedulerScoreWeights.TTFT < 0 {
|
||||
return fmt.Errorf("gateway.openai_ws.scheduler_score_weights.* must be non-negative")
|
||||
}
|
||||
weightSum := c.Gateway.OpenAIWS.SchedulerScoreWeights.Priority +
|
||||
c.Gateway.OpenAIWS.SchedulerScoreWeights.Load +
|
||||
c.Gateway.OpenAIWS.SchedulerScoreWeights.Queue +
|
||||
c.Gateway.OpenAIWS.SchedulerScoreWeights.ErrorRate +
|
||||
c.Gateway.OpenAIWS.SchedulerScoreWeights.TTFT
|
||||
if weightSum <= 0 {
|
||||
return fmt.Errorf("gateway.openai_ws.scheduler_score_weights must not all be zero")
|
||||
}
|
||||
if c.Gateway.MaxLineSize < 0 {
|
||||
return fmt.Errorf("gateway.max_line_size must be non-negative")
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func resetViperWithJWTSecret(t *testing.T) {
|
||||
@@ -75,6 +76,103 @@ func TestLoadDefaultSchedulingConfig(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadDefaultOpenAIWSConfig(t *testing.T) {
|
||||
resetViperWithJWTSecret(t)
|
||||
|
||||
cfg, err := Load()
|
||||
if err != nil {
|
||||
t.Fatalf("Load() error: %v", err)
|
||||
}
|
||||
|
||||
if !cfg.Gateway.OpenAIWS.Enabled {
|
||||
t.Fatalf("Gateway.OpenAIWS.Enabled = false, want true")
|
||||
}
|
||||
if !cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 {
|
||||
t.Fatalf("Gateway.OpenAIWS.ResponsesWebsocketsV2 = false, want true")
|
||||
}
|
||||
if cfg.Gateway.OpenAIWS.ResponsesWebsockets {
|
||||
t.Fatalf("Gateway.OpenAIWS.ResponsesWebsockets = true, want false")
|
||||
}
|
||||
if !cfg.Gateway.OpenAIWS.DynamicMaxConnsByAccountConcurrencyEnabled {
|
||||
t.Fatalf("Gateway.OpenAIWS.DynamicMaxConnsByAccountConcurrencyEnabled = false, want true")
|
||||
}
|
||||
if cfg.Gateway.OpenAIWS.OAuthMaxConnsFactor != 1.0 {
|
||||
t.Fatalf("Gateway.OpenAIWS.OAuthMaxConnsFactor = %v, want 1.0", cfg.Gateway.OpenAIWS.OAuthMaxConnsFactor)
|
||||
}
|
||||
if cfg.Gateway.OpenAIWS.APIKeyMaxConnsFactor != 1.0 {
|
||||
t.Fatalf("Gateway.OpenAIWS.APIKeyMaxConnsFactor = %v, want 1.0", cfg.Gateway.OpenAIWS.APIKeyMaxConnsFactor)
|
||||
}
|
||||
if cfg.Gateway.OpenAIWS.StickySessionTTLSeconds != 3600 {
|
||||
t.Fatalf("Gateway.OpenAIWS.StickySessionTTLSeconds = %d, want 3600", cfg.Gateway.OpenAIWS.StickySessionTTLSeconds)
|
||||
}
|
||||
if !cfg.Gateway.OpenAIWS.SessionHashReadOldFallback {
|
||||
t.Fatalf("Gateway.OpenAIWS.SessionHashReadOldFallback = false, want true")
|
||||
}
|
||||
if !cfg.Gateway.OpenAIWS.SessionHashDualWriteOld {
|
||||
t.Fatalf("Gateway.OpenAIWS.SessionHashDualWriteOld = false, want true")
|
||||
}
|
||||
if !cfg.Gateway.OpenAIWS.MetadataBridgeEnabled {
|
||||
t.Fatalf("Gateway.OpenAIWS.MetadataBridgeEnabled = false, want true")
|
||||
}
|
||||
if cfg.Gateway.OpenAIWS.StickyResponseIDTTLSeconds != 3600 {
|
||||
t.Fatalf("Gateway.OpenAIWS.StickyResponseIDTTLSeconds = %d, want 3600", cfg.Gateway.OpenAIWS.StickyResponseIDTTLSeconds)
|
||||
}
|
||||
if cfg.Gateway.OpenAIWS.FallbackCooldownSeconds != 30 {
|
||||
t.Fatalf("Gateway.OpenAIWS.FallbackCooldownSeconds = %d, want 30", cfg.Gateway.OpenAIWS.FallbackCooldownSeconds)
|
||||
}
|
||||
if cfg.Gateway.OpenAIWS.EventFlushBatchSize != 1 {
|
||||
t.Fatalf("Gateway.OpenAIWS.EventFlushBatchSize = %d, want 1", cfg.Gateway.OpenAIWS.EventFlushBatchSize)
|
||||
}
|
||||
if cfg.Gateway.OpenAIWS.EventFlushIntervalMS != 10 {
|
||||
t.Fatalf("Gateway.OpenAIWS.EventFlushIntervalMS = %d, want 10", cfg.Gateway.OpenAIWS.EventFlushIntervalMS)
|
||||
}
|
||||
if cfg.Gateway.OpenAIWS.PrewarmCooldownMS != 300 {
|
||||
t.Fatalf("Gateway.OpenAIWS.PrewarmCooldownMS = %d, want 300", cfg.Gateway.OpenAIWS.PrewarmCooldownMS)
|
||||
}
|
||||
if cfg.Gateway.OpenAIWS.RetryBackoffInitialMS != 120 {
|
||||
t.Fatalf("Gateway.OpenAIWS.RetryBackoffInitialMS = %d, want 120", cfg.Gateway.OpenAIWS.RetryBackoffInitialMS)
|
||||
}
|
||||
if cfg.Gateway.OpenAIWS.RetryBackoffMaxMS != 2000 {
|
||||
t.Fatalf("Gateway.OpenAIWS.RetryBackoffMaxMS = %d, want 2000", cfg.Gateway.OpenAIWS.RetryBackoffMaxMS)
|
||||
}
|
||||
if cfg.Gateway.OpenAIWS.RetryJitterRatio != 0.2 {
|
||||
t.Fatalf("Gateway.OpenAIWS.RetryJitterRatio = %v, want 0.2", cfg.Gateway.OpenAIWS.RetryJitterRatio)
|
||||
}
|
||||
if cfg.Gateway.OpenAIWS.RetryTotalBudgetMS != 5000 {
|
||||
t.Fatalf("Gateway.OpenAIWS.RetryTotalBudgetMS = %d, want 5000", cfg.Gateway.OpenAIWS.RetryTotalBudgetMS)
|
||||
}
|
||||
if cfg.Gateway.OpenAIWS.PayloadLogSampleRate != 0.2 {
|
||||
t.Fatalf("Gateway.OpenAIWS.PayloadLogSampleRate = %v, want 0.2", cfg.Gateway.OpenAIWS.PayloadLogSampleRate)
|
||||
}
|
||||
if !cfg.Gateway.OpenAIWS.StoreDisabledForceNewConn {
|
||||
t.Fatalf("Gateway.OpenAIWS.StoreDisabledForceNewConn = false, want true")
|
||||
}
|
||||
if cfg.Gateway.OpenAIWS.StoreDisabledConnMode != "strict" {
|
||||
t.Fatalf("Gateway.OpenAIWS.StoreDisabledConnMode = %q, want %q", cfg.Gateway.OpenAIWS.StoreDisabledConnMode, "strict")
|
||||
}
|
||||
if cfg.Gateway.OpenAIWS.ModeRouterV2Enabled {
|
||||
t.Fatalf("Gateway.OpenAIWS.ModeRouterV2Enabled = true, want false")
|
||||
}
|
||||
if cfg.Gateway.OpenAIWS.IngressModeDefault != "shared" {
|
||||
t.Fatalf("Gateway.OpenAIWS.IngressModeDefault = %q, want %q", cfg.Gateway.OpenAIWS.IngressModeDefault, "shared")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadOpenAIWSStickyTTLCompatibility(t *testing.T) {
|
||||
resetViperWithJWTSecret(t)
|
||||
t.Setenv("GATEWAY_OPENAI_WS_STICKY_RESPONSE_ID_TTL_SECONDS", "0")
|
||||
t.Setenv("GATEWAY_OPENAI_WS_STICKY_PREVIOUS_RESPONSE_TTL_SECONDS", "7200")
|
||||
|
||||
cfg, err := Load()
|
||||
if err != nil {
|
||||
t.Fatalf("Load() error: %v", err)
|
||||
}
|
||||
|
||||
if cfg.Gateway.OpenAIWS.StickyResponseIDTTLSeconds != 7200 {
|
||||
t.Fatalf("StickyResponseIDTTLSeconds = %d, want 7200", cfg.Gateway.OpenAIWS.StickyResponseIDTTLSeconds)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadDefaultIdempotencyConfig(t *testing.T) {
|
||||
resetViperWithJWTSecret(t)
|
||||
|
||||
@@ -993,6 +1091,16 @@ func TestValidateConfigErrors(t *testing.T) {
|
||||
mutate: func(c *Config) { c.Gateway.StreamKeepaliveInterval = 4 },
|
||||
wantErr: "gateway.stream_keepalive_interval",
|
||||
},
|
||||
{
|
||||
name: "gateway openai ws oauth max conns factor",
|
||||
mutate: func(c *Config) { c.Gateway.OpenAIWS.OAuthMaxConnsFactor = 0 },
|
||||
wantErr: "gateway.openai_ws.oauth_max_conns_factor",
|
||||
},
|
||||
{
|
||||
name: "gateway openai ws apikey max conns factor",
|
||||
mutate: func(c *Config) { c.Gateway.OpenAIWS.APIKeyMaxConnsFactor = 0 },
|
||||
wantErr: "gateway.openai_ws.apikey_max_conns_factor",
|
||||
},
|
||||
{
|
||||
name: "gateway stream data interval range",
|
||||
mutate: func(c *Config) { c.Gateway.StreamDataIntervalTimeout = 5 },
|
||||
@@ -1174,6 +1282,165 @@ func TestValidateConfigErrors(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateConfig_OpenAIWSRules(t *testing.T) {
|
||||
buildValid := func(t *testing.T) *Config {
|
||||
t.Helper()
|
||||
resetViperWithJWTSecret(t)
|
||||
cfg, err := Load()
|
||||
require.NoError(t, err)
|
||||
return cfg
|
||||
}
|
||||
|
||||
t.Run("sticky response id ttl 兼容旧键回填", func(t *testing.T) {
|
||||
cfg := buildValid(t)
|
||||
cfg.Gateway.OpenAIWS.StickyResponseIDTTLSeconds = 0
|
||||
cfg.Gateway.OpenAIWS.StickyPreviousResponseTTLSeconds = 7200
|
||||
|
||||
require.NoError(t, cfg.Validate())
|
||||
require.Equal(t, 7200, cfg.Gateway.OpenAIWS.StickyResponseIDTTLSeconds)
|
||||
})
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
mutate func(*Config)
|
||||
wantErr string
|
||||
}{
|
||||
{
|
||||
name: "max_conns_per_account 必须为正数",
|
||||
mutate: func(c *Config) { c.Gateway.OpenAIWS.MaxConnsPerAccount = 0 },
|
||||
wantErr: "gateway.openai_ws.max_conns_per_account",
|
||||
},
|
||||
{
|
||||
name: "min_idle_per_account 不能为负数",
|
||||
mutate: func(c *Config) { c.Gateway.OpenAIWS.MinIdlePerAccount = -1 },
|
||||
wantErr: "gateway.openai_ws.min_idle_per_account",
|
||||
},
|
||||
{
|
||||
name: "max_idle_per_account 不能为负数",
|
||||
mutate: func(c *Config) { c.Gateway.OpenAIWS.MaxIdlePerAccount = -1 },
|
||||
wantErr: "gateway.openai_ws.max_idle_per_account",
|
||||
},
|
||||
{
|
||||
name: "min_idle_per_account 不能大于 max_idle_per_account",
|
||||
mutate: func(c *Config) {
|
||||
c.Gateway.OpenAIWS.MinIdlePerAccount = 3
|
||||
c.Gateway.OpenAIWS.MaxIdlePerAccount = 2
|
||||
},
|
||||
wantErr: "gateway.openai_ws.min_idle_per_account must be <= max_idle_per_account",
|
||||
},
|
||||
{
|
||||
name: "max_idle_per_account 不能大于 max_conns_per_account",
|
||||
mutate: func(c *Config) {
|
||||
c.Gateway.OpenAIWS.MaxConnsPerAccount = 2
|
||||
c.Gateway.OpenAIWS.MinIdlePerAccount = 1
|
||||
c.Gateway.OpenAIWS.MaxIdlePerAccount = 3
|
||||
},
|
||||
wantErr: "gateway.openai_ws.max_idle_per_account must be <= max_conns_per_account",
|
||||
},
|
||||
{
|
||||
name: "dial_timeout_seconds 必须为正数",
|
||||
mutate: func(c *Config) { c.Gateway.OpenAIWS.DialTimeoutSeconds = 0 },
|
||||
wantErr: "gateway.openai_ws.dial_timeout_seconds",
|
||||
},
|
||||
{
|
||||
name: "read_timeout_seconds 必须为正数",
|
||||
mutate: func(c *Config) { c.Gateway.OpenAIWS.ReadTimeoutSeconds = 0 },
|
||||
wantErr: "gateway.openai_ws.read_timeout_seconds",
|
||||
},
|
||||
{
|
||||
name: "write_timeout_seconds 必须为正数",
|
||||
mutate: func(c *Config) { c.Gateway.OpenAIWS.WriteTimeoutSeconds = 0 },
|
||||
wantErr: "gateway.openai_ws.write_timeout_seconds",
|
||||
},
|
||||
{
|
||||
name: "pool_target_utilization 必须在 (0,1]",
|
||||
mutate: func(c *Config) { c.Gateway.OpenAIWS.PoolTargetUtilization = 0 },
|
||||
wantErr: "gateway.openai_ws.pool_target_utilization",
|
||||
},
|
||||
{
|
||||
name: "queue_limit_per_conn 必须为正数",
|
||||
mutate: func(c *Config) { c.Gateway.OpenAIWS.QueueLimitPerConn = 0 },
|
||||
wantErr: "gateway.openai_ws.queue_limit_per_conn",
|
||||
},
|
||||
{
|
||||
name: "fallback_cooldown_seconds 不能为负数",
|
||||
mutate: func(c *Config) { c.Gateway.OpenAIWS.FallbackCooldownSeconds = -1 },
|
||||
wantErr: "gateway.openai_ws.fallback_cooldown_seconds",
|
||||
},
|
||||
{
|
||||
name: "store_disabled_conn_mode 必须为 strict|adaptive|off",
|
||||
mutate: func(c *Config) { c.Gateway.OpenAIWS.StoreDisabledConnMode = "invalid" },
|
||||
wantErr: "gateway.openai_ws.store_disabled_conn_mode",
|
||||
},
|
||||
{
|
||||
name: "ingress_mode_default 必须为 off|shared|dedicated",
|
||||
mutate: func(c *Config) { c.Gateway.OpenAIWS.IngressModeDefault = "invalid" },
|
||||
wantErr: "gateway.openai_ws.ingress_mode_default",
|
||||
},
|
||||
{
|
||||
name: "payload_log_sample_rate 必须在 [0,1] 范围内",
|
||||
mutate: func(c *Config) { c.Gateway.OpenAIWS.PayloadLogSampleRate = 1.2 },
|
||||
wantErr: "gateway.openai_ws.payload_log_sample_rate",
|
||||
},
|
||||
{
|
||||
name: "retry_total_budget_ms 不能为负数",
|
||||
mutate: func(c *Config) { c.Gateway.OpenAIWS.RetryTotalBudgetMS = -1 },
|
||||
wantErr: "gateway.openai_ws.retry_total_budget_ms",
|
||||
},
|
||||
{
|
||||
name: "lb_top_k 必须为正数",
|
||||
mutate: func(c *Config) { c.Gateway.OpenAIWS.LBTopK = 0 },
|
||||
wantErr: "gateway.openai_ws.lb_top_k",
|
||||
},
|
||||
{
|
||||
name: "sticky_session_ttl_seconds 必须为正数",
|
||||
mutate: func(c *Config) { c.Gateway.OpenAIWS.StickySessionTTLSeconds = 0 },
|
||||
wantErr: "gateway.openai_ws.sticky_session_ttl_seconds",
|
||||
},
|
||||
{
|
||||
name: "sticky_response_id_ttl_seconds 必须为正数",
|
||||
mutate: func(c *Config) {
|
||||
c.Gateway.OpenAIWS.StickyResponseIDTTLSeconds = 0
|
||||
c.Gateway.OpenAIWS.StickyPreviousResponseTTLSeconds = 0
|
||||
},
|
||||
wantErr: "gateway.openai_ws.sticky_response_id_ttl_seconds",
|
||||
},
|
||||
{
|
||||
name: "sticky_previous_response_ttl_seconds 不能为负数",
|
||||
mutate: func(c *Config) { c.Gateway.OpenAIWS.StickyPreviousResponseTTLSeconds = -1 },
|
||||
wantErr: "gateway.openai_ws.sticky_previous_response_ttl_seconds",
|
||||
},
|
||||
{
|
||||
name: "scheduler_score_weights 不能为负数",
|
||||
mutate: func(c *Config) { c.Gateway.OpenAIWS.SchedulerScoreWeights.Queue = -0.1 },
|
||||
wantErr: "gateway.openai_ws.scheduler_score_weights.* must be non-negative",
|
||||
},
|
||||
{
|
||||
name: "scheduler_score_weights 不能全为 0",
|
||||
mutate: func(c *Config) {
|
||||
c.Gateway.OpenAIWS.SchedulerScoreWeights.Priority = 0
|
||||
c.Gateway.OpenAIWS.SchedulerScoreWeights.Load = 0
|
||||
c.Gateway.OpenAIWS.SchedulerScoreWeights.Queue = 0
|
||||
c.Gateway.OpenAIWS.SchedulerScoreWeights.ErrorRate = 0
|
||||
c.Gateway.OpenAIWS.SchedulerScoreWeights.TTFT = 0
|
||||
},
|
||||
wantErr: "gateway.openai_ws.scheduler_score_weights must not all be zero",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
cfg := buildValid(t)
|
||||
tc.mutate(cfg)
|
||||
|
||||
err := cfg.Validate()
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), tc.wantErr)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateConfig_AutoScaleDisabledIgnoreAutoScaleFields(t *testing.T) {
|
||||
resetViperWithJWTSecret(t)
|
||||
cfg, err := Load()
|
||||
|
||||
@@ -104,6 +104,9 @@ var DefaultAntigravityModelMapping = map[string]string{
|
||||
"gemini-3.1-flash-image": "gemini-3.1-flash-image",
|
||||
// Gemini 3.1 image preview 映射
|
||||
"gemini-3.1-flash-image-preview": "gemini-3.1-flash-image",
|
||||
// Gemini 3 image 兼容映射(向 3.1 image 迁移)
|
||||
"gemini-3-pro-image": "gemini-3.1-flash-image",
|
||||
"gemini-3-pro-image-preview": "gemini-3.1-flash-image",
|
||||
// 其他官方模型
|
||||
"gpt-oss-120b-medium": "gpt-oss-120b-medium",
|
||||
"tab_flash_lite_preview": "tab_flash_lite_preview",
|
||||
|
||||
24
backend/internal/domain/constants_test.go
Normal file
24
backend/internal/domain/constants_test.go
Normal file
@@ -0,0 +1,24 @@
|
||||
package domain
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestDefaultAntigravityModelMapping_ImageCompatibilityAliases(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cases := map[string]string{
|
||||
"gemini-3.1-flash-image": "gemini-3.1-flash-image",
|
||||
"gemini-3.1-flash-image-preview": "gemini-3.1-flash-image",
|
||||
"gemini-3-pro-image": "gemini-3.1-flash-image",
|
||||
"gemini-3-pro-image-preview": "gemini-3.1-flash-image",
|
||||
}
|
||||
|
||||
for from, want := range cases {
|
||||
got, ok := DefaultAntigravityModelMapping[from]
|
||||
if !ok {
|
||||
t.Fatalf("expected mapping for %q to exist", from)
|
||||
}
|
||||
if got != want {
|
||||
t.Fatalf("unexpected mapping for %q: got %q want %q", from, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -64,6 +64,7 @@ func setupAccountDataRouter() (*gin.Engine, *stubAdminService) {
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
|
||||
router.GET("/api/v1/admin/accounts/data", h.ExportData)
|
||||
|
||||
@@ -53,6 +53,7 @@ type AccountHandler struct {
|
||||
concurrencyService *service.ConcurrencyService
|
||||
crsSyncService *service.CRSSyncService
|
||||
sessionLimitCache service.SessionLimitCache
|
||||
rpmCache service.RPMCache
|
||||
tokenCacheInvalidator service.TokenCacheInvalidator
|
||||
}
|
||||
|
||||
@@ -69,6 +70,7 @@ func NewAccountHandler(
|
||||
concurrencyService *service.ConcurrencyService,
|
||||
crsSyncService *service.CRSSyncService,
|
||||
sessionLimitCache service.SessionLimitCache,
|
||||
rpmCache service.RPMCache,
|
||||
tokenCacheInvalidator service.TokenCacheInvalidator,
|
||||
) *AccountHandler {
|
||||
return &AccountHandler{
|
||||
@@ -83,6 +85,7 @@ func NewAccountHandler(
|
||||
concurrencyService: concurrencyService,
|
||||
crsSyncService: crsSyncService,
|
||||
sessionLimitCache: sessionLimitCache,
|
||||
rpmCache: rpmCache,
|
||||
tokenCacheInvalidator: tokenCacheInvalidator,
|
||||
}
|
||||
}
|
||||
@@ -154,6 +157,7 @@ type AccountWithConcurrency struct {
|
||||
// 以下字段仅对 Anthropic OAuth/SetupToken 账号有效,且仅在启用相应功能时返回
|
||||
CurrentWindowCost *float64 `json:"current_window_cost,omitempty"` // 当前窗口费用
|
||||
ActiveSessions *int `json:"active_sessions,omitempty"` // 当前活跃会话数
|
||||
CurrentRPM *int `json:"current_rpm,omitempty"` // 当前分钟 RPM 计数
|
||||
}
|
||||
|
||||
func (h *AccountHandler) buildAccountResponseWithRuntime(ctx context.Context, account *service.Account) AccountWithConcurrency {
|
||||
@@ -189,6 +193,12 @@ func (h *AccountHandler) buildAccountResponseWithRuntime(ctx context.Context, ac
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if h.rpmCache != nil && account.GetBaseRPM() > 0 {
|
||||
if rpm, err := h.rpmCache.GetRPM(ctx, account.ID); err == nil {
|
||||
item.CurrentRPM = &rpm
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return item
|
||||
@@ -207,6 +217,7 @@ func (h *AccountHandler) List(c *gin.Context) {
|
||||
if len(search) > 100 {
|
||||
search = search[:100]
|
||||
}
|
||||
lite := parseBoolQueryWithDefault(c.Query("lite"), false)
|
||||
|
||||
var groupID int64
|
||||
if groupIDStr := c.Query("group"); groupIDStr != "" {
|
||||
@@ -225,67 +236,81 @@ func (h *AccountHandler) List(c *gin.Context) {
|
||||
accountIDs[i] = acc.ID
|
||||
}
|
||||
|
||||
concurrencyCounts, err := h.concurrencyService.GetAccountConcurrencyBatch(c.Request.Context(), accountIDs)
|
||||
if err != nil {
|
||||
// Log error but don't fail the request, just use 0 for all
|
||||
concurrencyCounts = make(map[int64]int)
|
||||
}
|
||||
|
||||
// 识别需要查询窗口费用和会话数的账号(Anthropic OAuth/SetupToken 且启用了相应功能)
|
||||
windowCostAccountIDs := make([]int64, 0)
|
||||
sessionLimitAccountIDs := make([]int64, 0)
|
||||
sessionIdleTimeouts := make(map[int64]time.Duration) // 各账号的会话空闲超时配置
|
||||
for i := range accounts {
|
||||
acc := &accounts[i]
|
||||
if acc.IsAnthropicOAuthOrSetupToken() {
|
||||
if acc.GetWindowCostLimit() > 0 {
|
||||
windowCostAccountIDs = append(windowCostAccountIDs, acc.ID)
|
||||
}
|
||||
if acc.GetMaxSessions() > 0 {
|
||||
sessionLimitAccountIDs = append(sessionLimitAccountIDs, acc.ID)
|
||||
sessionIdleTimeouts[acc.ID] = time.Duration(acc.GetSessionIdleTimeoutMinutes()) * time.Minute
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 并行获取窗口费用和活跃会话数
|
||||
concurrencyCounts := make(map[int64]int)
|
||||
var windowCosts map[int64]float64
|
||||
var activeSessions map[int64]int
|
||||
|
||||
// 获取活跃会话数(批量查询,传入各账号的 idleTimeout 配置)
|
||||
if len(sessionLimitAccountIDs) > 0 && h.sessionLimitCache != nil {
|
||||
activeSessions, _ = h.sessionLimitCache.GetActiveSessionCountBatch(c.Request.Context(), sessionLimitAccountIDs, sessionIdleTimeouts)
|
||||
if activeSessions == nil {
|
||||
activeSessions = make(map[int64]int)
|
||||
var rpmCounts map[int64]int
|
||||
if !lite {
|
||||
// Get current concurrency counts for all accounts
|
||||
if h.concurrencyService != nil {
|
||||
if cc, ccErr := h.concurrencyService.GetAccountConcurrencyBatch(c.Request.Context(), accountIDs); ccErr == nil && cc != nil {
|
||||
concurrencyCounts = cc
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 获取窗口费用(并行查询)
|
||||
if len(windowCostAccountIDs) > 0 {
|
||||
windowCosts = make(map[int64]float64)
|
||||
var mu sync.Mutex
|
||||
g, gctx := errgroup.WithContext(c.Request.Context())
|
||||
g.SetLimit(10) // 限制并发数
|
||||
|
||||
// 识别需要查询窗口费用、会话数和 RPM 的账号(Anthropic OAuth/SetupToken 且启用了相应功能)
|
||||
windowCostAccountIDs := make([]int64, 0)
|
||||
sessionLimitAccountIDs := make([]int64, 0)
|
||||
rpmAccountIDs := make([]int64, 0)
|
||||
sessionIdleTimeouts := make(map[int64]time.Duration) // 各账号的会话空闲超时配置
|
||||
for i := range accounts {
|
||||
acc := &accounts[i]
|
||||
if !acc.IsAnthropicOAuthOrSetupToken() || acc.GetWindowCostLimit() <= 0 {
|
||||
continue
|
||||
}
|
||||
accCopy := acc // 闭包捕获
|
||||
g.Go(func() error {
|
||||
// 使用统一的窗口开始时间计算逻辑(考虑窗口过期情况)
|
||||
startTime := accCopy.GetCurrentWindowStartTime()
|
||||
stats, err := h.accountUsageService.GetAccountWindowStats(gctx, accCopy.ID, startTime)
|
||||
if err == nil && stats != nil {
|
||||
mu.Lock()
|
||||
windowCosts[accCopy.ID] = stats.StandardCost // 使用标准费用
|
||||
mu.Unlock()
|
||||
if acc.IsAnthropicOAuthOrSetupToken() {
|
||||
if acc.GetWindowCostLimit() > 0 {
|
||||
windowCostAccountIDs = append(windowCostAccountIDs, acc.ID)
|
||||
}
|
||||
return nil // 不返回错误,允许部分失败
|
||||
})
|
||||
if acc.GetMaxSessions() > 0 {
|
||||
sessionLimitAccountIDs = append(sessionLimitAccountIDs, acc.ID)
|
||||
sessionIdleTimeouts[acc.ID] = time.Duration(acc.GetSessionIdleTimeoutMinutes()) * time.Minute
|
||||
}
|
||||
if acc.GetBaseRPM() > 0 {
|
||||
rpmAccountIDs = append(rpmAccountIDs, acc.ID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 获取 RPM 计数(批量查询)
|
||||
if len(rpmAccountIDs) > 0 && h.rpmCache != nil {
|
||||
rpmCounts, _ = h.rpmCache.GetRPMBatch(c.Request.Context(), rpmAccountIDs)
|
||||
if rpmCounts == nil {
|
||||
rpmCounts = make(map[int64]int)
|
||||
}
|
||||
}
|
||||
|
||||
// 获取活跃会话数(批量查询,传入各账号的 idleTimeout 配置)
|
||||
if len(sessionLimitAccountIDs) > 0 && h.sessionLimitCache != nil {
|
||||
activeSessions, _ = h.sessionLimitCache.GetActiveSessionCountBatch(c.Request.Context(), sessionLimitAccountIDs, sessionIdleTimeouts)
|
||||
if activeSessions == nil {
|
||||
activeSessions = make(map[int64]int)
|
||||
}
|
||||
}
|
||||
|
||||
// 获取窗口费用(并行查询)
|
||||
if len(windowCostAccountIDs) > 0 {
|
||||
windowCosts = make(map[int64]float64)
|
||||
var mu sync.Mutex
|
||||
g, gctx := errgroup.WithContext(c.Request.Context())
|
||||
g.SetLimit(10) // 限制并发数
|
||||
|
||||
for i := range accounts {
|
||||
acc := &accounts[i]
|
||||
if !acc.IsAnthropicOAuthOrSetupToken() || acc.GetWindowCostLimit() <= 0 {
|
||||
continue
|
||||
}
|
||||
accCopy := acc // 闭包捕获
|
||||
g.Go(func() error {
|
||||
// 使用统一的窗口开始时间计算逻辑(考虑窗口过期情况)
|
||||
startTime := accCopy.GetCurrentWindowStartTime()
|
||||
stats, err := h.accountUsageService.GetAccountWindowStats(gctx, accCopy.ID, startTime)
|
||||
if err == nil && stats != nil {
|
||||
mu.Lock()
|
||||
windowCosts[accCopy.ID] = stats.StandardCost // 使用标准费用
|
||||
mu.Unlock()
|
||||
}
|
||||
return nil // 不返回错误,允许部分失败
|
||||
})
|
||||
}
|
||||
_ = g.Wait()
|
||||
}
|
||||
_ = g.Wait()
|
||||
}
|
||||
|
||||
// Build response with concurrency info
|
||||
@@ -311,10 +336,17 @@ func (h *AccountHandler) List(c *gin.Context) {
|
||||
}
|
||||
}
|
||||
|
||||
// 添加 RPM 计数(仅当启用时)
|
||||
if rpmCounts != nil {
|
||||
if rpm, ok := rpmCounts[acc.ID]; ok {
|
||||
item.CurrentRPM = &rpm
|
||||
}
|
||||
}
|
||||
|
||||
result[i] = item
|
||||
}
|
||||
|
||||
etag := buildAccountsListETag(result, total, page, pageSize, platform, accountType, status, search)
|
||||
etag := buildAccountsListETag(result, total, page, pageSize, platform, accountType, status, search, lite)
|
||||
if etag != "" {
|
||||
c.Header("ETag", etag)
|
||||
c.Header("Vary", "If-None-Match")
|
||||
@@ -332,6 +364,7 @@ func buildAccountsListETag(
|
||||
total int64,
|
||||
page, pageSize int,
|
||||
platform, accountType, status, search string,
|
||||
lite bool,
|
||||
) string {
|
||||
payload := struct {
|
||||
Total int64 `json:"total"`
|
||||
@@ -341,6 +374,7 @@ func buildAccountsListETag(
|
||||
AccountType string `json:"type"`
|
||||
Status string `json:"status"`
|
||||
Search string `json:"search"`
|
||||
Lite bool `json:"lite"`
|
||||
Items []AccountWithConcurrency `json:"items"`
|
||||
}{
|
||||
Total: total,
|
||||
@@ -350,6 +384,7 @@ func buildAccountsListETag(
|
||||
AccountType: accountType,
|
||||
Status: status,
|
||||
Search: search,
|
||||
Lite: lite,
|
||||
Items: items,
|
||||
}
|
||||
raw, err := json.Marshal(payload)
|
||||
@@ -453,6 +488,8 @@ func (h *AccountHandler) Create(c *gin.Context) {
|
||||
response.BadRequest(c, "rate_multiplier must be >= 0")
|
||||
return
|
||||
}
|
||||
// base_rpm 输入校验:负值归零,超过 10000 截断
|
||||
sanitizeExtraBaseRPM(req.Extra)
|
||||
|
||||
// 确定是否跳过混合渠道检查
|
||||
skipCheck := req.ConfirmMixedChannelRisk != nil && *req.ConfirmMixedChannelRisk
|
||||
@@ -522,6 +559,8 @@ func (h *AccountHandler) Update(c *gin.Context) {
|
||||
response.BadRequest(c, "rate_multiplier must be >= 0")
|
||||
return
|
||||
}
|
||||
// base_rpm 输入校验:负值归零,超过 10000 截断
|
||||
sanitizeExtraBaseRPM(req.Extra)
|
||||
|
||||
// 确定是否跳过混合渠道检查
|
||||
skipCheck := req.ConfirmMixedChannelRisk != nil && *req.ConfirmMixedChannelRisk
|
||||
@@ -904,6 +943,9 @@ func (h *AccountHandler) BatchCreate(c *gin.Context) {
|
||||
continue
|
||||
}
|
||||
|
||||
// base_rpm 输入校验:负值归零,超过 10000 截断
|
||||
sanitizeExtraBaseRPM(item.Extra)
|
||||
|
||||
skipCheck := item.ConfirmMixedChannelRisk != nil && *item.ConfirmMixedChannelRisk
|
||||
|
||||
account, err := h.adminService.CreateAccount(ctx, &service.CreateAccountInput{
|
||||
@@ -1048,6 +1090,8 @@ func (h *AccountHandler) BulkUpdate(c *gin.Context) {
|
||||
response.BadRequest(c, "rate_multiplier must be >= 0")
|
||||
return
|
||||
}
|
||||
// base_rpm 输入校验:负值归零,超过 10000 截断
|
||||
sanitizeExtraBaseRPM(req.Extra)
|
||||
|
||||
// 确定是否跳过混合渠道检查
|
||||
skipCheck := req.ConfirmMixedChannelRisk != nil && *req.ConfirmMixedChannelRisk
|
||||
@@ -1351,6 +1395,57 @@ func (h *AccountHandler) GetTodayStats(c *gin.Context) {
|
||||
response.Success(c, stats)
|
||||
}
|
||||
|
||||
// BatchTodayStatsRequest 批量今日统计请求体。
|
||||
type BatchTodayStatsRequest struct {
|
||||
AccountIDs []int64 `json:"account_ids" binding:"required"`
|
||||
}
|
||||
|
||||
// GetBatchTodayStats 批量获取多个账号的今日统计。
|
||||
// POST /api/v1/admin/accounts/today-stats/batch
|
||||
func (h *AccountHandler) GetBatchTodayStats(c *gin.Context) {
|
||||
var req BatchTodayStatsRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
response.BadRequest(c, "Invalid request: "+err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
accountIDs := normalizeInt64IDList(req.AccountIDs)
|
||||
if len(accountIDs) == 0 {
|
||||
response.Success(c, gin.H{"stats": map[string]any{}})
|
||||
return
|
||||
}
|
||||
|
||||
cacheKey := buildAccountTodayStatsBatchCacheKey(accountIDs)
|
||||
if cached, ok := accountTodayStatsBatchCache.Get(cacheKey); ok {
|
||||
if cached.ETag != "" {
|
||||
c.Header("ETag", cached.ETag)
|
||||
c.Header("Vary", "If-None-Match")
|
||||
if ifNoneMatchMatched(c.GetHeader("If-None-Match"), cached.ETag) {
|
||||
c.Status(http.StatusNotModified)
|
||||
return
|
||||
}
|
||||
}
|
||||
c.Header("X-Snapshot-Cache", "hit")
|
||||
response.Success(c, cached.Payload)
|
||||
return
|
||||
}
|
||||
|
||||
stats, err := h.accountUsageService.GetTodayStatsBatch(c.Request.Context(), accountIDs)
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
|
||||
payload := gin.H{"stats": stats}
|
||||
cached := accountTodayStatsBatchCache.Set(cacheKey, payload)
|
||||
if cached.ETag != "" {
|
||||
c.Header("ETag", cached.ETag)
|
||||
c.Header("Vary", "If-None-Match")
|
||||
}
|
||||
c.Header("X-Snapshot-Cache", "miss")
|
||||
response.Success(c, payload)
|
||||
}
|
||||
|
||||
// SetSchedulableRequest represents the request body for setting schedulable status
|
||||
type SetSchedulableRequest struct {
|
||||
Schedulable bool `json:"schedulable"`
|
||||
@@ -1692,3 +1787,22 @@ func (h *AccountHandler) BatchRefreshTier(c *gin.Context) {
|
||||
func (h *AccountHandler) GetAntigravityDefaultModelMapping(c *gin.Context) {
|
||||
response.Success(c, domain.DefaultAntigravityModelMapping)
|
||||
}
|
||||
|
||||
// sanitizeExtraBaseRPM 对 extra map 中的 base_rpm 值进行范围校验和归一化。
|
||||
// 负值归零,超过 10000 截断为 10000。extra 为 nil 或不含 base_rpm 时无操作。
|
||||
func sanitizeExtraBaseRPM(extra map[string]any) {
|
||||
if extra == nil {
|
||||
return
|
||||
}
|
||||
raw, ok := extra["base_rpm"]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
v := service.ParseExtraInt(raw)
|
||||
if v < 0 {
|
||||
v = 0
|
||||
} else if v > 10000 {
|
||||
v = 10000
|
||||
}
|
||||
extra["base_rpm"] = v
|
||||
}
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
func setupAccountMixedChannelRouter(adminSvc *stubAdminService) *gin.Engine {
|
||||
gin.SetMode(gin.TestMode)
|
||||
router := gin.New()
|
||||
accountHandler := NewAccountHandler(adminSvc, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
|
||||
accountHandler := NewAccountHandler(adminSvc, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
|
||||
router.POST("/api/v1/admin/accounts/check-mixed-channel", accountHandler.CheckMixedChannel)
|
||||
router.POST("/api/v1/admin/accounts", accountHandler.Create)
|
||||
router.PUT("/api/v1/admin/accounts/:id", accountHandler.Update)
|
||||
|
||||
@@ -28,6 +28,7 @@ func TestAccountHandler_Create_AnthropicAPIKeyPassthroughExtraForwarded(t *testi
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
|
||||
router := gin.New()
|
||||
|
||||
25
backend/internal/handler/admin/account_today_stats_cache.go
Normal file
25
backend/internal/handler/admin/account_today_stats_cache.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package admin
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var accountTodayStatsBatchCache = newSnapshotCache(30 * time.Second)
|
||||
|
||||
func buildAccountTodayStatsBatchCacheKey(accountIDs []int64) string {
|
||||
if len(accountIDs) == 0 {
|
||||
return "accounts_today_stats_empty"
|
||||
}
|
||||
var b strings.Builder
|
||||
b.Grow(len(accountIDs) * 6)
|
||||
_, _ = b.WriteString("accounts_today_stats:")
|
||||
for i, id := range accountIDs {
|
||||
if i > 0 {
|
||||
_ = b.WriteByte(',')
|
||||
}
|
||||
_, _ = b.WriteString(strconv.FormatInt(id, 10))
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
@@ -407,5 +407,23 @@ func (s *stubAdminService) UpdateGroupSortOrders(ctx context.Context, updates []
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *stubAdminService) AdminUpdateAPIKeyGroupID(ctx context.Context, keyID int64, groupID *int64) (*service.AdminUpdateAPIKeyGroupIDResult, error) {
|
||||
for i := range s.apiKeys {
|
||||
if s.apiKeys[i].ID == keyID {
|
||||
k := s.apiKeys[i]
|
||||
if groupID != nil {
|
||||
if *groupID == 0 {
|
||||
k.GroupID = nil
|
||||
} else {
|
||||
gid := *groupID
|
||||
k.GroupID = &gid
|
||||
}
|
||||
}
|
||||
return &service.AdminUpdateAPIKeyGroupIDResult{APIKey: &k}, nil
|
||||
}
|
||||
}
|
||||
return nil, service.ErrAPIKeyNotFound
|
||||
}
|
||||
|
||||
// Ensure stub implements interface.
|
||||
var _ service.AdminService = (*stubAdminService)(nil)
|
||||
|
||||
63
backend/internal/handler/admin/apikey_handler.go
Normal file
63
backend/internal/handler/admin/apikey_handler.go
Normal file
@@ -0,0 +1,63 @@
|
||||
package admin
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/handler/dto"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/response"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// AdminAPIKeyHandler handles admin API key management
|
||||
type AdminAPIKeyHandler struct {
|
||||
adminService service.AdminService
|
||||
}
|
||||
|
||||
// NewAdminAPIKeyHandler creates a new admin API key handler
|
||||
func NewAdminAPIKeyHandler(adminService service.AdminService) *AdminAPIKeyHandler {
|
||||
return &AdminAPIKeyHandler{
|
||||
adminService: adminService,
|
||||
}
|
||||
}
|
||||
|
||||
// AdminUpdateAPIKeyGroupRequest represents the request to update an API key's group
|
||||
type AdminUpdateAPIKeyGroupRequest struct {
|
||||
GroupID *int64 `json:"group_id"` // nil=不修改, 0=解绑, >0=绑定到目标分组
|
||||
}
|
||||
|
||||
// UpdateGroup handles updating an API key's group binding
|
||||
// PUT /api/v1/admin/api-keys/:id
|
||||
func (h *AdminAPIKeyHandler) UpdateGroup(c *gin.Context) {
|
||||
keyID, err := strconv.ParseInt(c.Param("id"), 10, 64)
|
||||
if err != nil {
|
||||
response.BadRequest(c, "Invalid API key ID")
|
||||
return
|
||||
}
|
||||
|
||||
var req AdminUpdateAPIKeyGroupRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
response.BadRequest(c, "Invalid request: "+err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
result, err := h.adminService.AdminUpdateAPIKeyGroupID(c.Request.Context(), keyID, req.GroupID)
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
|
||||
resp := struct {
|
||||
APIKey *dto.APIKey `json:"api_key"`
|
||||
AutoGrantedGroupAccess bool `json:"auto_granted_group_access"`
|
||||
GrantedGroupID *int64 `json:"granted_group_id,omitempty"`
|
||||
GrantedGroupName string `json:"granted_group_name,omitempty"`
|
||||
}{
|
||||
APIKey: dto.APIKeyFromService(result.APIKey),
|
||||
AutoGrantedGroupAccess: result.AutoGrantedGroupAccess,
|
||||
GrantedGroupID: result.GrantedGroupID,
|
||||
GrantedGroupName: result.GrantedGroupName,
|
||||
}
|
||||
response.Success(c, resp)
|
||||
}
|
||||
202
backend/internal/handler/admin/apikey_handler_test.go
Normal file
202
backend/internal/handler/admin/apikey_handler_test.go
Normal file
@@ -0,0 +1,202 @@
|
||||
package admin
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func setupAPIKeyHandler(adminSvc service.AdminService) *gin.Engine {
|
||||
gin.SetMode(gin.TestMode)
|
||||
router := gin.New()
|
||||
h := NewAdminAPIKeyHandler(adminSvc)
|
||||
router.PUT("/api/v1/admin/api-keys/:id", h.UpdateGroup)
|
||||
return router
|
||||
}
|
||||
|
||||
func TestAdminAPIKeyHandler_UpdateGroup_InvalidID(t *testing.T) {
|
||||
router := setupAPIKeyHandler(newStubAdminService())
|
||||
body := `{"group_id": 2}`
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
req := httptest.NewRequest(http.MethodPut, "/api/v1/admin/api-keys/abc", bytes.NewBufferString(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
router.ServeHTTP(rec, req)
|
||||
|
||||
require.Equal(t, http.StatusBadRequest, rec.Code)
|
||||
require.Contains(t, rec.Body.String(), "Invalid API key ID")
|
||||
}
|
||||
|
||||
func TestAdminAPIKeyHandler_UpdateGroup_InvalidJSON(t *testing.T) {
|
||||
router := setupAPIKeyHandler(newStubAdminService())
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
req := httptest.NewRequest(http.MethodPut, "/api/v1/admin/api-keys/10", bytes.NewBufferString(`{bad json`))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
router.ServeHTTP(rec, req)
|
||||
|
||||
require.Equal(t, http.StatusBadRequest, rec.Code)
|
||||
require.Contains(t, rec.Body.String(), "Invalid request")
|
||||
}
|
||||
|
||||
func TestAdminAPIKeyHandler_UpdateGroup_KeyNotFound(t *testing.T) {
|
||||
router := setupAPIKeyHandler(newStubAdminService())
|
||||
body := `{"group_id": 2}`
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
req := httptest.NewRequest(http.MethodPut, "/api/v1/admin/api-keys/999", bytes.NewBufferString(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
router.ServeHTTP(rec, req)
|
||||
|
||||
// ErrAPIKeyNotFound maps to 404
|
||||
require.Equal(t, http.StatusNotFound, rec.Code)
|
||||
}
|
||||
|
||||
func TestAdminAPIKeyHandler_UpdateGroup_BindGroup(t *testing.T) {
|
||||
router := setupAPIKeyHandler(newStubAdminService())
|
||||
body := `{"group_id": 2}`
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
req := httptest.NewRequest(http.MethodPut, "/api/v1/admin/api-keys/10", bytes.NewBufferString(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
router.ServeHTTP(rec, req)
|
||||
|
||||
require.Equal(t, http.StatusOK, rec.Code)
|
||||
|
||||
var resp struct {
|
||||
Code int `json:"code"`
|
||||
Data json.RawMessage `json:"data"`
|
||||
}
|
||||
require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp))
|
||||
require.Equal(t, 0, resp.Code)
|
||||
|
||||
var data struct {
|
||||
APIKey struct {
|
||||
ID int64 `json:"id"`
|
||||
GroupID *int64 `json:"group_id"`
|
||||
} `json:"api_key"`
|
||||
AutoGrantedGroupAccess bool `json:"auto_granted_group_access"`
|
||||
}
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &data))
|
||||
require.Equal(t, int64(10), data.APIKey.ID)
|
||||
require.NotNil(t, data.APIKey.GroupID)
|
||||
require.Equal(t, int64(2), *data.APIKey.GroupID)
|
||||
}
|
||||
|
||||
func TestAdminAPIKeyHandler_UpdateGroup_Unbind(t *testing.T) {
|
||||
svc := newStubAdminService()
|
||||
gid := int64(2)
|
||||
svc.apiKeys[0].GroupID = &gid
|
||||
router := setupAPIKeyHandler(svc)
|
||||
body := `{"group_id": 0}`
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
req := httptest.NewRequest(http.MethodPut, "/api/v1/admin/api-keys/10", bytes.NewBufferString(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
router.ServeHTTP(rec, req)
|
||||
|
||||
require.Equal(t, http.StatusOK, rec.Code)
|
||||
|
||||
var resp struct {
|
||||
Data struct {
|
||||
APIKey struct {
|
||||
GroupID *int64 `json:"group_id"`
|
||||
} `json:"api_key"`
|
||||
} `json:"data"`
|
||||
}
|
||||
require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp))
|
||||
require.Nil(t, resp.Data.APIKey.GroupID)
|
||||
}
|
||||
|
||||
func TestAdminAPIKeyHandler_UpdateGroup_ServiceError(t *testing.T) {
|
||||
svc := &failingUpdateGroupService{
|
||||
stubAdminService: newStubAdminService(),
|
||||
err: errors.New("internal failure"),
|
||||
}
|
||||
router := setupAPIKeyHandler(svc)
|
||||
body := `{"group_id": 2}`
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
req := httptest.NewRequest(http.MethodPut, "/api/v1/admin/api-keys/10", bytes.NewBufferString(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
router.ServeHTTP(rec, req)
|
||||
|
||||
require.Equal(t, http.StatusInternalServerError, rec.Code)
|
||||
}
|
||||
|
||||
// H2: empty body → group_id is nil → no-op, returns original key
|
||||
func TestAdminAPIKeyHandler_UpdateGroup_EmptyBody_NoChange(t *testing.T) {
|
||||
router := setupAPIKeyHandler(newStubAdminService())
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
req := httptest.NewRequest(http.MethodPut, "/api/v1/admin/api-keys/10", bytes.NewBufferString(`{}`))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
router.ServeHTTP(rec, req)
|
||||
|
||||
require.Equal(t, http.StatusOK, rec.Code)
|
||||
|
||||
var resp struct {
|
||||
Code int `json:"code"`
|
||||
Data struct {
|
||||
APIKey struct {
|
||||
ID int64 `json:"id"`
|
||||
} `json:"api_key"`
|
||||
} `json:"data"`
|
||||
}
|
||||
require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp))
|
||||
require.Equal(t, 0, resp.Code)
|
||||
require.Equal(t, int64(10), resp.Data.APIKey.ID)
|
||||
}
|
||||
|
||||
// M2: service returns GROUP_NOT_ACTIVE → handler maps to 400
|
||||
func TestAdminAPIKeyHandler_UpdateGroup_GroupNotActive(t *testing.T) {
|
||||
svc := &failingUpdateGroupService{
|
||||
stubAdminService: newStubAdminService(),
|
||||
err: infraerrors.BadRequest("GROUP_NOT_ACTIVE", "target group is not active"),
|
||||
}
|
||||
router := setupAPIKeyHandler(svc)
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
req := httptest.NewRequest(http.MethodPut, "/api/v1/admin/api-keys/10", bytes.NewBufferString(`{"group_id": 5}`))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
router.ServeHTTP(rec, req)
|
||||
|
||||
require.Equal(t, http.StatusBadRequest, rec.Code)
|
||||
require.Contains(t, rec.Body.String(), "GROUP_NOT_ACTIVE")
|
||||
}
|
||||
|
||||
// M2: service returns INVALID_GROUP_ID → handler maps to 400
|
||||
func TestAdminAPIKeyHandler_UpdateGroup_NegativeGroupID(t *testing.T) {
|
||||
svc := &failingUpdateGroupService{
|
||||
stubAdminService: newStubAdminService(),
|
||||
err: infraerrors.BadRequest("INVALID_GROUP_ID", "group_id must be non-negative"),
|
||||
}
|
||||
router := setupAPIKeyHandler(svc)
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
req := httptest.NewRequest(http.MethodPut, "/api/v1/admin/api-keys/10", bytes.NewBufferString(`{"group_id": -5}`))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
router.ServeHTTP(rec, req)
|
||||
|
||||
require.Equal(t, http.StatusBadRequest, rec.Code)
|
||||
require.Contains(t, rec.Body.String(), "INVALID_GROUP_ID")
|
||||
}
|
||||
|
||||
// failingUpdateGroupService overrides AdminUpdateAPIKeyGroupID to return an error.
|
||||
type failingUpdateGroupService struct {
|
||||
*stubAdminService
|
||||
err error
|
||||
}
|
||||
|
||||
func (f *failingUpdateGroupService) AdminUpdateAPIKeyGroupID(_ context.Context, _ int64, _ *int64) (*service.AdminUpdateAPIKeyGroupIDResult, error) {
|
||||
return nil, f.err
|
||||
}
|
||||
@@ -36,7 +36,7 @@ func (f *failingAdminService) UpdateAccount(ctx context.Context, id int64, input
|
||||
func setupAccountHandlerWithService(adminSvc service.AdminService) (*gin.Engine, *AccountHandler) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
router := gin.New()
|
||||
handler := NewAccountHandler(adminSvc, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
|
||||
handler := NewAccountHandler(adminSvc, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
|
||||
router.POST("/api/v1/admin/accounts/batch-update-credentials", handler.BatchUpdateCredentials)
|
||||
return router, handler
|
||||
}
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
package admin
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/response"
|
||||
@@ -186,7 +188,7 @@ func (h *DashboardHandler) GetRealtimeMetrics(c *gin.Context) {
|
||||
|
||||
// GetUsageTrend handles getting usage trend data
|
||||
// GET /api/v1/admin/dashboard/trend
|
||||
// Query params: start_date, end_date (YYYY-MM-DD), granularity (day/hour), user_id, api_key_id, model, account_id, group_id, stream, billing_type
|
||||
// Query params: start_date, end_date (YYYY-MM-DD), granularity (day/hour), user_id, api_key_id, model, account_id, group_id, request_type, stream, billing_type
|
||||
func (h *DashboardHandler) GetUsageTrend(c *gin.Context) {
|
||||
startTime, endTime := parseTimeRange(c)
|
||||
granularity := c.DefaultQuery("granularity", "day")
|
||||
@@ -194,6 +196,7 @@ func (h *DashboardHandler) GetUsageTrend(c *gin.Context) {
|
||||
// Parse optional filter params
|
||||
var userID, apiKeyID, accountID, groupID int64
|
||||
var model string
|
||||
var requestType *int16
|
||||
var stream *bool
|
||||
var billingType *int8
|
||||
|
||||
@@ -220,9 +223,20 @@ func (h *DashboardHandler) GetUsageTrend(c *gin.Context) {
|
||||
if modelStr := c.Query("model"); modelStr != "" {
|
||||
model = modelStr
|
||||
}
|
||||
if streamStr := c.Query("stream"); streamStr != "" {
|
||||
if requestTypeStr := strings.TrimSpace(c.Query("request_type")); requestTypeStr != "" {
|
||||
parsed, err := service.ParseUsageRequestType(requestTypeStr)
|
||||
if err != nil {
|
||||
response.BadRequest(c, err.Error())
|
||||
return
|
||||
}
|
||||
value := int16(parsed)
|
||||
requestType = &value
|
||||
} else if streamStr := c.Query("stream"); streamStr != "" {
|
||||
if streamVal, err := strconv.ParseBool(streamStr); err == nil {
|
||||
stream = &streamVal
|
||||
} else {
|
||||
response.BadRequest(c, "Invalid stream value, use true or false")
|
||||
return
|
||||
}
|
||||
}
|
||||
if billingTypeStr := c.Query("billing_type"); billingTypeStr != "" {
|
||||
@@ -235,7 +249,7 @@ func (h *DashboardHandler) GetUsageTrend(c *gin.Context) {
|
||||
}
|
||||
}
|
||||
|
||||
trend, err := h.dashboardService.GetUsageTrendWithFilters(c.Request.Context(), startTime, endTime, granularity, userID, apiKeyID, accountID, groupID, model, stream, billingType)
|
||||
trend, err := h.dashboardService.GetUsageTrendWithFilters(c.Request.Context(), startTime, endTime, granularity, userID, apiKeyID, accountID, groupID, model, requestType, stream, billingType)
|
||||
if err != nil {
|
||||
response.Error(c, 500, "Failed to get usage trend")
|
||||
return
|
||||
@@ -251,12 +265,13 @@ func (h *DashboardHandler) GetUsageTrend(c *gin.Context) {
|
||||
|
||||
// GetModelStats handles getting model usage statistics
|
||||
// GET /api/v1/admin/dashboard/models
|
||||
// Query params: start_date, end_date (YYYY-MM-DD), user_id, api_key_id, account_id, group_id, stream, billing_type
|
||||
// Query params: start_date, end_date (YYYY-MM-DD), user_id, api_key_id, account_id, group_id, request_type, stream, billing_type
|
||||
func (h *DashboardHandler) GetModelStats(c *gin.Context) {
|
||||
startTime, endTime := parseTimeRange(c)
|
||||
|
||||
// Parse optional filter params
|
||||
var userID, apiKeyID, accountID, groupID int64
|
||||
var requestType *int16
|
||||
var stream *bool
|
||||
var billingType *int8
|
||||
|
||||
@@ -280,9 +295,20 @@ func (h *DashboardHandler) GetModelStats(c *gin.Context) {
|
||||
groupID = id
|
||||
}
|
||||
}
|
||||
if streamStr := c.Query("stream"); streamStr != "" {
|
||||
if requestTypeStr := strings.TrimSpace(c.Query("request_type")); requestTypeStr != "" {
|
||||
parsed, err := service.ParseUsageRequestType(requestTypeStr)
|
||||
if err != nil {
|
||||
response.BadRequest(c, err.Error())
|
||||
return
|
||||
}
|
||||
value := int16(parsed)
|
||||
requestType = &value
|
||||
} else if streamStr := c.Query("stream"); streamStr != "" {
|
||||
if streamVal, err := strconv.ParseBool(streamStr); err == nil {
|
||||
stream = &streamVal
|
||||
} else {
|
||||
response.BadRequest(c, "Invalid stream value, use true or false")
|
||||
return
|
||||
}
|
||||
}
|
||||
if billingTypeStr := c.Query("billing_type"); billingTypeStr != "" {
|
||||
@@ -295,7 +321,7 @@ func (h *DashboardHandler) GetModelStats(c *gin.Context) {
|
||||
}
|
||||
}
|
||||
|
||||
stats, err := h.dashboardService.GetModelStatsWithFilters(c.Request.Context(), startTime, endTime, userID, apiKeyID, accountID, groupID, stream, billingType)
|
||||
stats, err := h.dashboardService.GetModelStatsWithFilters(c.Request.Context(), startTime, endTime, userID, apiKeyID, accountID, groupID, requestType, stream, billingType)
|
||||
if err != nil {
|
||||
response.Error(c, 500, "Failed to get model statistics")
|
||||
return
|
||||
@@ -310,11 +336,12 @@ func (h *DashboardHandler) GetModelStats(c *gin.Context) {
|
||||
|
||||
// GetGroupStats handles getting group usage statistics
|
||||
// GET /api/v1/admin/dashboard/groups
|
||||
// Query params: start_date, end_date (YYYY-MM-DD), user_id, api_key_id, account_id, group_id, stream, billing_type
|
||||
// Query params: start_date, end_date (YYYY-MM-DD), user_id, api_key_id, account_id, group_id, request_type, stream, billing_type
|
||||
func (h *DashboardHandler) GetGroupStats(c *gin.Context) {
|
||||
startTime, endTime := parseTimeRange(c)
|
||||
|
||||
var userID, apiKeyID, accountID, groupID int64
|
||||
var requestType *int16
|
||||
var stream *bool
|
||||
var billingType *int8
|
||||
|
||||
@@ -338,9 +365,20 @@ func (h *DashboardHandler) GetGroupStats(c *gin.Context) {
|
||||
groupID = id
|
||||
}
|
||||
}
|
||||
if streamStr := c.Query("stream"); streamStr != "" {
|
||||
if requestTypeStr := strings.TrimSpace(c.Query("request_type")); requestTypeStr != "" {
|
||||
parsed, err := service.ParseUsageRequestType(requestTypeStr)
|
||||
if err != nil {
|
||||
response.BadRequest(c, err.Error())
|
||||
return
|
||||
}
|
||||
value := int16(parsed)
|
||||
requestType = &value
|
||||
} else if streamStr := c.Query("stream"); streamStr != "" {
|
||||
if streamVal, err := strconv.ParseBool(streamStr); err == nil {
|
||||
stream = &streamVal
|
||||
} else {
|
||||
response.BadRequest(c, "Invalid stream value, use true or false")
|
||||
return
|
||||
}
|
||||
}
|
||||
if billingTypeStr := c.Query("billing_type"); billingTypeStr != "" {
|
||||
@@ -353,7 +391,7 @@ func (h *DashboardHandler) GetGroupStats(c *gin.Context) {
|
||||
}
|
||||
}
|
||||
|
||||
stats, err := h.dashboardService.GetGroupStatsWithFilters(c.Request.Context(), startTime, endTime, userID, apiKeyID, accountID, groupID, stream, billingType)
|
||||
stats, err := h.dashboardService.GetGroupStatsWithFilters(c.Request.Context(), startTime, endTime, userID, apiKeyID, accountID, groupID, requestType, stream, billingType)
|
||||
if err != nil {
|
||||
response.Error(c, 500, "Failed to get group statistics")
|
||||
return
|
||||
@@ -423,6 +461,9 @@ type BatchUsersUsageRequest struct {
|
||||
UserIDs []int64 `json:"user_ids" binding:"required"`
|
||||
}
|
||||
|
||||
var dashboardBatchUsersUsageCache = newSnapshotCache(30 * time.Second)
|
||||
var dashboardBatchAPIKeysUsageCache = newSnapshotCache(30 * time.Second)
|
||||
|
||||
// GetBatchUsersUsage handles getting usage stats for multiple users
|
||||
// POST /api/v1/admin/dashboard/users-usage
|
||||
func (h *DashboardHandler) GetBatchUsersUsage(c *gin.Context) {
|
||||
@@ -432,18 +473,34 @@ func (h *DashboardHandler) GetBatchUsersUsage(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
if len(req.UserIDs) == 0 {
|
||||
userIDs := normalizeInt64IDList(req.UserIDs)
|
||||
if len(userIDs) == 0 {
|
||||
response.Success(c, gin.H{"stats": map[string]any{}})
|
||||
return
|
||||
}
|
||||
|
||||
stats, err := h.dashboardService.GetBatchUserUsageStats(c.Request.Context(), req.UserIDs, time.Time{}, time.Time{})
|
||||
keyRaw, _ := json.Marshal(struct {
|
||||
UserIDs []int64 `json:"user_ids"`
|
||||
}{
|
||||
UserIDs: userIDs,
|
||||
})
|
||||
cacheKey := string(keyRaw)
|
||||
if cached, ok := dashboardBatchUsersUsageCache.Get(cacheKey); ok {
|
||||
c.Header("X-Snapshot-Cache", "hit")
|
||||
response.Success(c, cached.Payload)
|
||||
return
|
||||
}
|
||||
|
||||
stats, err := h.dashboardService.GetBatchUserUsageStats(c.Request.Context(), userIDs, time.Time{}, time.Time{})
|
||||
if err != nil {
|
||||
response.Error(c, 500, "Failed to get user usage stats")
|
||||
return
|
||||
}
|
||||
|
||||
response.Success(c, gin.H{"stats": stats})
|
||||
payload := gin.H{"stats": stats}
|
||||
dashboardBatchUsersUsageCache.Set(cacheKey, payload)
|
||||
c.Header("X-Snapshot-Cache", "miss")
|
||||
response.Success(c, payload)
|
||||
}
|
||||
|
||||
// BatchAPIKeysUsageRequest represents the request body for batch api key usage stats
|
||||
@@ -460,16 +517,32 @@ func (h *DashboardHandler) GetBatchAPIKeysUsage(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
if len(req.APIKeyIDs) == 0 {
|
||||
apiKeyIDs := normalizeInt64IDList(req.APIKeyIDs)
|
||||
if len(apiKeyIDs) == 0 {
|
||||
response.Success(c, gin.H{"stats": map[string]any{}})
|
||||
return
|
||||
}
|
||||
|
||||
stats, err := h.dashboardService.GetBatchAPIKeyUsageStats(c.Request.Context(), req.APIKeyIDs, time.Time{}, time.Time{})
|
||||
keyRaw, _ := json.Marshal(struct {
|
||||
APIKeyIDs []int64 `json:"api_key_ids"`
|
||||
}{
|
||||
APIKeyIDs: apiKeyIDs,
|
||||
})
|
||||
cacheKey := string(keyRaw)
|
||||
if cached, ok := dashboardBatchAPIKeysUsageCache.Get(cacheKey); ok {
|
||||
c.Header("X-Snapshot-Cache", "hit")
|
||||
response.Success(c, cached.Payload)
|
||||
return
|
||||
}
|
||||
|
||||
stats, err := h.dashboardService.GetBatchAPIKeyUsageStats(c.Request.Context(), apiKeyIDs, time.Time{}, time.Time{})
|
||||
if err != nil {
|
||||
response.Error(c, 500, "Failed to get API key usage stats")
|
||||
return
|
||||
}
|
||||
|
||||
response.Success(c, gin.H{"stats": stats})
|
||||
payload := gin.H{"stats": stats}
|
||||
dashboardBatchAPIKeysUsageCache.Set(cacheKey, payload)
|
||||
c.Header("X-Snapshot-Cache", "miss")
|
||||
response.Success(c, payload)
|
||||
}
|
||||
|
||||
@@ -0,0 +1,132 @@
|
||||
package admin
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/usagestats"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type dashboardUsageRepoCapture struct {
|
||||
service.UsageLogRepository
|
||||
trendRequestType *int16
|
||||
trendStream *bool
|
||||
modelRequestType *int16
|
||||
modelStream *bool
|
||||
}
|
||||
|
||||
func (s *dashboardUsageRepoCapture) GetUsageTrendWithFilters(
|
||||
ctx context.Context,
|
||||
startTime, endTime time.Time,
|
||||
granularity string,
|
||||
userID, apiKeyID, accountID, groupID int64,
|
||||
model string,
|
||||
requestType *int16,
|
||||
stream *bool,
|
||||
billingType *int8,
|
||||
) ([]usagestats.TrendDataPoint, error) {
|
||||
s.trendRequestType = requestType
|
||||
s.trendStream = stream
|
||||
return []usagestats.TrendDataPoint{}, nil
|
||||
}
|
||||
|
||||
func (s *dashboardUsageRepoCapture) GetModelStatsWithFilters(
|
||||
ctx context.Context,
|
||||
startTime, endTime time.Time,
|
||||
userID, apiKeyID, accountID, groupID int64,
|
||||
requestType *int16,
|
||||
stream *bool,
|
||||
billingType *int8,
|
||||
) ([]usagestats.ModelStat, error) {
|
||||
s.modelRequestType = requestType
|
||||
s.modelStream = stream
|
||||
return []usagestats.ModelStat{}, nil
|
||||
}
|
||||
|
||||
func newDashboardRequestTypeTestRouter(repo *dashboardUsageRepoCapture) *gin.Engine {
|
||||
gin.SetMode(gin.TestMode)
|
||||
dashboardSvc := service.NewDashboardService(repo, nil, nil, nil)
|
||||
handler := NewDashboardHandler(dashboardSvc, nil)
|
||||
router := gin.New()
|
||||
router.GET("/admin/dashboard/trend", handler.GetUsageTrend)
|
||||
router.GET("/admin/dashboard/models", handler.GetModelStats)
|
||||
return router
|
||||
}
|
||||
|
||||
func TestDashboardTrendRequestTypePriority(t *testing.T) {
|
||||
repo := &dashboardUsageRepoCapture{}
|
||||
router := newDashboardRequestTypeTestRouter(repo)
|
||||
|
||||
req := httptest.NewRequest(http.MethodGet, "/admin/dashboard/trend?request_type=ws_v2&stream=bad", nil)
|
||||
rec := httptest.NewRecorder()
|
||||
router.ServeHTTP(rec, req)
|
||||
|
||||
require.Equal(t, http.StatusOK, rec.Code)
|
||||
require.NotNil(t, repo.trendRequestType)
|
||||
require.Equal(t, int16(service.RequestTypeWSV2), *repo.trendRequestType)
|
||||
require.Nil(t, repo.trendStream)
|
||||
}
|
||||
|
||||
func TestDashboardTrendInvalidRequestType(t *testing.T) {
|
||||
repo := &dashboardUsageRepoCapture{}
|
||||
router := newDashboardRequestTypeTestRouter(repo)
|
||||
|
||||
req := httptest.NewRequest(http.MethodGet, "/admin/dashboard/trend?request_type=bad", nil)
|
||||
rec := httptest.NewRecorder()
|
||||
router.ServeHTTP(rec, req)
|
||||
|
||||
require.Equal(t, http.StatusBadRequest, rec.Code)
|
||||
}
|
||||
|
||||
func TestDashboardTrendInvalidStream(t *testing.T) {
|
||||
repo := &dashboardUsageRepoCapture{}
|
||||
router := newDashboardRequestTypeTestRouter(repo)
|
||||
|
||||
req := httptest.NewRequest(http.MethodGet, "/admin/dashboard/trend?stream=bad", nil)
|
||||
rec := httptest.NewRecorder()
|
||||
router.ServeHTTP(rec, req)
|
||||
|
||||
require.Equal(t, http.StatusBadRequest, rec.Code)
|
||||
}
|
||||
|
||||
func TestDashboardModelStatsRequestTypePriority(t *testing.T) {
|
||||
repo := &dashboardUsageRepoCapture{}
|
||||
router := newDashboardRequestTypeTestRouter(repo)
|
||||
|
||||
req := httptest.NewRequest(http.MethodGet, "/admin/dashboard/models?request_type=sync&stream=bad", nil)
|
||||
rec := httptest.NewRecorder()
|
||||
router.ServeHTTP(rec, req)
|
||||
|
||||
require.Equal(t, http.StatusOK, rec.Code)
|
||||
require.NotNil(t, repo.modelRequestType)
|
||||
require.Equal(t, int16(service.RequestTypeSync), *repo.modelRequestType)
|
||||
require.Nil(t, repo.modelStream)
|
||||
}
|
||||
|
||||
func TestDashboardModelStatsInvalidRequestType(t *testing.T) {
|
||||
repo := &dashboardUsageRepoCapture{}
|
||||
router := newDashboardRequestTypeTestRouter(repo)
|
||||
|
||||
req := httptest.NewRequest(http.MethodGet, "/admin/dashboard/models?request_type=bad", nil)
|
||||
rec := httptest.NewRecorder()
|
||||
router.ServeHTTP(rec, req)
|
||||
|
||||
require.Equal(t, http.StatusBadRequest, rec.Code)
|
||||
}
|
||||
|
||||
func TestDashboardModelStatsInvalidStream(t *testing.T) {
|
||||
repo := &dashboardUsageRepoCapture{}
|
||||
router := newDashboardRequestTypeTestRouter(repo)
|
||||
|
||||
req := httptest.NewRequest(http.MethodGet, "/admin/dashboard/models?stream=bad", nil)
|
||||
rec := httptest.NewRecorder()
|
||||
router.ServeHTTP(rec, req)
|
||||
|
||||
require.Equal(t, http.StatusBadRequest, rec.Code)
|
||||
}
|
||||
292
backend/internal/handler/admin/dashboard_snapshot_v2_handler.go
Normal file
292
backend/internal/handler/admin/dashboard_snapshot_v2_handler.go
Normal file
@@ -0,0 +1,292 @@
|
||||
package admin
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/response"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/usagestats"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
var dashboardSnapshotV2Cache = newSnapshotCache(30 * time.Second)
|
||||
|
||||
type dashboardSnapshotV2Stats struct {
|
||||
usagestats.DashboardStats
|
||||
Uptime int64 `json:"uptime"`
|
||||
}
|
||||
|
||||
type dashboardSnapshotV2Response struct {
|
||||
GeneratedAt string `json:"generated_at"`
|
||||
|
||||
StartDate string `json:"start_date"`
|
||||
EndDate string `json:"end_date"`
|
||||
Granularity string `json:"granularity"`
|
||||
|
||||
Stats *dashboardSnapshotV2Stats `json:"stats,omitempty"`
|
||||
Trend []usagestats.TrendDataPoint `json:"trend,omitempty"`
|
||||
Models []usagestats.ModelStat `json:"models,omitempty"`
|
||||
Groups []usagestats.GroupStat `json:"groups,omitempty"`
|
||||
UsersTrend []usagestats.UserUsageTrendPoint `json:"users_trend,omitempty"`
|
||||
}
|
||||
|
||||
type dashboardSnapshotV2Filters struct {
|
||||
UserID int64
|
||||
APIKeyID int64
|
||||
AccountID int64
|
||||
GroupID int64
|
||||
Model string
|
||||
RequestType *int16
|
||||
Stream *bool
|
||||
BillingType *int8
|
||||
}
|
||||
|
||||
type dashboardSnapshotV2CacheKey struct {
|
||||
StartTime string `json:"start_time"`
|
||||
EndTime string `json:"end_time"`
|
||||
Granularity string `json:"granularity"`
|
||||
UserID int64 `json:"user_id"`
|
||||
APIKeyID int64 `json:"api_key_id"`
|
||||
AccountID int64 `json:"account_id"`
|
||||
GroupID int64 `json:"group_id"`
|
||||
Model string `json:"model"`
|
||||
RequestType *int16 `json:"request_type"`
|
||||
Stream *bool `json:"stream"`
|
||||
BillingType *int8 `json:"billing_type"`
|
||||
IncludeStats bool `json:"include_stats"`
|
||||
IncludeTrend bool `json:"include_trend"`
|
||||
IncludeModels bool `json:"include_models"`
|
||||
IncludeGroups bool `json:"include_groups"`
|
||||
IncludeUsersTrend bool `json:"include_users_trend"`
|
||||
UsersTrendLimit int `json:"users_trend_limit"`
|
||||
}
|
||||
|
||||
func (h *DashboardHandler) GetSnapshotV2(c *gin.Context) {
|
||||
startTime, endTime := parseTimeRange(c)
|
||||
granularity := strings.TrimSpace(c.DefaultQuery("granularity", "day"))
|
||||
if granularity != "hour" {
|
||||
granularity = "day"
|
||||
}
|
||||
|
||||
includeStats := parseBoolQueryWithDefault(c.Query("include_stats"), true)
|
||||
includeTrend := parseBoolQueryWithDefault(c.Query("include_trend"), true)
|
||||
includeModels := parseBoolQueryWithDefault(c.Query("include_model_stats"), true)
|
||||
includeGroups := parseBoolQueryWithDefault(c.Query("include_group_stats"), false)
|
||||
includeUsersTrend := parseBoolQueryWithDefault(c.Query("include_users_trend"), false)
|
||||
usersTrendLimit := 12
|
||||
if raw := strings.TrimSpace(c.Query("users_trend_limit")); raw != "" {
|
||||
if parsed, err := strconv.Atoi(raw); err == nil && parsed > 0 && parsed <= 50 {
|
||||
usersTrendLimit = parsed
|
||||
}
|
||||
}
|
||||
|
||||
filters, err := parseDashboardSnapshotV2Filters(c)
|
||||
if err != nil {
|
||||
response.BadRequest(c, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
keyRaw, _ := json.Marshal(dashboardSnapshotV2CacheKey{
|
||||
StartTime: startTime.UTC().Format(time.RFC3339),
|
||||
EndTime: endTime.UTC().Format(time.RFC3339),
|
||||
Granularity: granularity,
|
||||
UserID: filters.UserID,
|
||||
APIKeyID: filters.APIKeyID,
|
||||
AccountID: filters.AccountID,
|
||||
GroupID: filters.GroupID,
|
||||
Model: filters.Model,
|
||||
RequestType: filters.RequestType,
|
||||
Stream: filters.Stream,
|
||||
BillingType: filters.BillingType,
|
||||
IncludeStats: includeStats,
|
||||
IncludeTrend: includeTrend,
|
||||
IncludeModels: includeModels,
|
||||
IncludeGroups: includeGroups,
|
||||
IncludeUsersTrend: includeUsersTrend,
|
||||
UsersTrendLimit: usersTrendLimit,
|
||||
})
|
||||
cacheKey := string(keyRaw)
|
||||
|
||||
if cached, ok := dashboardSnapshotV2Cache.Get(cacheKey); ok {
|
||||
if cached.ETag != "" {
|
||||
c.Header("ETag", cached.ETag)
|
||||
c.Header("Vary", "If-None-Match")
|
||||
if ifNoneMatchMatched(c.GetHeader("If-None-Match"), cached.ETag) {
|
||||
c.Status(http.StatusNotModified)
|
||||
return
|
||||
}
|
||||
}
|
||||
c.Header("X-Snapshot-Cache", "hit")
|
||||
response.Success(c, cached.Payload)
|
||||
return
|
||||
}
|
||||
|
||||
resp := &dashboardSnapshotV2Response{
|
||||
GeneratedAt: time.Now().UTC().Format(time.RFC3339),
|
||||
StartDate: startTime.Format("2006-01-02"),
|
||||
EndDate: endTime.Add(-24 * time.Hour).Format("2006-01-02"),
|
||||
Granularity: granularity,
|
||||
}
|
||||
|
||||
if includeStats {
|
||||
stats, err := h.dashboardService.GetDashboardStats(c.Request.Context())
|
||||
if err != nil {
|
||||
response.Error(c, 500, "Failed to get dashboard statistics")
|
||||
return
|
||||
}
|
||||
resp.Stats = &dashboardSnapshotV2Stats{
|
||||
DashboardStats: *stats,
|
||||
Uptime: int64(time.Since(h.startTime).Seconds()),
|
||||
}
|
||||
}
|
||||
|
||||
if includeTrend {
|
||||
trend, err := h.dashboardService.GetUsageTrendWithFilters(
|
||||
c.Request.Context(),
|
||||
startTime,
|
||||
endTime,
|
||||
granularity,
|
||||
filters.UserID,
|
||||
filters.APIKeyID,
|
||||
filters.AccountID,
|
||||
filters.GroupID,
|
||||
filters.Model,
|
||||
filters.RequestType,
|
||||
filters.Stream,
|
||||
filters.BillingType,
|
||||
)
|
||||
if err != nil {
|
||||
response.Error(c, 500, "Failed to get usage trend")
|
||||
return
|
||||
}
|
||||
resp.Trend = trend
|
||||
}
|
||||
|
||||
if includeModels {
|
||||
models, err := h.dashboardService.GetModelStatsWithFilters(
|
||||
c.Request.Context(),
|
||||
startTime,
|
||||
endTime,
|
||||
filters.UserID,
|
||||
filters.APIKeyID,
|
||||
filters.AccountID,
|
||||
filters.GroupID,
|
||||
filters.RequestType,
|
||||
filters.Stream,
|
||||
filters.BillingType,
|
||||
)
|
||||
if err != nil {
|
||||
response.Error(c, 500, "Failed to get model statistics")
|
||||
return
|
||||
}
|
||||
resp.Models = models
|
||||
}
|
||||
|
||||
if includeGroups {
|
||||
groups, err := h.dashboardService.GetGroupStatsWithFilters(
|
||||
c.Request.Context(),
|
||||
startTime,
|
||||
endTime,
|
||||
filters.UserID,
|
||||
filters.APIKeyID,
|
||||
filters.AccountID,
|
||||
filters.GroupID,
|
||||
filters.RequestType,
|
||||
filters.Stream,
|
||||
filters.BillingType,
|
||||
)
|
||||
if err != nil {
|
||||
response.Error(c, 500, "Failed to get group statistics")
|
||||
return
|
||||
}
|
||||
resp.Groups = groups
|
||||
}
|
||||
|
||||
if includeUsersTrend {
|
||||
usersTrend, err := h.dashboardService.GetUserUsageTrend(
|
||||
c.Request.Context(),
|
||||
startTime,
|
||||
endTime,
|
||||
granularity,
|
||||
usersTrendLimit,
|
||||
)
|
||||
if err != nil {
|
||||
response.Error(c, 500, "Failed to get user usage trend")
|
||||
return
|
||||
}
|
||||
resp.UsersTrend = usersTrend
|
||||
}
|
||||
|
||||
cached := dashboardSnapshotV2Cache.Set(cacheKey, resp)
|
||||
if cached.ETag != "" {
|
||||
c.Header("ETag", cached.ETag)
|
||||
c.Header("Vary", "If-None-Match")
|
||||
}
|
||||
c.Header("X-Snapshot-Cache", "miss")
|
||||
response.Success(c, resp)
|
||||
}
|
||||
|
||||
func parseDashboardSnapshotV2Filters(c *gin.Context) (*dashboardSnapshotV2Filters, error) {
|
||||
filters := &dashboardSnapshotV2Filters{
|
||||
Model: strings.TrimSpace(c.Query("model")),
|
||||
}
|
||||
|
||||
if userIDStr := strings.TrimSpace(c.Query("user_id")); userIDStr != "" {
|
||||
id, err := strconv.ParseInt(userIDStr, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
filters.UserID = id
|
||||
}
|
||||
if apiKeyIDStr := strings.TrimSpace(c.Query("api_key_id")); apiKeyIDStr != "" {
|
||||
id, err := strconv.ParseInt(apiKeyIDStr, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
filters.APIKeyID = id
|
||||
}
|
||||
if accountIDStr := strings.TrimSpace(c.Query("account_id")); accountIDStr != "" {
|
||||
id, err := strconv.ParseInt(accountIDStr, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
filters.AccountID = id
|
||||
}
|
||||
if groupIDStr := strings.TrimSpace(c.Query("group_id")); groupIDStr != "" {
|
||||
id, err := strconv.ParseInt(groupIDStr, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
filters.GroupID = id
|
||||
}
|
||||
|
||||
if requestTypeStr := strings.TrimSpace(c.Query("request_type")); requestTypeStr != "" {
|
||||
parsed, err := service.ParseUsageRequestType(requestTypeStr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
value := int16(parsed)
|
||||
filters.RequestType = &value
|
||||
} else if streamStr := strings.TrimSpace(c.Query("stream")); streamStr != "" {
|
||||
streamVal, err := strconv.ParseBool(streamStr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
filters.Stream = &streamVal
|
||||
}
|
||||
|
||||
if billingTypeStr := strings.TrimSpace(c.Query("billing_type")); billingTypeStr != "" {
|
||||
v, err := strconv.ParseInt(billingTypeStr, 10, 8)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
bt := int8(v)
|
||||
filters.BillingType = &bt
|
||||
}
|
||||
|
||||
return filters, nil
|
||||
}
|
||||
545
backend/internal/handler/admin/data_management_handler.go
Normal file
545
backend/internal/handler/admin/data_management_handler.go
Normal file
@@ -0,0 +1,545 @@
|
||||
package admin
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/response"
|
||||
middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// DataManagementHandler exposes the admin HTTP endpoints for data-management
// configuration, source/S3 connection profiles, and backup jobs.
type DataManagementHandler struct {
	// dataManagementService is the backing service; when nil, every endpoint
	// except agent health reports the agent as unavailable (see
	// requireAgentEnabled / getAgentHealth).
	dataManagementService dataManagementService
}
|
||||
|
||||
func NewDataManagementHandler(dataManagementService *service.DataManagementService) *DataManagementHandler {
|
||||
return &DataManagementHandler{dataManagementService: dataManagementService}
|
||||
}
|
||||
|
||||
// dataManagementService is the narrow contract this handler needs from the
// data-management service layer.
type dataManagementService interface {
	// Configuration round-trip and S3 credential validation.
	GetConfig(ctx context.Context) (service.DataManagementConfig, error)
	UpdateConfig(ctx context.Context, cfg service.DataManagementConfig) (service.DataManagementConfig, error)
	ValidateS3(ctx context.Context, cfg service.DataManagementS3Config) (service.DataManagementTestS3Result, error)
	// Backup job creation.
	CreateBackupJob(ctx context.Context, input service.DataManagementCreateBackupJobInput) (service.DataManagementBackupJob, error)
	// Source (postgres/redis) connection profile management.
	ListSourceProfiles(ctx context.Context, sourceType string) ([]service.DataManagementSourceProfile, error)
	CreateSourceProfile(ctx context.Context, input service.DataManagementCreateSourceProfileInput) (service.DataManagementSourceProfile, error)
	UpdateSourceProfile(ctx context.Context, input service.DataManagementUpdateSourceProfileInput) (service.DataManagementSourceProfile, error)
	DeleteSourceProfile(ctx context.Context, sourceType, profileID string) error
	SetActiveSourceProfile(ctx context.Context, sourceType, profileID string) (service.DataManagementSourceProfile, error)
	// S3 upload profile management.
	ListS3Profiles(ctx context.Context) ([]service.DataManagementS3Profile, error)
	CreateS3Profile(ctx context.Context, input service.DataManagementCreateS3ProfileInput) (service.DataManagementS3Profile, error)
	UpdateS3Profile(ctx context.Context, input service.DataManagementUpdateS3ProfileInput) (service.DataManagementS3Profile, error)
	DeleteS3Profile(ctx context.Context, profileID string) error
	SetActiveS3Profile(ctx context.Context, profileID string) (service.DataManagementS3Profile, error)
	// Backup job listing/inspection.
	ListBackupJobs(ctx context.Context, input service.DataManagementListBackupJobsInput) (service.DataManagementListBackupJobsResult, error)
	GetBackupJob(ctx context.Context, jobID string) (service.DataManagementBackupJob, error)
	// Agent availability gate and health probe.
	EnsureAgentEnabled(ctx context.Context) error
	GetAgentHealth(ctx context.Context) service.DataManagementAgentHealth
}
|
||||
|
||||
// TestS3ConnectionRequest carries ad-hoc S3 credentials for a connectivity
// probe; nothing in it is persisted.
type TestS3ConnectionRequest struct {
	Endpoint        string `json:"endpoint"`
	Region          string `json:"region" binding:"required"`
	Bucket          string `json:"bucket" binding:"required"`
	AccessKeyID     string `json:"access_key_id"`
	SecretAccessKey string `json:"secret_access_key"`
	Prefix          string `json:"prefix"`
	ForcePathStyle  bool   `json:"force_path_style"`
	UseSSL          bool   `json:"use_ssl"`
}

// CreateBackupJobRequest describes a backup to enqueue. IdempotencyKey may
// alternatively arrive via the X-Idempotency-Key header, which takes
// precedence (see CreateBackupJob).
type CreateBackupJobRequest struct {
	BackupType     string `json:"backup_type" binding:"required,oneof=postgres redis full"`
	UploadToS3     bool   `json:"upload_to_s3"`
	S3ProfileID    string `json:"s3_profile_id"`
	PostgresID     string `json:"postgres_profile_id"`
	RedisID        string `json:"redis_profile_id"`
	IdempotencyKey string `json:"idempotency_key"`
}

// CreateSourceProfileRequest creates a postgres/redis connection profile;
// SetActive activates it immediately after creation.
type CreateSourceProfileRequest struct {
	ProfileID string                             `json:"profile_id" binding:"required"`
	Name      string                             `json:"name" binding:"required"`
	Config    service.DataManagementSourceConfig `json:"config" binding:"required"`
	SetActive bool                               `json:"set_active"`
}

// UpdateSourceProfileRequest renames/reconfigures an existing source profile
// (the profile ID comes from the URL path).
type UpdateSourceProfileRequest struct {
	Name   string                             `json:"name" binding:"required"`
	Config service.DataManagementSourceConfig `json:"config" binding:"required"`
}

// CreateS3ProfileRequest creates an S3 upload profile; SetActive activates
// it immediately after creation.
type CreateS3ProfileRequest struct {
	ProfileID       string `json:"profile_id" binding:"required"`
	Name            string `json:"name" binding:"required"`
	Enabled         bool   `json:"enabled"`
	Endpoint        string `json:"endpoint"`
	Region          string `json:"region"`
	Bucket          string `json:"bucket"`
	AccessKeyID     string `json:"access_key_id"`
	SecretAccessKey string `json:"secret_access_key"`
	Prefix          string `json:"prefix"`
	ForcePathStyle  bool   `json:"force_path_style"`
	UseSSL          bool   `json:"use_ssl"`
	SetActive       bool   `json:"set_active"`
}

// UpdateS3ProfileRequest updates the S3 profile identified by the URL path.
type UpdateS3ProfileRequest struct {
	Name            string `json:"name" binding:"required"`
	Enabled         bool   `json:"enabled"`
	Endpoint        string `json:"endpoint"`
	Region          string `json:"region"`
	Bucket          string `json:"bucket"`
	AccessKeyID     string `json:"access_key_id"`
	SecretAccessKey string `json:"secret_access_key"`
	Prefix          string `json:"prefix"`
	ForcePathStyle  bool   `json:"force_path_style"`
	UseSSL          bool   `json:"use_ssl"`
}
|
||||
|
||||
func (h *DataManagementHandler) GetAgentHealth(c *gin.Context) {
|
||||
health := h.getAgentHealth(c)
|
||||
payload := gin.H{
|
||||
"enabled": health.Enabled,
|
||||
"reason": health.Reason,
|
||||
"socket_path": health.SocketPath,
|
||||
}
|
||||
if health.Agent != nil {
|
||||
payload["agent"] = gin.H{
|
||||
"status": health.Agent.Status,
|
||||
"version": health.Agent.Version,
|
||||
"uptime_seconds": health.Agent.UptimeSeconds,
|
||||
}
|
||||
}
|
||||
response.Success(c, payload)
|
||||
}
|
||||
|
||||
func (h *DataManagementHandler) GetConfig(c *gin.Context) {
|
||||
if !h.requireAgentEnabled(c) {
|
||||
return
|
||||
}
|
||||
cfg, err := h.dataManagementService.GetConfig(c.Request.Context())
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
response.Success(c, cfg)
|
||||
}
|
||||
|
||||
func (h *DataManagementHandler) UpdateConfig(c *gin.Context) {
|
||||
var req service.DataManagementConfig
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
response.BadRequest(c, "Invalid request: "+err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if !h.requireAgentEnabled(c) {
|
||||
return
|
||||
}
|
||||
cfg, err := h.dataManagementService.UpdateConfig(c.Request.Context(), req)
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
response.Success(c, cfg)
|
||||
}
|
||||
|
||||
func (h *DataManagementHandler) TestS3(c *gin.Context) {
|
||||
var req TestS3ConnectionRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
response.BadRequest(c, "Invalid request: "+err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if !h.requireAgentEnabled(c) {
|
||||
return
|
||||
}
|
||||
result, err := h.dataManagementService.ValidateS3(c.Request.Context(), service.DataManagementS3Config{
|
||||
Enabled: true,
|
||||
Endpoint: req.Endpoint,
|
||||
Region: req.Region,
|
||||
Bucket: req.Bucket,
|
||||
AccessKeyID: req.AccessKeyID,
|
||||
SecretAccessKey: req.SecretAccessKey,
|
||||
Prefix: req.Prefix,
|
||||
ForcePathStyle: req.ForcePathStyle,
|
||||
UseSSL: req.UseSSL,
|
||||
})
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
response.Success(c, gin.H{"ok": result.OK, "message": result.Message})
|
||||
}
|
||||
|
||||
func (h *DataManagementHandler) CreateBackupJob(c *gin.Context) {
|
||||
var req CreateBackupJobRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
response.BadRequest(c, "Invalid request: "+err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
req.IdempotencyKey = normalizeBackupIdempotencyKey(c.GetHeader("X-Idempotency-Key"), req.IdempotencyKey)
|
||||
if !h.requireAgentEnabled(c) {
|
||||
return
|
||||
}
|
||||
|
||||
triggeredBy := "admin:unknown"
|
||||
if subject, ok := middleware2.GetAuthSubjectFromContext(c); ok {
|
||||
triggeredBy = "admin:" + strconv.FormatInt(subject.UserID, 10)
|
||||
}
|
||||
job, err := h.dataManagementService.CreateBackupJob(c.Request.Context(), service.DataManagementCreateBackupJobInput{
|
||||
BackupType: req.BackupType,
|
||||
UploadToS3: req.UploadToS3,
|
||||
S3ProfileID: req.S3ProfileID,
|
||||
PostgresID: req.PostgresID,
|
||||
RedisID: req.RedisID,
|
||||
TriggeredBy: triggeredBy,
|
||||
IdempotencyKey: req.IdempotencyKey,
|
||||
})
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
response.Success(c, gin.H{"job_id": job.JobID, "status": job.Status})
|
||||
}
|
||||
|
||||
func (h *DataManagementHandler) ListSourceProfiles(c *gin.Context) {
|
||||
sourceType := strings.TrimSpace(c.Param("source_type"))
|
||||
if sourceType == "" {
|
||||
response.BadRequest(c, "Invalid source_type")
|
||||
return
|
||||
}
|
||||
if sourceType != "postgres" && sourceType != "redis" {
|
||||
response.BadRequest(c, "source_type must be postgres or redis")
|
||||
return
|
||||
}
|
||||
|
||||
if !h.requireAgentEnabled(c) {
|
||||
return
|
||||
}
|
||||
items, err := h.dataManagementService.ListSourceProfiles(c.Request.Context(), sourceType)
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
response.Success(c, gin.H{"items": items})
|
||||
}
|
||||
|
||||
func (h *DataManagementHandler) CreateSourceProfile(c *gin.Context) {
|
||||
sourceType := strings.TrimSpace(c.Param("source_type"))
|
||||
if sourceType != "postgres" && sourceType != "redis" {
|
||||
response.BadRequest(c, "source_type must be postgres or redis")
|
||||
return
|
||||
}
|
||||
|
||||
var req CreateSourceProfileRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
response.BadRequest(c, "Invalid request: "+err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if !h.requireAgentEnabled(c) {
|
||||
return
|
||||
}
|
||||
profile, err := h.dataManagementService.CreateSourceProfile(c.Request.Context(), service.DataManagementCreateSourceProfileInput{
|
||||
SourceType: sourceType,
|
||||
ProfileID: req.ProfileID,
|
||||
Name: req.Name,
|
||||
Config: req.Config,
|
||||
SetActive: req.SetActive,
|
||||
})
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
response.Success(c, profile)
|
||||
}
|
||||
|
||||
func (h *DataManagementHandler) UpdateSourceProfile(c *gin.Context) {
|
||||
sourceType := strings.TrimSpace(c.Param("source_type"))
|
||||
if sourceType != "postgres" && sourceType != "redis" {
|
||||
response.BadRequest(c, "source_type must be postgres or redis")
|
||||
return
|
||||
}
|
||||
profileID := strings.TrimSpace(c.Param("profile_id"))
|
||||
if profileID == "" {
|
||||
response.BadRequest(c, "Invalid profile_id")
|
||||
return
|
||||
}
|
||||
|
||||
var req UpdateSourceProfileRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
response.BadRequest(c, "Invalid request: "+err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if !h.requireAgentEnabled(c) {
|
||||
return
|
||||
}
|
||||
profile, err := h.dataManagementService.UpdateSourceProfile(c.Request.Context(), service.DataManagementUpdateSourceProfileInput{
|
||||
SourceType: sourceType,
|
||||
ProfileID: profileID,
|
||||
Name: req.Name,
|
||||
Config: req.Config,
|
||||
})
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
response.Success(c, profile)
|
||||
}
|
||||
|
||||
func (h *DataManagementHandler) DeleteSourceProfile(c *gin.Context) {
|
||||
sourceType := strings.TrimSpace(c.Param("source_type"))
|
||||
if sourceType != "postgres" && sourceType != "redis" {
|
||||
response.BadRequest(c, "source_type must be postgres or redis")
|
||||
return
|
||||
}
|
||||
profileID := strings.TrimSpace(c.Param("profile_id"))
|
||||
if profileID == "" {
|
||||
response.BadRequest(c, "Invalid profile_id")
|
||||
return
|
||||
}
|
||||
|
||||
if !h.requireAgentEnabled(c) {
|
||||
return
|
||||
}
|
||||
if err := h.dataManagementService.DeleteSourceProfile(c.Request.Context(), sourceType, profileID); err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
response.Success(c, gin.H{"deleted": true})
|
||||
}
|
||||
|
||||
func (h *DataManagementHandler) SetActiveSourceProfile(c *gin.Context) {
|
||||
sourceType := strings.TrimSpace(c.Param("source_type"))
|
||||
if sourceType != "postgres" && sourceType != "redis" {
|
||||
response.BadRequest(c, "source_type must be postgres or redis")
|
||||
return
|
||||
}
|
||||
profileID := strings.TrimSpace(c.Param("profile_id"))
|
||||
if profileID == "" {
|
||||
response.BadRequest(c, "Invalid profile_id")
|
||||
return
|
||||
}
|
||||
|
||||
if !h.requireAgentEnabled(c) {
|
||||
return
|
||||
}
|
||||
profile, err := h.dataManagementService.SetActiveSourceProfile(c.Request.Context(), sourceType, profileID)
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
response.Success(c, profile)
|
||||
}
|
||||
|
||||
func (h *DataManagementHandler) ListS3Profiles(c *gin.Context) {
|
||||
if !h.requireAgentEnabled(c) {
|
||||
return
|
||||
}
|
||||
|
||||
items, err := h.dataManagementService.ListS3Profiles(c.Request.Context())
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
response.Success(c, gin.H{"items": items})
|
||||
}
|
||||
|
||||
func (h *DataManagementHandler) CreateS3Profile(c *gin.Context) {
|
||||
var req CreateS3ProfileRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
response.BadRequest(c, "Invalid request: "+err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if !h.requireAgentEnabled(c) {
|
||||
return
|
||||
}
|
||||
|
||||
profile, err := h.dataManagementService.CreateS3Profile(c.Request.Context(), service.DataManagementCreateS3ProfileInput{
|
||||
ProfileID: req.ProfileID,
|
||||
Name: req.Name,
|
||||
SetActive: req.SetActive,
|
||||
S3: service.DataManagementS3Config{
|
||||
Enabled: req.Enabled,
|
||||
Endpoint: req.Endpoint,
|
||||
Region: req.Region,
|
||||
Bucket: req.Bucket,
|
||||
AccessKeyID: req.AccessKeyID,
|
||||
SecretAccessKey: req.SecretAccessKey,
|
||||
Prefix: req.Prefix,
|
||||
ForcePathStyle: req.ForcePathStyle,
|
||||
UseSSL: req.UseSSL,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
response.Success(c, profile)
|
||||
}
|
||||
|
||||
func (h *DataManagementHandler) UpdateS3Profile(c *gin.Context) {
|
||||
var req UpdateS3ProfileRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
response.BadRequest(c, "Invalid request: "+err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
profileID := strings.TrimSpace(c.Param("profile_id"))
|
||||
if profileID == "" {
|
||||
response.BadRequest(c, "Invalid profile_id")
|
||||
return
|
||||
}
|
||||
|
||||
if !h.requireAgentEnabled(c) {
|
||||
return
|
||||
}
|
||||
|
||||
profile, err := h.dataManagementService.UpdateS3Profile(c.Request.Context(), service.DataManagementUpdateS3ProfileInput{
|
||||
ProfileID: profileID,
|
||||
Name: req.Name,
|
||||
S3: service.DataManagementS3Config{
|
||||
Enabled: req.Enabled,
|
||||
Endpoint: req.Endpoint,
|
||||
Region: req.Region,
|
||||
Bucket: req.Bucket,
|
||||
AccessKeyID: req.AccessKeyID,
|
||||
SecretAccessKey: req.SecretAccessKey,
|
||||
Prefix: req.Prefix,
|
||||
ForcePathStyle: req.ForcePathStyle,
|
||||
UseSSL: req.UseSSL,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
response.Success(c, profile)
|
||||
}
|
||||
|
||||
func (h *DataManagementHandler) DeleteS3Profile(c *gin.Context) {
|
||||
profileID := strings.TrimSpace(c.Param("profile_id"))
|
||||
if profileID == "" {
|
||||
response.BadRequest(c, "Invalid profile_id")
|
||||
return
|
||||
}
|
||||
|
||||
if !h.requireAgentEnabled(c) {
|
||||
return
|
||||
}
|
||||
if err := h.dataManagementService.DeleteS3Profile(c.Request.Context(), profileID); err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
response.Success(c, gin.H{"deleted": true})
|
||||
}
|
||||
|
||||
func (h *DataManagementHandler) SetActiveS3Profile(c *gin.Context) {
|
||||
profileID := strings.TrimSpace(c.Param("profile_id"))
|
||||
if profileID == "" {
|
||||
response.BadRequest(c, "Invalid profile_id")
|
||||
return
|
||||
}
|
||||
|
||||
if !h.requireAgentEnabled(c) {
|
||||
return
|
||||
}
|
||||
profile, err := h.dataManagementService.SetActiveS3Profile(c.Request.Context(), profileID)
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
response.Success(c, profile)
|
||||
}
|
||||
|
||||
func (h *DataManagementHandler) ListBackupJobs(c *gin.Context) {
|
||||
if !h.requireAgentEnabled(c) {
|
||||
return
|
||||
}
|
||||
|
||||
pageSize := int32(20)
|
||||
if raw := strings.TrimSpace(c.Query("page_size")); raw != "" {
|
||||
v, err := strconv.Atoi(raw)
|
||||
if err != nil || v <= 0 {
|
||||
response.BadRequest(c, "Invalid page_size")
|
||||
return
|
||||
}
|
||||
pageSize = int32(v)
|
||||
}
|
||||
|
||||
result, err := h.dataManagementService.ListBackupJobs(c.Request.Context(), service.DataManagementListBackupJobsInput{
|
||||
PageSize: pageSize,
|
||||
PageToken: c.Query("page_token"),
|
||||
Status: c.Query("status"),
|
||||
BackupType: c.Query("backup_type"),
|
||||
})
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
response.Success(c, result)
|
||||
}
|
||||
|
||||
func (h *DataManagementHandler) GetBackupJob(c *gin.Context) {
|
||||
jobID := strings.TrimSpace(c.Param("job_id"))
|
||||
if jobID == "" {
|
||||
response.BadRequest(c, "Invalid backup job ID")
|
||||
return
|
||||
}
|
||||
|
||||
if !h.requireAgentEnabled(c) {
|
||||
return
|
||||
}
|
||||
job, err := h.dataManagementService.GetBackupJob(c.Request.Context(), jobID)
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
response.Success(c, job)
|
||||
}
|
||||
|
||||
func (h *DataManagementHandler) requireAgentEnabled(c *gin.Context) bool {
|
||||
if h.dataManagementService == nil {
|
||||
err := infraerrors.ServiceUnavailable(
|
||||
service.DataManagementAgentUnavailableReason,
|
||||
"data management agent service is not configured",
|
||||
).WithMetadata(map[string]string{"socket_path": service.DefaultDataManagementAgentSocketPath})
|
||||
response.ErrorFrom(c, err)
|
||||
return false
|
||||
}
|
||||
|
||||
if err := h.dataManagementService.EnsureAgentEnabled(c.Request.Context()); err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (h *DataManagementHandler) getAgentHealth(c *gin.Context) service.DataManagementAgentHealth {
|
||||
if h.dataManagementService == nil {
|
||||
return service.DataManagementAgentHealth{
|
||||
Enabled: false,
|
||||
Reason: service.DataManagementAgentUnavailableReason,
|
||||
SocketPath: service.DefaultDataManagementAgentSocketPath,
|
||||
}
|
||||
}
|
||||
return h.dataManagementService.GetAgentHealth(c.Request.Context())
|
||||
}
|
||||
|
||||
// normalizeBackupIdempotencyKey picks the effective idempotency key: the
// header value wins when non-blank after trimming; otherwise the trimmed
// body value (possibly empty) is used.
func normalizeBackupIdempotencyKey(headerValue, bodyValue string) string {
	if key := strings.TrimSpace(headerValue); key != "" {
		return key
	}
	return strings.TrimSpace(bodyValue)
}
|
||||
@@ -0,0 +1,78 @@
|
||||
package admin
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// apiEnvelope mirrors the standard API response wrapper emitted by the
// response package, so tests can decode code/reason/data uniformly.
type apiEnvelope struct {
	Code    int             `json:"code"`
	Message string          `json:"message"`
	Reason  string          `json:"reason"`
	Data    json.RawMessage `json:"data"`
}

// TestDataManagementHandler_AgentHealthAlways200 verifies the health probe
// endpoint answers 200 with enabled=false and the deprecation reason even
// when the agent socket does not exist.
func TestDataManagementHandler_AgentHealthAlways200(t *testing.T) {
	gin.SetMode(gin.TestMode)

	// Point the service at a socket path that cannot exist, with a short
	// probe timeout so the test stays fast.
	svc := service.NewDataManagementServiceWithOptions(filepath.Join(t.TempDir(), "missing.sock"), 50*time.Millisecond)
	h := NewDataManagementHandler(svc)

	r := gin.New()
	r.GET("/api/v1/admin/data-management/agent/health", h.GetAgentHealth)

	rec := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodGet, "/api/v1/admin/data-management/agent/health", nil)
	r.ServeHTTP(rec, req)

	require.Equal(t, http.StatusOK, rec.Code)

	var envelope apiEnvelope
	require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &envelope))
	require.Equal(t, 0, envelope.Code)

	// Decode just the health fields we assert on.
	var data struct {
		Enabled    bool   `json:"enabled"`
		Reason     string `json:"reason"`
		SocketPath string `json:"socket_path"`
	}
	require.NoError(t, json.Unmarshal(envelope.Data, &data))
	require.False(t, data.Enabled)
	require.Equal(t, service.DataManagementDeprecatedReason, data.Reason)
	require.Equal(t, svc.SocketPath(), data.SocketPath)
}

// TestDataManagementHandler_NonHealthRouteReturns503WhenDisabled verifies a
// gated endpoint (GetConfig) answers 503 with the deprecation reason when
// the agent is unreachable.
func TestDataManagementHandler_NonHealthRouteReturns503WhenDisabled(t *testing.T) {
	gin.SetMode(gin.TestMode)

	svc := service.NewDataManagementServiceWithOptions(filepath.Join(t.TempDir(), "missing.sock"), 50*time.Millisecond)
	h := NewDataManagementHandler(svc)

	r := gin.New()
	r.GET("/api/v1/admin/data-management/config", h.GetConfig)

	rec := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodGet, "/api/v1/admin/data-management/config", nil)
	r.ServeHTTP(rec, req)

	require.Equal(t, http.StatusServiceUnavailable, rec.Code)

	var envelope apiEnvelope
	require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &envelope))
	require.Equal(t, http.StatusServiceUnavailable, envelope.Code)
	require.Equal(t, service.DataManagementDeprecatedReason, envelope.Reason)
}

// TestNormalizeBackupIdempotencyKey checks header-over-body precedence and
// whitespace trimming of the idempotency key helper.
func TestNormalizeBackupIdempotencyKey(t *testing.T) {
	require.Equal(t, "from-header", normalizeBackupIdempotencyKey("from-header", "from-body"))
	require.Equal(t, "from-body", normalizeBackupIdempotencyKey(" ", " from-body "))
	require.Equal(t, "", normalizeBackupIdempotencyKey("", ""))
}
|
||||
@@ -52,6 +52,8 @@ type CreateGroupRequest struct {
|
||||
SimulateClaudeMaxEnabled *bool `json:"simulate_claude_max_enabled"`
|
||||
// 支持的模型系列(仅 antigravity 平台使用)
|
||||
SupportedModelScopes []string `json:"supported_model_scopes"`
|
||||
// Sora 存储配额
|
||||
SoraStorageQuotaBytes int64 `json:"sora_storage_quota_bytes"`
|
||||
// 从指定分组复制账号(创建后自动绑定)
|
||||
CopyAccountsFromGroupIDs []int64 `json:"copy_accounts_from_group_ids"`
|
||||
}
|
||||
@@ -86,6 +88,8 @@ type UpdateGroupRequest struct {
|
||||
SimulateClaudeMaxEnabled *bool `json:"simulate_claude_max_enabled"`
|
||||
// 支持的模型系列(仅 antigravity 平台使用)
|
||||
SupportedModelScopes *[]string `json:"supported_model_scopes"`
|
||||
// Sora 存储配额
|
||||
SoraStorageQuotaBytes *int64 `json:"sora_storage_quota_bytes"`
|
||||
// 从指定分组复制账号(同步操作:先清空当前分组的账号绑定,再绑定源分组的账号)
|
||||
CopyAccountsFromGroupIDs []int64 `json:"copy_accounts_from_group_ids"`
|
||||
}
|
||||
@@ -201,6 +205,7 @@ func (h *GroupHandler) Create(c *gin.Context) {
|
||||
MCPXMLInject: req.MCPXMLInject,
|
||||
SimulateClaudeMaxEnabled: req.SimulateClaudeMaxEnabled,
|
||||
SupportedModelScopes: req.SupportedModelScopes,
|
||||
SoraStorageQuotaBytes: req.SoraStorageQuotaBytes,
|
||||
CopyAccountsFromGroupIDs: req.CopyAccountsFromGroupIDs,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -252,6 +257,7 @@ func (h *GroupHandler) Update(c *gin.Context) {
|
||||
MCPXMLInject: req.MCPXMLInject,
|
||||
SimulateClaudeMaxEnabled: req.SimulateClaudeMaxEnabled,
|
||||
SupportedModelScopes: req.SupportedModelScopes,
|
||||
SoraStorageQuotaBytes: req.SoraStorageQuotaBytes,
|
||||
CopyAccountsFromGroupIDs: req.CopyAccountsFromGroupIDs,
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
25
backend/internal/handler/admin/id_list_utils.go
Normal file
25
backend/internal/handler/admin/id_list_utils.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package admin
|
||||
|
||||
import "sort"
|
||||
|
||||
// normalizeInt64IDList sanitizes an ID list: non-positive values are
// dropped, duplicates are removed, and the survivors are returned in
// ascending order. A nil/empty input yields nil; a non-empty input whose
// values are all invalid yields an empty (non-nil) slice.
func normalizeInt64IDList(ids []int64) []int64 {
	if len(ids) == 0 {
		return nil
	}

	seen := make(map[int64]struct{}, len(ids))
	result := make([]int64, 0, len(ids))
	for _, id := range ids {
		if id <= 0 {
			continue
		}
		if _, dup := seen[id]; dup {
			continue
		}
		seen[id] = struct{}{}
		result = append(result, id)
	}

	sort.Slice(result, func(a, b int) bool { return result[a] < result[b] })
	return result
}
|
||||
57
backend/internal/handler/admin/id_list_utils_test.go
Normal file
57
backend/internal/handler/admin/id_list_utils_test.go
Normal file
@@ -0,0 +1,57 @@
|
||||
//go:build unit
|
||||
|
||||
package admin
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestNormalizeInt64IDList covers filtering of non-positive IDs,
// deduplication, sorting, and the nil-vs-empty return distinction.
func TestNormalizeInt64IDList(t *testing.T) {
	tests := []struct {
		name string
		in   []int64
		want []int64
	}{
		{"nil input", nil, nil},
		{"empty input", []int64{}, nil},
		{"single element", []int64{5}, []int64{5}},
		{"already sorted unique", []int64{1, 2, 3}, []int64{1, 2, 3}},
		{"duplicates removed", []int64{3, 1, 3, 2, 1}, []int64{1, 2, 3}},
		{"zero filtered", []int64{0, 1, 2}, []int64{1, 2}},
		{"negative filtered", []int64{-5, -1, 3}, []int64{3}},
		{"all invalid", []int64{0, -1, -2}, []int64{}},
		{"sorted output", []int64{9, 3, 7, 1}, []int64{1, 3, 7, 9}},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			got := normalizeInt64IDList(tc.in)
			// Nil expectations are asserted with require.Nil because
			// require.Equal treats nil and empty slices as equal.
			if tc.want == nil {
				require.Nil(t, got)
			} else {
				require.Equal(t, tc.want, got)
			}
		})
	}
}

// TestBuildAccountTodayStatsBatchCacheKey pins the cache-key format for the
// account today-stats batch endpoint (empty sentinel vs comma-joined IDs).
func TestBuildAccountTodayStatsBatchCacheKey(t *testing.T) {
	tests := []struct {
		name string
		ids  []int64
		want string
	}{
		{"empty", nil, "accounts_today_stats_empty"},
		{"single", []int64{42}, "accounts_today_stats:42"},
		{"multiple", []int64{1, 2, 3}, "accounts_today_stats:1,2,3"},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			got := buildAccountTodayStatsBatchCacheKey(tc.ids)
			require.Equal(t, tc.want, got)
		})
	}
}
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/handler/dto"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/openai"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/response"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
|
||||
@@ -47,7 +48,12 @@ func (h *OpenAIOAuthHandler) GenerateAuthURL(c *gin.Context) {
|
||||
req = OpenAIGenerateAuthURLRequest{}
|
||||
}
|
||||
|
||||
result, err := h.openaiOAuthService.GenerateAuthURL(c.Request.Context(), req.ProxyID, req.RedirectURI)
|
||||
result, err := h.openaiOAuthService.GenerateAuthURL(
|
||||
c.Request.Context(),
|
||||
req.ProxyID,
|
||||
req.RedirectURI,
|
||||
oauthPlatformFromPath(c),
|
||||
)
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
@@ -123,7 +129,14 @@ func (h *OpenAIOAuthHandler) RefreshToken(c *gin.Context) {
|
||||
}
|
||||
}
|
||||
|
||||
tokenInfo, err := h.openaiOAuthService.RefreshTokenWithClientID(c.Request.Context(), refreshToken, proxyURL, strings.TrimSpace(req.ClientID))
|
||||
// 未指定 client_id 时,根据请求路径平台自动设置默认值,避免 repository 层盲猜
|
||||
clientID := strings.TrimSpace(req.ClientID)
|
||||
if clientID == "" {
|
||||
platform := oauthPlatformFromPath(c)
|
||||
clientID, _ = openai.OAuthClientConfigByPlatform(platform)
|
||||
}
|
||||
|
||||
tokenInfo, err := h.openaiOAuthService.RefreshTokenWithClientID(c.Request.Context(), refreshToken, proxyURL, clientID)
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
|
||||
145
backend/internal/handler/admin/ops_snapshot_v2_handler.go
Normal file
145
backend/internal/handler/admin/ops_snapshot_v2_handler.go
Normal file
@@ -0,0 +1,145 @@
|
||||
package admin
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/response"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
"github.com/gin-gonic/gin"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// opsDashboardSnapshotV2Cache memoizes snapshot responses with a 30-second
// TTL, shielding the backing stores from rapid dashboard refreshes.
var opsDashboardSnapshotV2Cache = newSnapshotCache(30 * time.Second)

// opsDashboardSnapshotV2Response bundles the ops dashboard core panels into
// a single payload returned by GetDashboardSnapshotV2.
type opsDashboardSnapshotV2Response struct {
	GeneratedAt string `json:"generated_at"`

	Overview        *service.OpsDashboardOverview       `json:"overview"`
	ThroughputTrend *service.OpsThroughputTrendResponse `json:"throughput_trend"`
	ErrorTrend      *service.OpsErrorTrendResponse      `json:"error_trend"`
}

// opsDashboardSnapshotV2CacheKey is JSON-marshaled to form the cache key;
// every field that influences the response must be represented here so
// distinct queries never share a cache entry.
type opsDashboardSnapshotV2CacheKey struct {
	StartTime    string               `json:"start_time"`
	EndTime      string               `json:"end_time"`
	Platform     string               `json:"platform"`
	GroupID      *int64               `json:"group_id"`
	QueryMode    service.OpsQueryMode `json:"mode"`
	BucketSecond int                  `json:"bucket_second"`
}
|
||||
|
||||
// GetDashboardSnapshotV2 returns ops dashboard core snapshot in one request.
|
||||
// GET /api/v1/admin/ops/dashboard/snapshot-v2
|
||||
func (h *OpsHandler) GetDashboardSnapshotV2(c *gin.Context) {
|
||||
if h.opsService == nil {
|
||||
response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
|
||||
return
|
||||
}
|
||||
if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
|
||||
startTime, endTime, err := parseOpsTimeRange(c, "1h")
|
||||
if err != nil {
|
||||
response.BadRequest(c, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
filter := &service.OpsDashboardFilter{
|
||||
StartTime: startTime,
|
||||
EndTime: endTime,
|
||||
Platform: strings.TrimSpace(c.Query("platform")),
|
||||
QueryMode: parseOpsQueryMode(c),
|
||||
}
|
||||
if v := strings.TrimSpace(c.Query("group_id")); v != "" {
|
||||
id, err := strconv.ParseInt(v, 10, 64)
|
||||
if err != nil || id <= 0 {
|
||||
response.BadRequest(c, "Invalid group_id")
|
||||
return
|
||||
}
|
||||
filter.GroupID = &id
|
||||
}
|
||||
bucketSeconds := pickThroughputBucketSeconds(endTime.Sub(startTime))
|
||||
|
||||
keyRaw, _ := json.Marshal(opsDashboardSnapshotV2CacheKey{
|
||||
StartTime: startTime.UTC().Format(time.RFC3339),
|
||||
EndTime: endTime.UTC().Format(time.RFC3339),
|
||||
Platform: filter.Platform,
|
||||
GroupID: filter.GroupID,
|
||||
QueryMode: filter.QueryMode,
|
||||
BucketSecond: bucketSeconds,
|
||||
})
|
||||
cacheKey := string(keyRaw)
|
||||
|
||||
if cached, ok := opsDashboardSnapshotV2Cache.Get(cacheKey); ok {
|
||||
if cached.ETag != "" {
|
||||
c.Header("ETag", cached.ETag)
|
||||
c.Header("Vary", "If-None-Match")
|
||||
if ifNoneMatchMatched(c.GetHeader("If-None-Match"), cached.ETag) {
|
||||
c.Status(http.StatusNotModified)
|
||||
return
|
||||
}
|
||||
}
|
||||
c.Header("X-Snapshot-Cache", "hit")
|
||||
response.Success(c, cached.Payload)
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
overview *service.OpsDashboardOverview
|
||||
trend *service.OpsThroughputTrendResponse
|
||||
errTrend *service.OpsErrorTrendResponse
|
||||
)
|
||||
g, gctx := errgroup.WithContext(c.Request.Context())
|
||||
g.Go(func() error {
|
||||
f := *filter
|
||||
result, err := h.opsService.GetDashboardOverview(gctx, &f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
overview = result
|
||||
return nil
|
||||
})
|
||||
g.Go(func() error {
|
||||
f := *filter
|
||||
result, err := h.opsService.GetThroughputTrend(gctx, &f, bucketSeconds)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
trend = result
|
||||
return nil
|
||||
})
|
||||
g.Go(func() error {
|
||||
f := *filter
|
||||
result, err := h.opsService.GetErrorTrend(gctx, &f, bucketSeconds)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
errTrend = result
|
||||
return nil
|
||||
})
|
||||
if err := g.Wait(); err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
|
||||
resp := &opsDashboardSnapshotV2Response{
|
||||
GeneratedAt: time.Now().UTC().Format(time.RFC3339),
|
||||
Overview: overview,
|
||||
ThroughputTrend: trend,
|
||||
ErrorTrend: errTrend,
|
||||
}
|
||||
|
||||
cached := opsDashboardSnapshotV2Cache.Set(cacheKey, resp)
|
||||
if cached.ETag != "" {
|
||||
c.Header("ETag", cached.ETag)
|
||||
c.Header("Vary", "If-None-Match")
|
||||
}
|
||||
c.Header("X-Snapshot-Cache", "miss")
|
||||
response.Success(c, resp)
|
||||
}
|
||||
@@ -62,7 +62,8 @@ const (
|
||||
)
|
||||
|
||||
var wsConnCount atomic.Int32
|
||||
var wsConnCountByIP sync.Map // map[string]*atomic.Int32
|
||||
var wsConnCountByIPMu sync.Mutex
|
||||
var wsConnCountByIP = make(map[string]int32)
|
||||
|
||||
const qpsWSIdleStopDelay = 30 * time.Second
|
||||
|
||||
@@ -389,42 +390,31 @@ func tryAcquireOpsWSIPSlot(clientIP string, limit int32) bool {
|
||||
if strings.TrimSpace(clientIP) == "" || limit <= 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
v, _ := wsConnCountByIP.LoadOrStore(clientIP, &atomic.Int32{})
|
||||
counter, ok := v.(*atomic.Int32)
|
||||
if !ok {
|
||||
wsConnCountByIPMu.Lock()
|
||||
defer wsConnCountByIPMu.Unlock()
|
||||
current := wsConnCountByIP[clientIP]
|
||||
if current >= limit {
|
||||
return false
|
||||
}
|
||||
|
||||
for {
|
||||
current := counter.Load()
|
||||
if current >= limit {
|
||||
return false
|
||||
}
|
||||
if counter.CompareAndSwap(current, current+1) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
wsConnCountByIP[clientIP] = current + 1
|
||||
return true
|
||||
}
|
||||
|
||||
func releaseOpsWSIPSlot(clientIP string) {
|
||||
if strings.TrimSpace(clientIP) == "" {
|
||||
return
|
||||
}
|
||||
|
||||
v, ok := wsConnCountByIP.Load(clientIP)
|
||||
wsConnCountByIPMu.Lock()
|
||||
defer wsConnCountByIPMu.Unlock()
|
||||
current, ok := wsConnCountByIP[clientIP]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
counter, ok := v.(*atomic.Int32)
|
||||
if !ok {
|
||||
if current <= 1 {
|
||||
delete(wsConnCountByIP, clientIP)
|
||||
return
|
||||
}
|
||||
next := counter.Add(-1)
|
||||
if next <= 0 {
|
||||
// Best-effort cleanup; safe even if a new slot was acquired concurrently.
|
||||
wsConnCountByIP.Delete(clientIP)
|
||||
}
|
||||
wsConnCountByIP[clientIP] = current - 1
|
||||
}
|
||||
|
||||
func handleQPSWebSocket(parentCtx context.Context, conn *websocket.Conn) {
|
||||
|
||||
@@ -64,9 +64,9 @@ func (h *ProxyHandler) List(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
out := make([]dto.ProxyWithAccountCount, 0, len(proxies))
|
||||
out := make([]dto.AdminProxyWithAccountCount, 0, len(proxies))
|
||||
for i := range proxies {
|
||||
out = append(out, *dto.ProxyWithAccountCountFromService(&proxies[i]))
|
||||
out = append(out, *dto.ProxyWithAccountCountFromServiceAdmin(&proxies[i]))
|
||||
}
|
||||
response.Paginated(c, out, total, page, pageSize)
|
||||
}
|
||||
@@ -83,9 +83,9 @@ func (h *ProxyHandler) GetAll(c *gin.Context) {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
out := make([]dto.ProxyWithAccountCount, 0, len(proxies))
|
||||
out := make([]dto.AdminProxyWithAccountCount, 0, len(proxies))
|
||||
for i := range proxies {
|
||||
out = append(out, *dto.ProxyWithAccountCountFromService(&proxies[i]))
|
||||
out = append(out, *dto.ProxyWithAccountCountFromServiceAdmin(&proxies[i]))
|
||||
}
|
||||
response.Success(c, out)
|
||||
return
|
||||
@@ -97,9 +97,9 @@ func (h *ProxyHandler) GetAll(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
out := make([]dto.Proxy, 0, len(proxies))
|
||||
out := make([]dto.AdminProxy, 0, len(proxies))
|
||||
for i := range proxies {
|
||||
out = append(out, *dto.ProxyFromService(&proxies[i]))
|
||||
out = append(out, *dto.ProxyFromServiceAdmin(&proxies[i]))
|
||||
}
|
||||
response.Success(c, out)
|
||||
}
|
||||
@@ -119,7 +119,7 @@ func (h *ProxyHandler) GetByID(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
response.Success(c, dto.ProxyFromService(proxy))
|
||||
response.Success(c, dto.ProxyFromServiceAdmin(proxy))
|
||||
}
|
||||
|
||||
// Create handles creating a new proxy
|
||||
@@ -143,7 +143,7 @@ func (h *ProxyHandler) Create(c *gin.Context) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dto.ProxyFromService(proxy), nil
|
||||
return dto.ProxyFromServiceAdmin(proxy), nil
|
||||
})
|
||||
}
|
||||
|
||||
@@ -176,7 +176,7 @@ func (h *ProxyHandler) Update(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
response.Success(c, dto.ProxyFromService(proxy))
|
||||
response.Success(c, dto.ProxyFromServiceAdmin(proxy))
|
||||
}
|
||||
|
||||
// Delete handles deleting a proxy
|
||||
|
||||
@@ -1,7 +1,13 @@
|
||||
package admin
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -14,21 +20,38 @@ import (
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// semverPattern 预编译 semver 格式校验正则
|
||||
var semverPattern = regexp.MustCompile(`^\d+\.\d+\.\d+$`)
|
||||
|
||||
// menuItemIDPattern validates custom menu item IDs: alphanumeric, hyphens, underscores only.
|
||||
var menuItemIDPattern = regexp.MustCompile(`^[a-zA-Z0-9_-]+$`)
|
||||
|
||||
// generateMenuItemID generates a short random hex ID for a custom menu item.
|
||||
func generateMenuItemID() (string, error) {
|
||||
b := make([]byte, 8)
|
||||
if _, err := rand.Read(b); err != nil {
|
||||
return "", fmt.Errorf("generate menu item ID: %w", err)
|
||||
}
|
||||
return hex.EncodeToString(b), nil
|
||||
}
|
||||
|
||||
// SettingHandler 系统设置处理器
|
||||
type SettingHandler struct {
|
||||
settingService *service.SettingService
|
||||
emailService *service.EmailService
|
||||
turnstileService *service.TurnstileService
|
||||
opsService *service.OpsService
|
||||
soraS3Storage *service.SoraS3Storage
|
||||
}
|
||||
|
||||
// NewSettingHandler 创建系统设置处理器
|
||||
func NewSettingHandler(settingService *service.SettingService, emailService *service.EmailService, turnstileService *service.TurnstileService, opsService *service.OpsService) *SettingHandler {
|
||||
func NewSettingHandler(settingService *service.SettingService, emailService *service.EmailService, turnstileService *service.TurnstileService, opsService *service.OpsService, soraS3Storage *service.SoraS3Storage) *SettingHandler {
|
||||
return &SettingHandler{
|
||||
settingService: settingService,
|
||||
emailService: emailService,
|
||||
turnstileService: turnstileService,
|
||||
opsService: opsService,
|
||||
soraS3Storage: soraS3Storage,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -43,10 +66,18 @@ func (h *SettingHandler) GetSettings(c *gin.Context) {
|
||||
|
||||
// Check if ops monitoring is enabled (respects config.ops.enabled)
|
||||
opsEnabled := h.opsService != nil && h.opsService.IsMonitoringEnabled(c.Request.Context())
|
||||
defaultSubscriptions := make([]dto.DefaultSubscriptionSetting, 0, len(settings.DefaultSubscriptions))
|
||||
for _, sub := range settings.DefaultSubscriptions {
|
||||
defaultSubscriptions = append(defaultSubscriptions, dto.DefaultSubscriptionSetting{
|
||||
GroupID: sub.GroupID,
|
||||
ValidityDays: sub.ValidityDays,
|
||||
})
|
||||
}
|
||||
|
||||
response.Success(c, dto.SystemSettings{
|
||||
RegistrationEnabled: settings.RegistrationEnabled,
|
||||
EmailVerifyEnabled: settings.EmailVerifyEnabled,
|
||||
RegistrationEmailSuffixWhitelist: settings.RegistrationEmailSuffixWhitelist,
|
||||
PromoCodeEnabled: settings.PromoCodeEnabled,
|
||||
PasswordResetEnabled: settings.PasswordResetEnabled,
|
||||
InvitationCodeEnabled: settings.InvitationCodeEnabled,
|
||||
@@ -76,8 +107,11 @@ func (h *SettingHandler) GetSettings(c *gin.Context) {
|
||||
HideCcsImportButton: settings.HideCcsImportButton,
|
||||
PurchaseSubscriptionEnabled: settings.PurchaseSubscriptionEnabled,
|
||||
PurchaseSubscriptionURL: settings.PurchaseSubscriptionURL,
|
||||
SoraClientEnabled: settings.SoraClientEnabled,
|
||||
CustomMenuItems: dto.ParseCustomMenuItems(settings.CustomMenuItems),
|
||||
DefaultConcurrency: settings.DefaultConcurrency,
|
||||
DefaultBalance: settings.DefaultBalance,
|
||||
DefaultSubscriptions: defaultSubscriptions,
|
||||
EnableModelFallback: settings.EnableModelFallback,
|
||||
FallbackModelAnthropic: settings.FallbackModelAnthropic,
|
||||
FallbackModelOpenAI: settings.FallbackModelOpenAI,
|
||||
@@ -89,18 +123,21 @@ func (h *SettingHandler) GetSettings(c *gin.Context) {
|
||||
OpsRealtimeMonitoringEnabled: settings.OpsRealtimeMonitoringEnabled,
|
||||
OpsQueryModeDefault: settings.OpsQueryModeDefault,
|
||||
OpsMetricsIntervalSeconds: settings.OpsMetricsIntervalSeconds,
|
||||
MinClaudeCodeVersion: settings.MinClaudeCodeVersion,
|
||||
AllowUngroupedKeyScheduling: settings.AllowUngroupedKeyScheduling,
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateSettingsRequest 更新设置请求
|
||||
type UpdateSettingsRequest struct {
|
||||
// 注册设置
|
||||
RegistrationEnabled bool `json:"registration_enabled"`
|
||||
EmailVerifyEnabled bool `json:"email_verify_enabled"`
|
||||
PromoCodeEnabled bool `json:"promo_code_enabled"`
|
||||
PasswordResetEnabled bool `json:"password_reset_enabled"`
|
||||
InvitationCodeEnabled bool `json:"invitation_code_enabled"`
|
||||
TotpEnabled bool `json:"totp_enabled"` // TOTP 双因素认证
|
||||
RegistrationEnabled bool `json:"registration_enabled"`
|
||||
EmailVerifyEnabled bool `json:"email_verify_enabled"`
|
||||
RegistrationEmailSuffixWhitelist []string `json:"registration_email_suffix_whitelist"`
|
||||
PromoCodeEnabled bool `json:"promo_code_enabled"`
|
||||
PasswordResetEnabled bool `json:"password_reset_enabled"`
|
||||
InvitationCodeEnabled bool `json:"invitation_code_enabled"`
|
||||
TotpEnabled bool `json:"totp_enabled"` // TOTP 双因素认证
|
||||
|
||||
// 邮件服务设置
|
||||
SMTPHost string `json:"smtp_host"`
|
||||
@@ -123,20 +160,23 @@ type UpdateSettingsRequest struct {
|
||||
LinuxDoConnectRedirectURL string `json:"linuxdo_connect_redirect_url"`
|
||||
|
||||
// OEM设置
|
||||
SiteName string `json:"site_name"`
|
||||
SiteLogo string `json:"site_logo"`
|
||||
SiteSubtitle string `json:"site_subtitle"`
|
||||
APIBaseURL string `json:"api_base_url"`
|
||||
ContactInfo string `json:"contact_info"`
|
||||
DocURL string `json:"doc_url"`
|
||||
HomeContent string `json:"home_content"`
|
||||
HideCcsImportButton bool `json:"hide_ccs_import_button"`
|
||||
PurchaseSubscriptionEnabled *bool `json:"purchase_subscription_enabled"`
|
||||
PurchaseSubscriptionURL *string `json:"purchase_subscription_url"`
|
||||
SiteName string `json:"site_name"`
|
||||
SiteLogo string `json:"site_logo"`
|
||||
SiteSubtitle string `json:"site_subtitle"`
|
||||
APIBaseURL string `json:"api_base_url"`
|
||||
ContactInfo string `json:"contact_info"`
|
||||
DocURL string `json:"doc_url"`
|
||||
HomeContent string `json:"home_content"`
|
||||
HideCcsImportButton bool `json:"hide_ccs_import_button"`
|
||||
PurchaseSubscriptionEnabled *bool `json:"purchase_subscription_enabled"`
|
||||
PurchaseSubscriptionURL *string `json:"purchase_subscription_url"`
|
||||
SoraClientEnabled bool `json:"sora_client_enabled"`
|
||||
CustomMenuItems *[]dto.CustomMenuItem `json:"custom_menu_items"`
|
||||
|
||||
// 默认配置
|
||||
DefaultConcurrency int `json:"default_concurrency"`
|
||||
DefaultBalance float64 `json:"default_balance"`
|
||||
DefaultConcurrency int `json:"default_concurrency"`
|
||||
DefaultBalance float64 `json:"default_balance"`
|
||||
DefaultSubscriptions []dto.DefaultSubscriptionSetting `json:"default_subscriptions"`
|
||||
|
||||
// Model fallback configuration
|
||||
EnableModelFallback bool `json:"enable_model_fallback"`
|
||||
@@ -154,6 +194,11 @@ type UpdateSettingsRequest struct {
|
||||
OpsRealtimeMonitoringEnabled *bool `json:"ops_realtime_monitoring_enabled"`
|
||||
OpsQueryModeDefault *string `json:"ops_query_mode_default"`
|
||||
OpsMetricsIntervalSeconds *int `json:"ops_metrics_interval_seconds"`
|
||||
|
||||
MinClaudeCodeVersion string `json:"min_claude_code_version"`
|
||||
|
||||
// 分组隔离
|
||||
AllowUngroupedKeyScheduling bool `json:"allow_ungrouped_key_scheduling"`
|
||||
}
|
||||
|
||||
// UpdateSettings 更新系统设置
|
||||
@@ -181,6 +226,7 @@ func (h *SettingHandler) UpdateSettings(c *gin.Context) {
|
||||
if req.SMTPPort <= 0 {
|
||||
req.SMTPPort = 587
|
||||
}
|
||||
req.DefaultSubscriptions = normalizeDefaultSubscriptions(req.DefaultSubscriptions)
|
||||
|
||||
// Turnstile 参数验证
|
||||
if req.TurnstileEnabled {
|
||||
@@ -276,6 +322,84 @@ func (h *SettingHandler) UpdateSettings(c *gin.Context) {
|
||||
}
|
||||
}
|
||||
|
||||
// 自定义菜单项验证
|
||||
const (
|
||||
maxCustomMenuItems = 20
|
||||
maxMenuItemLabelLen = 50
|
||||
maxMenuItemURLLen = 2048
|
||||
maxMenuItemIconSVGLen = 10 * 1024 // 10KB
|
||||
maxMenuItemIDLen = 32
|
||||
)
|
||||
|
||||
customMenuJSON := previousSettings.CustomMenuItems
|
||||
if req.CustomMenuItems != nil {
|
||||
items := *req.CustomMenuItems
|
||||
if len(items) > maxCustomMenuItems {
|
||||
response.BadRequest(c, "Too many custom menu items (max 20)")
|
||||
return
|
||||
}
|
||||
for i, item := range items {
|
||||
if strings.TrimSpace(item.Label) == "" {
|
||||
response.BadRequest(c, "Custom menu item label is required")
|
||||
return
|
||||
}
|
||||
if len(item.Label) > maxMenuItemLabelLen {
|
||||
response.BadRequest(c, "Custom menu item label is too long (max 50 characters)")
|
||||
return
|
||||
}
|
||||
if strings.TrimSpace(item.URL) == "" {
|
||||
response.BadRequest(c, "Custom menu item URL is required")
|
||||
return
|
||||
}
|
||||
if len(item.URL) > maxMenuItemURLLen {
|
||||
response.BadRequest(c, "Custom menu item URL is too long (max 2048 characters)")
|
||||
return
|
||||
}
|
||||
if err := config.ValidateAbsoluteHTTPURL(strings.TrimSpace(item.URL)); err != nil {
|
||||
response.BadRequest(c, "Custom menu item URL must be an absolute http(s) URL")
|
||||
return
|
||||
}
|
||||
if item.Visibility != "user" && item.Visibility != "admin" {
|
||||
response.BadRequest(c, "Custom menu item visibility must be 'user' or 'admin'")
|
||||
return
|
||||
}
|
||||
if len(item.IconSVG) > maxMenuItemIconSVGLen {
|
||||
response.BadRequest(c, "Custom menu item icon SVG is too large (max 10KB)")
|
||||
return
|
||||
}
|
||||
// Auto-generate ID if missing
|
||||
if strings.TrimSpace(item.ID) == "" {
|
||||
id, err := generateMenuItemID()
|
||||
if err != nil {
|
||||
response.Error(c, http.StatusInternalServerError, "Failed to generate menu item ID")
|
||||
return
|
||||
}
|
||||
items[i].ID = id
|
||||
} else if len(item.ID) > maxMenuItemIDLen {
|
||||
response.BadRequest(c, "Custom menu item ID is too long (max 32 characters)")
|
||||
return
|
||||
} else if !menuItemIDPattern.MatchString(item.ID) {
|
||||
response.BadRequest(c, "Custom menu item ID contains invalid characters (only a-z, A-Z, 0-9, - and _ are allowed)")
|
||||
return
|
||||
}
|
||||
}
|
||||
// ID uniqueness check
|
||||
seen := make(map[string]struct{}, len(items))
|
||||
for _, item := range items {
|
||||
if _, exists := seen[item.ID]; exists {
|
||||
response.BadRequest(c, "Duplicate custom menu item ID: "+item.ID)
|
||||
return
|
||||
}
|
||||
seen[item.ID] = struct{}{}
|
||||
}
|
||||
menuBytes, err := json.Marshal(items)
|
||||
if err != nil {
|
||||
response.BadRequest(c, "Failed to serialize custom menu items")
|
||||
return
|
||||
}
|
||||
customMenuJSON = string(menuBytes)
|
||||
}
|
||||
|
||||
// Ops metrics collector interval validation (seconds).
|
||||
if req.OpsMetricsIntervalSeconds != nil {
|
||||
v := *req.OpsMetricsIntervalSeconds
|
||||
@@ -287,47 +411,68 @@ func (h *SettingHandler) UpdateSettings(c *gin.Context) {
|
||||
}
|
||||
req.OpsMetricsIntervalSeconds = &v
|
||||
}
|
||||
defaultSubscriptions := make([]service.DefaultSubscriptionSetting, 0, len(req.DefaultSubscriptions))
|
||||
for _, sub := range req.DefaultSubscriptions {
|
||||
defaultSubscriptions = append(defaultSubscriptions, service.DefaultSubscriptionSetting{
|
||||
GroupID: sub.GroupID,
|
||||
ValidityDays: sub.ValidityDays,
|
||||
})
|
||||
}
|
||||
|
||||
// 验证最低版本号格式(空字符串=禁用,或合法 semver)
|
||||
if req.MinClaudeCodeVersion != "" {
|
||||
if !semverPattern.MatchString(req.MinClaudeCodeVersion) {
|
||||
response.Error(c, http.StatusBadRequest, "min_claude_code_version must be empty or a valid semver (e.g. 2.1.63)")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
settings := &service.SystemSettings{
|
||||
RegistrationEnabled: req.RegistrationEnabled,
|
||||
EmailVerifyEnabled: req.EmailVerifyEnabled,
|
||||
PromoCodeEnabled: req.PromoCodeEnabled,
|
||||
PasswordResetEnabled: req.PasswordResetEnabled,
|
||||
InvitationCodeEnabled: req.InvitationCodeEnabled,
|
||||
TotpEnabled: req.TotpEnabled,
|
||||
SMTPHost: req.SMTPHost,
|
||||
SMTPPort: req.SMTPPort,
|
||||
SMTPUsername: req.SMTPUsername,
|
||||
SMTPPassword: req.SMTPPassword,
|
||||
SMTPFrom: req.SMTPFrom,
|
||||
SMTPFromName: req.SMTPFromName,
|
||||
SMTPUseTLS: req.SMTPUseTLS,
|
||||
TurnstileEnabled: req.TurnstileEnabled,
|
||||
TurnstileSiteKey: req.TurnstileSiteKey,
|
||||
TurnstileSecretKey: req.TurnstileSecretKey,
|
||||
LinuxDoConnectEnabled: req.LinuxDoConnectEnabled,
|
||||
LinuxDoConnectClientID: req.LinuxDoConnectClientID,
|
||||
LinuxDoConnectClientSecret: req.LinuxDoConnectClientSecret,
|
||||
LinuxDoConnectRedirectURL: req.LinuxDoConnectRedirectURL,
|
||||
SiteName: req.SiteName,
|
||||
SiteLogo: req.SiteLogo,
|
||||
SiteSubtitle: req.SiteSubtitle,
|
||||
APIBaseURL: req.APIBaseURL,
|
||||
ContactInfo: req.ContactInfo,
|
||||
DocURL: req.DocURL,
|
||||
HomeContent: req.HomeContent,
|
||||
HideCcsImportButton: req.HideCcsImportButton,
|
||||
PurchaseSubscriptionEnabled: purchaseEnabled,
|
||||
PurchaseSubscriptionURL: purchaseURL,
|
||||
DefaultConcurrency: req.DefaultConcurrency,
|
||||
DefaultBalance: req.DefaultBalance,
|
||||
EnableModelFallback: req.EnableModelFallback,
|
||||
FallbackModelAnthropic: req.FallbackModelAnthropic,
|
||||
FallbackModelOpenAI: req.FallbackModelOpenAI,
|
||||
FallbackModelGemini: req.FallbackModelGemini,
|
||||
FallbackModelAntigravity: req.FallbackModelAntigravity,
|
||||
EnableIdentityPatch: req.EnableIdentityPatch,
|
||||
IdentityPatchPrompt: req.IdentityPatchPrompt,
|
||||
RegistrationEnabled: req.RegistrationEnabled,
|
||||
EmailVerifyEnabled: req.EmailVerifyEnabled,
|
||||
RegistrationEmailSuffixWhitelist: req.RegistrationEmailSuffixWhitelist,
|
||||
PromoCodeEnabled: req.PromoCodeEnabled,
|
||||
PasswordResetEnabled: req.PasswordResetEnabled,
|
||||
InvitationCodeEnabled: req.InvitationCodeEnabled,
|
||||
TotpEnabled: req.TotpEnabled,
|
||||
SMTPHost: req.SMTPHost,
|
||||
SMTPPort: req.SMTPPort,
|
||||
SMTPUsername: req.SMTPUsername,
|
||||
SMTPPassword: req.SMTPPassword,
|
||||
SMTPFrom: req.SMTPFrom,
|
||||
SMTPFromName: req.SMTPFromName,
|
||||
SMTPUseTLS: req.SMTPUseTLS,
|
||||
TurnstileEnabled: req.TurnstileEnabled,
|
||||
TurnstileSiteKey: req.TurnstileSiteKey,
|
||||
TurnstileSecretKey: req.TurnstileSecretKey,
|
||||
LinuxDoConnectEnabled: req.LinuxDoConnectEnabled,
|
||||
LinuxDoConnectClientID: req.LinuxDoConnectClientID,
|
||||
LinuxDoConnectClientSecret: req.LinuxDoConnectClientSecret,
|
||||
LinuxDoConnectRedirectURL: req.LinuxDoConnectRedirectURL,
|
||||
SiteName: req.SiteName,
|
||||
SiteLogo: req.SiteLogo,
|
||||
SiteSubtitle: req.SiteSubtitle,
|
||||
APIBaseURL: req.APIBaseURL,
|
||||
ContactInfo: req.ContactInfo,
|
||||
DocURL: req.DocURL,
|
||||
HomeContent: req.HomeContent,
|
||||
HideCcsImportButton: req.HideCcsImportButton,
|
||||
PurchaseSubscriptionEnabled: purchaseEnabled,
|
||||
PurchaseSubscriptionURL: purchaseURL,
|
||||
SoraClientEnabled: req.SoraClientEnabled,
|
||||
CustomMenuItems: customMenuJSON,
|
||||
DefaultConcurrency: req.DefaultConcurrency,
|
||||
DefaultBalance: req.DefaultBalance,
|
||||
DefaultSubscriptions: defaultSubscriptions,
|
||||
EnableModelFallback: req.EnableModelFallback,
|
||||
FallbackModelAnthropic: req.FallbackModelAnthropic,
|
||||
FallbackModelOpenAI: req.FallbackModelOpenAI,
|
||||
FallbackModelGemini: req.FallbackModelGemini,
|
||||
FallbackModelAntigravity: req.FallbackModelAntigravity,
|
||||
EnableIdentityPatch: req.EnableIdentityPatch,
|
||||
IdentityPatchPrompt: req.IdentityPatchPrompt,
|
||||
MinClaudeCodeVersion: req.MinClaudeCodeVersion,
|
||||
AllowUngroupedKeyScheduling: req.AllowUngroupedKeyScheduling,
|
||||
OpsMonitoringEnabled: func() bool {
|
||||
if req.OpsMonitoringEnabled != nil {
|
||||
return *req.OpsMonitoringEnabled
|
||||
@@ -367,10 +512,18 @@ func (h *SettingHandler) UpdateSettings(c *gin.Context) {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
updatedDefaultSubscriptions := make([]dto.DefaultSubscriptionSetting, 0, len(updatedSettings.DefaultSubscriptions))
|
||||
for _, sub := range updatedSettings.DefaultSubscriptions {
|
||||
updatedDefaultSubscriptions = append(updatedDefaultSubscriptions, dto.DefaultSubscriptionSetting{
|
||||
GroupID: sub.GroupID,
|
||||
ValidityDays: sub.ValidityDays,
|
||||
})
|
||||
}
|
||||
|
||||
response.Success(c, dto.SystemSettings{
|
||||
RegistrationEnabled: updatedSettings.RegistrationEnabled,
|
||||
EmailVerifyEnabled: updatedSettings.EmailVerifyEnabled,
|
||||
RegistrationEmailSuffixWhitelist: updatedSettings.RegistrationEmailSuffixWhitelist,
|
||||
PromoCodeEnabled: updatedSettings.PromoCodeEnabled,
|
||||
PasswordResetEnabled: updatedSettings.PasswordResetEnabled,
|
||||
InvitationCodeEnabled: updatedSettings.InvitationCodeEnabled,
|
||||
@@ -400,8 +553,11 @@ func (h *SettingHandler) UpdateSettings(c *gin.Context) {
|
||||
HideCcsImportButton: updatedSettings.HideCcsImportButton,
|
||||
PurchaseSubscriptionEnabled: updatedSettings.PurchaseSubscriptionEnabled,
|
||||
PurchaseSubscriptionURL: updatedSettings.PurchaseSubscriptionURL,
|
||||
SoraClientEnabled: updatedSettings.SoraClientEnabled,
|
||||
CustomMenuItems: dto.ParseCustomMenuItems(updatedSettings.CustomMenuItems),
|
||||
DefaultConcurrency: updatedSettings.DefaultConcurrency,
|
||||
DefaultBalance: updatedSettings.DefaultBalance,
|
||||
DefaultSubscriptions: updatedDefaultSubscriptions,
|
||||
EnableModelFallback: updatedSettings.EnableModelFallback,
|
||||
FallbackModelAnthropic: updatedSettings.FallbackModelAnthropic,
|
||||
FallbackModelOpenAI: updatedSettings.FallbackModelOpenAI,
|
||||
@@ -413,6 +569,8 @@ func (h *SettingHandler) UpdateSettings(c *gin.Context) {
|
||||
OpsRealtimeMonitoringEnabled: updatedSettings.OpsRealtimeMonitoringEnabled,
|
||||
OpsQueryModeDefault: updatedSettings.OpsQueryModeDefault,
|
||||
OpsMetricsIntervalSeconds: updatedSettings.OpsMetricsIntervalSeconds,
|
||||
MinClaudeCodeVersion: updatedSettings.MinClaudeCodeVersion,
|
||||
AllowUngroupedKeyScheduling: updatedSettings.AllowUngroupedKeyScheduling,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -444,6 +602,9 @@ func diffSettings(before *service.SystemSettings, after *service.SystemSettings,
|
||||
if before.EmailVerifyEnabled != after.EmailVerifyEnabled {
|
||||
changed = append(changed, "email_verify_enabled")
|
||||
}
|
||||
if !equalStringSlice(before.RegistrationEmailSuffixWhitelist, after.RegistrationEmailSuffixWhitelist) {
|
||||
changed = append(changed, "registration_email_suffix_whitelist")
|
||||
}
|
||||
if before.PasswordResetEnabled != after.PasswordResetEnabled {
|
||||
changed = append(changed, "password_reset_enabled")
|
||||
}
|
||||
@@ -522,6 +683,9 @@ func diffSettings(before *service.SystemSettings, after *service.SystemSettings,
|
||||
if before.DefaultBalance != after.DefaultBalance {
|
||||
changed = append(changed, "default_balance")
|
||||
}
|
||||
if !equalDefaultSubscriptions(before.DefaultSubscriptions, after.DefaultSubscriptions) {
|
||||
changed = append(changed, "default_subscriptions")
|
||||
}
|
||||
if before.EnableModelFallback != after.EnableModelFallback {
|
||||
changed = append(changed, "enable_model_fallback")
|
||||
}
|
||||
@@ -555,9 +719,65 @@ func diffSettings(before *service.SystemSettings, after *service.SystemSettings,
|
||||
if before.OpsMetricsIntervalSeconds != after.OpsMetricsIntervalSeconds {
|
||||
changed = append(changed, "ops_metrics_interval_seconds")
|
||||
}
|
||||
if before.MinClaudeCodeVersion != after.MinClaudeCodeVersion {
|
||||
changed = append(changed, "min_claude_code_version")
|
||||
}
|
||||
if before.AllowUngroupedKeyScheduling != after.AllowUngroupedKeyScheduling {
|
||||
changed = append(changed, "allow_ungrouped_key_scheduling")
|
||||
}
|
||||
if before.PurchaseSubscriptionEnabled != after.PurchaseSubscriptionEnabled {
|
||||
changed = append(changed, "purchase_subscription_enabled")
|
||||
}
|
||||
if before.PurchaseSubscriptionURL != after.PurchaseSubscriptionURL {
|
||||
changed = append(changed, "purchase_subscription_url")
|
||||
}
|
||||
if before.CustomMenuItems != after.CustomMenuItems {
|
||||
changed = append(changed, "custom_menu_items")
|
||||
}
|
||||
return changed
|
||||
}
|
||||
|
||||
func normalizeDefaultSubscriptions(input []dto.DefaultSubscriptionSetting) []dto.DefaultSubscriptionSetting {
|
||||
if len(input) == 0 {
|
||||
return nil
|
||||
}
|
||||
normalized := make([]dto.DefaultSubscriptionSetting, 0, len(input))
|
||||
for _, item := range input {
|
||||
if item.GroupID <= 0 || item.ValidityDays <= 0 {
|
||||
continue
|
||||
}
|
||||
if item.ValidityDays > service.MaxValidityDays {
|
||||
item.ValidityDays = service.MaxValidityDays
|
||||
}
|
||||
normalized = append(normalized, item)
|
||||
}
|
||||
return normalized
|
||||
}
|
||||
|
||||
func equalStringSlice(a, b []string) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
for i := range a {
|
||||
if a[i] != b[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func equalDefaultSubscriptions(a, b []service.DefaultSubscriptionSetting) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
for i := range a {
|
||||
if a[i].GroupID != b[i].GroupID || a[i].ValidityDays != b[i].ValidityDays {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// TestSMTPRequest 测试SMTP连接请求
|
||||
type TestSMTPRequest struct {
|
||||
SMTPHost string `json:"smtp_host" binding:"required"`
|
||||
@@ -750,6 +970,384 @@ func (h *SettingHandler) GetStreamTimeoutSettings(c *gin.Context) {
|
||||
})
|
||||
}
|
||||
|
||||
func toSoraS3SettingsDTO(settings *service.SoraS3Settings) dto.SoraS3Settings {
|
||||
if settings == nil {
|
||||
return dto.SoraS3Settings{}
|
||||
}
|
||||
return dto.SoraS3Settings{
|
||||
Enabled: settings.Enabled,
|
||||
Endpoint: settings.Endpoint,
|
||||
Region: settings.Region,
|
||||
Bucket: settings.Bucket,
|
||||
AccessKeyID: settings.AccessKeyID,
|
||||
SecretAccessKeyConfigured: settings.SecretAccessKeyConfigured,
|
||||
Prefix: settings.Prefix,
|
||||
ForcePathStyle: settings.ForcePathStyle,
|
||||
CDNURL: settings.CDNURL,
|
||||
DefaultStorageQuotaBytes: settings.DefaultStorageQuotaBytes,
|
||||
}
|
||||
}
|
||||
|
||||
func toSoraS3ProfileDTO(profile service.SoraS3Profile) dto.SoraS3Profile {
|
||||
return dto.SoraS3Profile{
|
||||
ProfileID: profile.ProfileID,
|
||||
Name: profile.Name,
|
||||
IsActive: profile.IsActive,
|
||||
Enabled: profile.Enabled,
|
||||
Endpoint: profile.Endpoint,
|
||||
Region: profile.Region,
|
||||
Bucket: profile.Bucket,
|
||||
AccessKeyID: profile.AccessKeyID,
|
||||
SecretAccessKeyConfigured: profile.SecretAccessKeyConfigured,
|
||||
Prefix: profile.Prefix,
|
||||
ForcePathStyle: profile.ForcePathStyle,
|
||||
CDNURL: profile.CDNURL,
|
||||
DefaultStorageQuotaBytes: profile.DefaultStorageQuotaBytes,
|
||||
UpdatedAt: profile.UpdatedAt,
|
||||
}
|
||||
}
|
||||
|
||||
func validateSoraS3RequiredWhenEnabled(enabled bool, endpoint, bucket, accessKeyID, secretAccessKey string, hasStoredSecret bool) error {
|
||||
if !enabled {
|
||||
return nil
|
||||
}
|
||||
if strings.TrimSpace(endpoint) == "" {
|
||||
return fmt.Errorf("S3 Endpoint is required when enabled")
|
||||
}
|
||||
if strings.TrimSpace(bucket) == "" {
|
||||
return fmt.Errorf("S3 Bucket is required when enabled")
|
||||
}
|
||||
if strings.TrimSpace(accessKeyID) == "" {
|
||||
return fmt.Errorf("S3 Access Key ID is required when enabled")
|
||||
}
|
||||
if strings.TrimSpace(secretAccessKey) != "" || hasStoredSecret {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("S3 Secret Access Key is required when enabled")
|
||||
}
|
||||
|
||||
func findSoraS3ProfileByID(items []service.SoraS3Profile, profileID string) *service.SoraS3Profile {
|
||||
for idx := range items {
|
||||
if items[idx].ProfileID == profileID {
|
||||
return &items[idx]
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetSoraS3Settings 获取 Sora S3 存储配置(兼容旧单配置接口)
|
||||
// GET /api/v1/admin/settings/sora-s3
|
||||
func (h *SettingHandler) GetSoraS3Settings(c *gin.Context) {
|
||||
settings, err := h.settingService.GetSoraS3Settings(c.Request.Context())
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
response.Success(c, toSoraS3SettingsDTO(settings))
|
||||
}
|
||||
|
||||
// ListSoraS3Profiles returns all Sora S3 profiles together with the ID of the
// currently active profile.
// GET /api/v1/admin/settings/sora-s3/profiles
func (h *SettingHandler) ListSoraS3Profiles(c *gin.Context) {
	result, err := h.settingService.ListSoraS3Profiles(c.Request.Context())
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	// Map each service-layer profile to its transport DTO; pre-size the
	// slice since the count is known.
	items := make([]dto.SoraS3Profile, 0, len(result.Items))
	for idx := range result.Items {
		items = append(items, toSoraS3ProfileDTO(result.Items[idx]))
	}
	response.Success(c, dto.ListSoraS3ProfilesResponse{
		ActiveProfileID: result.ActiveProfileID,
		Items:           items,
	})
}
|
||||
|
||||
// UpdateSoraS3SettingsRequest is the request payload for updating or testing
// the Sora S3 configuration (legacy single-profile endpoint).
type UpdateSoraS3SettingsRequest struct {
	// ProfileID optionally references a stored profile; the connection test
	// uses it to resolve a stored secret when the request omits one.
	ProfileID string `json:"profile_id"`
	Enabled   bool   `json:"enabled"`
	// S3 connection parameters.
	Endpoint    string `json:"endpoint"`
	Region      string `json:"region"`
	Bucket      string `json:"bucket"`
	AccessKeyID string `json:"access_key_id"`
	// SecretAccessKey may be left empty to keep the currently stored secret.
	SecretAccessKey string `json:"secret_access_key"`
	Prefix          string `json:"prefix"`
	ForcePathStyle  bool   `json:"force_path_style"`
	CDNURL          string `json:"cdn_url"`
	// DefaultStorageQuotaBytes: negative values are normalized to 0 by the
	// handlers before use.
	DefaultStorageQuotaBytes int64 `json:"default_storage_quota_bytes"`
}
|
||||
|
||||
// CreateSoraS3ProfileRequest is the request payload for creating a new
// Sora S3 profile.
type CreateSoraS3ProfileRequest struct {
	// ProfileID and Name are both required (validated by the handler).
	ProfileID string `json:"profile_id"`
	Name      string `json:"name"`
	// SetActive is forwarded to the service on creation; presumably it makes
	// the new profile the active one — confirm against the service layer.
	SetActive bool `json:"set_active"`
	Enabled   bool `json:"enabled"`
	// S3 connection parameters.
	Endpoint        string `json:"endpoint"`
	Region          string `json:"region"`
	Bucket          string `json:"bucket"`
	AccessKeyID     string `json:"access_key_id"`
	SecretAccessKey string `json:"secret_access_key"`
	Prefix          string `json:"prefix"`
	ForcePathStyle  bool   `json:"force_path_style"`
	CDNURL          string `json:"cdn_url"`
	// DefaultStorageQuotaBytes: negative values are normalized to 0.
	DefaultStorageQuotaBytes int64 `json:"default_storage_quota_bytes"`
}
|
||||
|
||||
// UpdateSoraS3ProfileRequest is the request payload for updating an existing
// Sora S3 profile. The profile ID comes from the URL path, not the body.
type UpdateSoraS3ProfileRequest struct {
	// Name is required (validated by the handler).
	Name    string `json:"name"`
	Enabled bool   `json:"enabled"`
	// S3 connection parameters.
	Endpoint    string `json:"endpoint"`
	Region      string `json:"region"`
	Bucket      string `json:"bucket"`
	AccessKeyID string `json:"access_key_id"`
	// SecretAccessKey may be left empty to keep the profile's stored secret.
	SecretAccessKey string `json:"secret_access_key"`
	Prefix          string `json:"prefix"`
	ForcePathStyle  bool   `json:"force_path_style"`
	CDNURL          string `json:"cdn_url"`
	// DefaultStorageQuotaBytes: negative values are normalized to 0.
	DefaultStorageQuotaBytes int64 `json:"default_storage_quota_bytes"`
}
|
||||
|
||||
// CreateSoraS3Profile creates a new Sora S3 profile.
// POST /api/v1/admin/settings/sora-s3/profiles
func (h *SettingHandler) CreateSoraS3Profile(c *gin.Context) {
	var req CreateSoraS3ProfileRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		response.BadRequest(c, "Invalid request: "+err.Error())
		return
	}

	// Negative quotas are normalized to 0 rather than rejected.
	if req.DefaultStorageQuotaBytes < 0 {
		req.DefaultStorageQuotaBytes = 0
	}
	if strings.TrimSpace(req.Name) == "" {
		response.BadRequest(c, "Name is required")
		return
	}
	if strings.TrimSpace(req.ProfileID) == "" {
		response.BadRequest(c, "Profile ID is required")
		return
	}
	// hasStoredSecret is false for a brand-new profile, so an enabled profile
	// must carry its secret in this request.
	if err := validateSoraS3RequiredWhenEnabled(req.Enabled, req.Endpoint, req.Bucket, req.AccessKeyID, req.SecretAccessKey, false); err != nil {
		response.BadRequest(c, err.Error())
		return
	}

	created, err := h.settingService.CreateSoraS3Profile(c.Request.Context(), &service.SoraS3Profile{
		ProfileID:                req.ProfileID,
		Name:                     req.Name,
		Enabled:                  req.Enabled,
		Endpoint:                 req.Endpoint,
		Region:                   req.Region,
		Bucket:                   req.Bucket,
		AccessKeyID:              req.AccessKeyID,
		SecretAccessKey:          req.SecretAccessKey,
		Prefix:                   req.Prefix,
		ForcePathStyle:           req.ForcePathStyle,
		CDNURL:                   req.CDNURL,
		DefaultStorageQuotaBytes: req.DefaultStorageQuotaBytes,
	}, req.SetActive)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}

	response.Success(c, toSoraS3ProfileDTO(*created))
}
|
||||
|
||||
// UpdateSoraS3Profile updates an existing Sora S3 profile.
// PUT /api/v1/admin/settings/sora-s3/profiles/:profile_id
func (h *SettingHandler) UpdateSoraS3Profile(c *gin.Context) {
	profileID := strings.TrimSpace(c.Param("profile_id"))
	if profileID == "" {
		response.BadRequest(c, "Profile ID is required")
		return
	}

	var req UpdateSoraS3ProfileRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		response.BadRequest(c, "Invalid request: "+err.Error())
		return
	}

	// Negative quotas are normalized to 0 rather than rejected.
	if req.DefaultStorageQuotaBytes < 0 {
		req.DefaultStorageQuotaBytes = 0
	}
	if strings.TrimSpace(req.Name) == "" {
		response.BadRequest(c, "Name is required")
		return
	}

	// Look up the current profile so validation can honor an already-stored
	// secret (the client may omit the secret to keep the existing one).
	// NOTE(review): list-then-update is not atomic; a concurrent delete is
	// surfaced by the service call below rather than here.
	existingList, err := h.settingService.ListSoraS3Profiles(c.Request.Context())
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	existing := findSoraS3ProfileByID(existingList.Items, profileID)
	if existing == nil {
		response.ErrorFrom(c, service.ErrSoraS3ProfileNotFound)
		return
	}
	if err := validateSoraS3RequiredWhenEnabled(req.Enabled, req.Endpoint, req.Bucket, req.AccessKeyID, req.SecretAccessKey, existing.SecretAccessKeyConfigured); err != nil {
		response.BadRequest(c, err.Error())
		return
	}

	updated, updateErr := h.settingService.UpdateSoraS3Profile(c.Request.Context(), profileID, &service.SoraS3Profile{
		Name:                     req.Name,
		Enabled:                  req.Enabled,
		Endpoint:                 req.Endpoint,
		Region:                   req.Region,
		Bucket:                   req.Bucket,
		AccessKeyID:              req.AccessKeyID,
		SecretAccessKey:          req.SecretAccessKey,
		Prefix:                   req.Prefix,
		ForcePathStyle:           req.ForcePathStyle,
		CDNURL:                   req.CDNURL,
		DefaultStorageQuotaBytes: req.DefaultStorageQuotaBytes,
	})
	if updateErr != nil {
		response.ErrorFrom(c, updateErr)
		return
	}

	response.Success(c, toSoraS3ProfileDTO(*updated))
}
|
||||
|
||||
// DeleteSoraS3Profile deletes a Sora S3 profile.
// DELETE /api/v1/admin/settings/sora-s3/profiles/:profile_id
func (h *SettingHandler) DeleteSoraS3Profile(c *gin.Context) {
	profileID := strings.TrimSpace(c.Param("profile_id"))
	if profileID == "" {
		response.BadRequest(c, "Profile ID is required")
		return
	}
	if err := h.settingService.DeleteSoraS3Profile(c.Request.Context(), profileID); err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Success(c, gin.H{"deleted": true})
}
|
||||
|
||||
// SetActiveSoraS3Profile switches the active Sora S3 profile.
// POST /api/v1/admin/settings/sora-s3/profiles/:profile_id/activate
func (h *SettingHandler) SetActiveSoraS3Profile(c *gin.Context) {
	profileID := strings.TrimSpace(c.Param("profile_id"))
	if profileID == "" {
		response.BadRequest(c, "Profile ID is required")
		return
	}
	// The service returns the newly activated profile, which is echoed back.
	active, err := h.settingService.SetActiveSoraS3Profile(c.Request.Context(), profileID)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Success(c, toSoraS3ProfileDTO(*active))
}
|
||||
|
||||
// UpdateSoraS3Settings updates the Sora S3 storage settings
// (legacy single-profile endpoint).
// PUT /api/v1/admin/settings/sora-s3
func (h *SettingHandler) UpdateSoraS3Settings(c *gin.Context) {
	var req UpdateSoraS3SettingsRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		response.BadRequest(c, "Invalid request: "+err.Error())
		return
	}

	// Fetch the current settings so validation can accept an omitted secret
	// when one is already stored.
	existing, err := h.settingService.GetSoraS3Settings(c.Request.Context())
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}

	// Negative quotas are normalized to 0 rather than rejected.
	if req.DefaultStorageQuotaBytes < 0 {
		req.DefaultStorageQuotaBytes = 0
	}
	if err := validateSoraS3RequiredWhenEnabled(req.Enabled, req.Endpoint, req.Bucket, req.AccessKeyID, req.SecretAccessKey, existing.SecretAccessKeyConfigured); err != nil {
		response.BadRequest(c, err.Error())
		return
	}

	settings := &service.SoraS3Settings{
		Enabled:                  req.Enabled,
		Endpoint:                 req.Endpoint,
		Region:                   req.Region,
		Bucket:                   req.Bucket,
		AccessKeyID:              req.AccessKeyID,
		SecretAccessKey:          req.SecretAccessKey,
		Prefix:                   req.Prefix,
		ForcePathStyle:           req.ForcePathStyle,
		CDNURL:                   req.CDNURL,
		DefaultStorageQuotaBytes: req.DefaultStorageQuotaBytes,
	}
	if err := h.settingService.SetSoraS3Settings(c.Request.Context(), settings); err != nil {
		response.ErrorFrom(c, err)
		return
	}

	// Re-read after the write so the response reflects the persisted state.
	updatedSettings, err := h.settingService.GetSoraS3Settings(c.Request.Context())
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Success(c, toSoraS3SettingsDTO(updatedSettings))
}
|
||||
|
||||
// TestSoraS3Connection probes Sora S3 connectivity (HeadBucket) with the
// supplied configuration without persisting anything.
// POST /api/v1/admin/settings/sora-s3/test
func (h *SettingHandler) TestSoraS3Connection(c *gin.Context) {
	if h.soraS3Storage == nil {
		response.Error(c, 500, "S3 存储服务未初始化")
		return
	}

	var req UpdateSoraS3SettingsRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		response.BadRequest(c, "Invalid request: "+err.Error())
		return
	}
	if !req.Enabled {
		response.BadRequest(c, "S3 未启用,无法测试连接")
		return
	}

	// An omitted secret means "use the stored one": first try the referenced
	// profile, then fall back to the legacy single-settings secret. Lookup
	// errors are deliberately ignored here — the probe below will fail with a
	// meaningful message if no secret could be resolved.
	if req.SecretAccessKey == "" {
		if req.ProfileID != "" {
			profiles, err := h.settingService.ListSoraS3Profiles(c.Request.Context())
			if err == nil {
				profile := findSoraS3ProfileByID(profiles.Items, req.ProfileID)
				if profile != nil {
					req.SecretAccessKey = profile.SecretAccessKey
				}
			}
		}
		if req.SecretAccessKey == "" {
			existing, err := h.settingService.GetSoraS3Settings(c.Request.Context())
			if err == nil {
				req.SecretAccessKey = existing.SecretAccessKey
			}
		}
	}

	// Build a transient settings object for the probe only.
	testCfg := &service.SoraS3Settings{
		Enabled:         true,
		Endpoint:        req.Endpoint,
		Region:          req.Region,
		Bucket:          req.Bucket,
		AccessKeyID:     req.AccessKeyID,
		SecretAccessKey: req.SecretAccessKey,
		Prefix:          req.Prefix,
		ForcePathStyle:  req.ForcePathStyle,
		CDNURL:          req.CDNURL,
	}
	if err := h.soraS3Storage.TestConnectionWithSettings(c.Request.Context(), testCfg); err != nil {
		response.Error(c, 400, "S3 连接测试失败: "+err.Error())
		return
	}
	response.Success(c, gin.H{"message": "S3 连接成功"})
}
|
||||
|
||||
// UpdateStreamTimeoutSettingsRequest 更新流超时配置请求
|
||||
type UpdateStreamTimeoutSettingsRequest struct {
|
||||
Enabled bool `json:"enabled"`
|
||||
|
||||
95
backend/internal/handler/admin/snapshot_cache.go
Normal file
95
backend/internal/handler/admin/snapshot_cache.go
Normal file
@@ -0,0 +1,95 @@
|
||||
package admin
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type snapshotCacheEntry struct {
|
||||
ETag string
|
||||
Payload any
|
||||
ExpiresAt time.Time
|
||||
}
|
||||
|
||||
type snapshotCache struct {
|
||||
mu sync.RWMutex
|
||||
ttl time.Duration
|
||||
items map[string]snapshotCacheEntry
|
||||
}
|
||||
|
||||
func newSnapshotCache(ttl time.Duration) *snapshotCache {
|
||||
if ttl <= 0 {
|
||||
ttl = 30 * time.Second
|
||||
}
|
||||
return &snapshotCache{
|
||||
ttl: ttl,
|
||||
items: make(map[string]snapshotCacheEntry),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *snapshotCache) Get(key string) (snapshotCacheEntry, bool) {
|
||||
if c == nil || key == "" {
|
||||
return snapshotCacheEntry{}, false
|
||||
}
|
||||
now := time.Now()
|
||||
|
||||
c.mu.RLock()
|
||||
entry, ok := c.items[key]
|
||||
c.mu.RUnlock()
|
||||
if !ok {
|
||||
return snapshotCacheEntry{}, false
|
||||
}
|
||||
if now.After(entry.ExpiresAt) {
|
||||
c.mu.Lock()
|
||||
delete(c.items, key)
|
||||
c.mu.Unlock()
|
||||
return snapshotCacheEntry{}, false
|
||||
}
|
||||
return entry, true
|
||||
}
|
||||
|
||||
func (c *snapshotCache) Set(key string, payload any) snapshotCacheEntry {
|
||||
if c == nil {
|
||||
return snapshotCacheEntry{}
|
||||
}
|
||||
entry := snapshotCacheEntry{
|
||||
ETag: buildETagFromAny(payload),
|
||||
Payload: payload,
|
||||
ExpiresAt: time.Now().Add(c.ttl),
|
||||
}
|
||||
if key == "" {
|
||||
return entry
|
||||
}
|
||||
c.mu.Lock()
|
||||
c.items[key] = entry
|
||||
c.mu.Unlock()
|
||||
return entry
|
||||
}
|
||||
|
||||
func buildETagFromAny(payload any) string {
|
||||
raw, err := json.Marshal(payload)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
sum := sha256.Sum256(raw)
|
||||
return "\"" + hex.EncodeToString(sum[:]) + "\""
|
||||
}
|
||||
|
||||
// parseBoolQueryWithDefault interprets raw as a boolean query value.
// Accepted truthy spellings are "1", "true", "yes", "on"; falsy spellings are
// "0", "false", "no", "off" (case-insensitive, surrounding whitespace is
// ignored). Anything else — including the empty string — yields def.
func parseBoolQueryWithDefault(raw string, def bool) bool {
	v := strings.ToLower(strings.TrimSpace(raw))
	if v == "1" || v == "true" || v == "yes" || v == "on" {
		return true
	}
	if v == "0" || v == "false" || v == "no" || v == "off" {
		return false
	}
	return def
}
|
||||
128
backend/internal/handler/admin/snapshot_cache_test.go
Normal file
128
backend/internal/handler/admin/snapshot_cache_test.go
Normal file
@@ -0,0 +1,128 @@
|
||||
//go:build unit
|
||||
|
||||
package admin
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// Verifies Set returns a populated entry and a subsequent Get sees the same ETag.
func TestSnapshotCache_SetAndGet(t *testing.T) {
	c := newSnapshotCache(5 * time.Second)

	entry := c.Set("key1", map[string]string{"hello": "world"})
	require.NotEmpty(t, entry.ETag)
	require.NotNil(t, entry.Payload)

	got, ok := c.Get("key1")
	require.True(t, ok)
	require.Equal(t, entry.ETag, got.ETag)
}

// Verifies entries past their TTL are treated as misses.
func TestSnapshotCache_Expiration(t *testing.T) {
	c := newSnapshotCache(1 * time.Millisecond)

	c.Set("key1", "value")
	time.Sleep(5 * time.Millisecond)

	_, ok := c.Get("key1")
	require.False(t, ok, "expired entry should not be returned")
}

// Verifies the empty key always misses on Get.
func TestSnapshotCache_GetEmptyKey(t *testing.T) {
	c := newSnapshotCache(5 * time.Second)
	_, ok := c.Get("")
	require.False(t, ok)
}

// Verifies a key that was never stored misses.
func TestSnapshotCache_GetMiss(t *testing.T) {
	c := newSnapshotCache(5 * time.Second)
	_, ok := c.Get("nonexistent")
	require.False(t, ok)
}

// Verifies a nil *snapshotCache is safe to call: Get misses and Set returns
// a zero-value entry.
func TestSnapshotCache_NilReceiver(t *testing.T) {
	var c *snapshotCache
	_, ok := c.Get("key")
	require.False(t, ok)

	entry := c.Set("key", "value")
	require.Empty(t, entry.ETag)
}

// Verifies Set with an empty key builds an entry but does not store it.
func TestSnapshotCache_SetEmptyKey(t *testing.T) {
	c := newSnapshotCache(5 * time.Second)

	// Set with empty key should return entry but not store it
	entry := c.Set("", "value")
	require.NotEmpty(t, entry.ETag)

	_, ok := c.Get("")
	require.False(t, ok)
}

// Verifies non-positive TTLs fall back to the 30-second default.
func TestSnapshotCache_DefaultTTL(t *testing.T) {
	c := newSnapshotCache(0)
	require.Equal(t, 30*time.Second, c.ttl)

	c2 := newSnapshotCache(-1 * time.Second)
	require.Equal(t, 30*time.Second, c2.ttl)
}

// Verifies identical payloads hash to identical ETags regardless of key.
func TestSnapshotCache_ETagDeterministic(t *testing.T) {
	c := newSnapshotCache(5 * time.Second)
	payload := map[string]int{"a": 1, "b": 2}

	entry1 := c.Set("k1", payload)
	entry2 := c.Set("k2", payload)
	require.Equal(t, entry1.ETag, entry2.ETag, "same payload should produce same ETag")
}

// Verifies the ETag is a double-quoted string.
func TestSnapshotCache_ETagFormat(t *testing.T) {
	c := newSnapshotCache(5 * time.Second)
	entry := c.Set("k", "test")
	// ETag should be quoted hex string: "abcdef..."
	require.True(t, len(entry.ETag) > 2)
	require.Equal(t, byte('"'), entry.ETag[0])
	require.Equal(t, byte('"'), entry.ETag[len(entry.ETag)-1])
}

// Verifies payloads that cannot be JSON-marshaled yield an empty ETag.
func TestBuildETagFromAny_UnmarshalablePayload(t *testing.T) {
	// channels are not JSON-serializable
	etag := buildETagFromAny(make(chan int))
	require.Empty(t, etag)
}

// Table-driven coverage of parseBoolQueryWithDefault: truthy/falsy spellings,
// case folding, whitespace trimming, and default fallbacks.
func TestParseBoolQueryWithDefault(t *testing.T) {
	tests := []struct {
		name string
		raw  string
		def  bool
		want bool
	}{
		{"empty returns default true", "", true, true},
		{"empty returns default false", "", false, false},
		{"1", "1", false, true},
		{"true", "true", false, true},
		{"TRUE", "TRUE", false, true},
		{"yes", "yes", false, true},
		{"on", "on", false, true},
		{"0", "0", true, false},
		{"false", "false", true, false},
		{"FALSE", "FALSE", true, false},
		{"no", "no", true, false},
		{"off", "off", true, false},
		{"whitespace trimmed", " true ", false, true},
		{"unknown returns default true", "maybe", true, true},
		{"unknown returns default false", "maybe", false, false},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			got := parseBoolQueryWithDefault(tc.raw, tc.def)
			require.Equal(t, tc.want, got)
		})
	}
}
|
||||
@@ -225,6 +225,92 @@ func TestUsageHandlerCreateCleanupTaskInvalidEndDate(t *testing.T) {
|
||||
require.Equal(t, http.StatusBadRequest, recorder.Code)
|
||||
}
|
||||
|
||||
// Verifies an unrecognized request_type in the cleanup-task payload is
// rejected with 400.
func TestUsageHandlerCreateCleanupTaskInvalidRequestType(t *testing.T) {
	repo := &cleanupRepoStub{}
	cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, MaxRangeDays: 31}}
	cleanupService := service.NewUsageCleanupService(repo, nil, nil, cfg)
	router := setupCleanupRouter(cleanupService, 88)

	payload := map[string]any{
		"start_date":   "2024-01-01",
		"end_date":     "2024-01-02",
		"timezone":     "UTC",
		"request_type": "invalid",
	}
	body, err := json.Marshal(payload)
	require.NoError(t, err)

	req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/usage/cleanup-tasks", bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	recorder := httptest.NewRecorder()
	router.ServeHTTP(recorder, req)

	require.Equal(t, http.StatusBadRequest, recorder.Code)
}

// Verifies request_type takes priority over the legacy stream flag: the
// created task carries the parsed RequestType and a nil Stream filter.
func TestUsageHandlerCreateCleanupTaskRequestTypePriority(t *testing.T) {
	repo := &cleanupRepoStub{}
	cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, MaxRangeDays: 31}}
	cleanupService := service.NewUsageCleanupService(repo, nil, nil, cfg)
	router := setupCleanupRouter(cleanupService, 99)

	payload := map[string]any{
		"start_date":   "2024-01-01",
		"end_date":     "2024-01-02",
		"timezone":     "UTC",
		"request_type": "ws_v2",
		"stream":       false,
	}
	body, err := json.Marshal(payload)
	require.NoError(t, err)

	req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/usage/cleanup-tasks", bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	recorder := httptest.NewRecorder()
	router.ServeHTTP(recorder, req)

	require.Equal(t, http.StatusOK, recorder.Code)

	// Inspect the repository stub under its lock to assert what was stored.
	repo.mu.Lock()
	defer repo.mu.Unlock()
	require.Len(t, repo.created, 1)
	created := repo.created[0]
	require.NotNil(t, created.Filters.RequestType)
	require.Equal(t, int16(service.RequestTypeWSV2), *created.Filters.RequestType)
	require.Nil(t, created.Filters.Stream)
}

// Verifies a payload carrying only the legacy stream flag still works: the
// task keeps the Stream filter and leaves RequestType nil.
func TestUsageHandlerCreateCleanupTaskWithLegacyStream(t *testing.T) {
	repo := &cleanupRepoStub{}
	cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, MaxRangeDays: 31}}
	cleanupService := service.NewUsageCleanupService(repo, nil, nil, cfg)
	router := setupCleanupRouter(cleanupService, 99)

	payload := map[string]any{
		"start_date": "2024-01-01",
		"end_date":   "2024-01-02",
		"timezone":   "UTC",
		"stream":     true,
	}
	body, err := json.Marshal(payload)
	require.NoError(t, err)

	req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/usage/cleanup-tasks", bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	recorder := httptest.NewRecorder()
	router.ServeHTTP(recorder, req)

	require.Equal(t, http.StatusOK, recorder.Code)

	repo.mu.Lock()
	defer repo.mu.Unlock()
	require.Len(t, repo.created, 1)
	created := repo.created[0]
	require.Nil(t, created.Filters.RequestType)
	require.NotNil(t, created.Filters.Stream)
	require.True(t, *created.Filters.Stream)
}
|
||||
|
||||
func TestUsageHandlerCreateCleanupTaskSuccess(t *testing.T) {
|
||||
repo := &cleanupRepoStub{}
|
||||
cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, MaxRangeDays: 31}}
|
||||
|
||||
@@ -51,6 +51,7 @@ type CreateUsageCleanupTaskRequest struct {
|
||||
AccountID *int64 `json:"account_id"`
|
||||
GroupID *int64 `json:"group_id"`
|
||||
Model *string `json:"model"`
|
||||
RequestType *string `json:"request_type"`
|
||||
Stream *bool `json:"stream"`
|
||||
BillingType *int8 `json:"billing_type"`
|
||||
Timezone string `json:"timezone"`
|
||||
@@ -60,6 +61,15 @@ type CreateUsageCleanupTaskRequest struct {
|
||||
// GET /api/v1/admin/usage
|
||||
func (h *UsageHandler) List(c *gin.Context) {
|
||||
page, pageSize := response.ParsePagination(c)
|
||||
exactTotal := false
|
||||
if exactTotalRaw := strings.TrimSpace(c.Query("exact_total")); exactTotalRaw != "" {
|
||||
parsed, err := strconv.ParseBool(exactTotalRaw)
|
||||
if err != nil {
|
||||
response.BadRequest(c, "Invalid exact_total value, use true or false")
|
||||
return
|
||||
}
|
||||
exactTotal = parsed
|
||||
}
|
||||
|
||||
// Parse filters
|
||||
var userID, apiKeyID, accountID, groupID int64
|
||||
@@ -101,8 +111,17 @@ func (h *UsageHandler) List(c *gin.Context) {
|
||||
|
||||
model := c.Query("model")
|
||||
|
||||
var requestType *int16
|
||||
var stream *bool
|
||||
if streamStr := c.Query("stream"); streamStr != "" {
|
||||
if requestTypeStr := strings.TrimSpace(c.Query("request_type")); requestTypeStr != "" {
|
||||
parsed, err := service.ParseUsageRequestType(requestTypeStr)
|
||||
if err != nil {
|
||||
response.BadRequest(c, err.Error())
|
||||
return
|
||||
}
|
||||
value := int16(parsed)
|
||||
requestType = &value
|
||||
} else if streamStr := c.Query("stream"); streamStr != "" {
|
||||
val, err := strconv.ParseBool(streamStr)
|
||||
if err != nil {
|
||||
response.BadRequest(c, "Invalid stream value, use true or false")
|
||||
@@ -152,10 +171,12 @@ func (h *UsageHandler) List(c *gin.Context) {
|
||||
AccountID: accountID,
|
||||
GroupID: groupID,
|
||||
Model: model,
|
||||
RequestType: requestType,
|
||||
Stream: stream,
|
||||
BillingType: billingType,
|
||||
StartTime: startTime,
|
||||
EndTime: endTime,
|
||||
ExactTotal: exactTotal,
|
||||
}
|
||||
|
||||
records, result, err := h.usageService.ListWithFilters(c.Request.Context(), params, filters)
|
||||
@@ -214,8 +235,17 @@ func (h *UsageHandler) Stats(c *gin.Context) {
|
||||
|
||||
model := c.Query("model")
|
||||
|
||||
var requestType *int16
|
||||
var stream *bool
|
||||
if streamStr := c.Query("stream"); streamStr != "" {
|
||||
if requestTypeStr := strings.TrimSpace(c.Query("request_type")); requestTypeStr != "" {
|
||||
parsed, err := service.ParseUsageRequestType(requestTypeStr)
|
||||
if err != nil {
|
||||
response.BadRequest(c, err.Error())
|
||||
return
|
||||
}
|
||||
value := int16(parsed)
|
||||
requestType = &value
|
||||
} else if streamStr := c.Query("stream"); streamStr != "" {
|
||||
val, err := strconv.ParseBool(streamStr)
|
||||
if err != nil {
|
||||
response.BadRequest(c, "Invalid stream value, use true or false")
|
||||
@@ -278,6 +308,7 @@ func (h *UsageHandler) Stats(c *gin.Context) {
|
||||
AccountID: accountID,
|
||||
GroupID: groupID,
|
||||
Model: model,
|
||||
RequestType: requestType,
|
||||
Stream: stream,
|
||||
BillingType: billingType,
|
||||
StartTime: &startTime,
|
||||
@@ -432,6 +463,19 @@ func (h *UsageHandler) CreateCleanupTask(c *gin.Context) {
|
||||
}
|
||||
endTime = endTime.Add(24*time.Hour - time.Nanosecond)
|
||||
|
||||
var requestType *int16
|
||||
stream := req.Stream
|
||||
if req.RequestType != nil {
|
||||
parsed, err := service.ParseUsageRequestType(*req.RequestType)
|
||||
if err != nil {
|
||||
response.BadRequest(c, err.Error())
|
||||
return
|
||||
}
|
||||
value := int16(parsed)
|
||||
requestType = &value
|
||||
stream = nil
|
||||
}
|
||||
|
||||
filters := service.UsageCleanupFilters{
|
||||
StartTime: startTime,
|
||||
EndTime: endTime,
|
||||
@@ -440,7 +484,8 @@ func (h *UsageHandler) CreateCleanupTask(c *gin.Context) {
|
||||
AccountID: req.AccountID,
|
||||
GroupID: req.GroupID,
|
||||
Model: req.Model,
|
||||
Stream: req.Stream,
|
||||
RequestType: requestType,
|
||||
Stream: stream,
|
||||
BillingType: req.BillingType,
|
||||
}
|
||||
|
||||
@@ -464,9 +509,13 @@ func (h *UsageHandler) CreateCleanupTask(c *gin.Context) {
|
||||
if filters.Model != nil {
|
||||
model = *filters.Model
|
||||
}
|
||||
var stream any
|
||||
var streamValue any
|
||||
if filters.Stream != nil {
|
||||
stream = *filters.Stream
|
||||
streamValue = *filters.Stream
|
||||
}
|
||||
var requestTypeName any
|
||||
if filters.RequestType != nil {
|
||||
requestTypeName = service.RequestTypeFromInt16(*filters.RequestType).String()
|
||||
}
|
||||
var billingType any
|
||||
if filters.BillingType != nil {
|
||||
@@ -481,7 +530,7 @@ func (h *UsageHandler) CreateCleanupTask(c *gin.Context) {
|
||||
Body: req,
|
||||
}
|
||||
executeAdminIdempotentJSON(c, "admin.usage.cleanup_tasks.create", idempotencyPayload, service.DefaultWriteIdempotencyTTL(), func(ctx context.Context) (any, error) {
|
||||
logger.LegacyPrintf("handler.admin.usage", "[UsageCleanup] 请求创建清理任务: operator=%d start=%s end=%s user_id=%v api_key_id=%v account_id=%v group_id=%v model=%v stream=%v billing_type=%v tz=%q",
|
||||
logger.LegacyPrintf("handler.admin.usage", "[UsageCleanup] 请求创建清理任务: operator=%d start=%s end=%s user_id=%v api_key_id=%v account_id=%v group_id=%v model=%v request_type=%v stream=%v billing_type=%v tz=%q",
|
||||
subject.UserID,
|
||||
filters.StartTime.Format(time.RFC3339),
|
||||
filters.EndTime.Format(time.RFC3339),
|
||||
@@ -490,7 +539,8 @@ func (h *UsageHandler) CreateCleanupTask(c *gin.Context) {
|
||||
accountID,
|
||||
groupID,
|
||||
model,
|
||||
stream,
|
||||
requestTypeName,
|
||||
streamValue,
|
||||
billingType,
|
||||
req.Timezone,
|
||||
)
|
||||
|
||||
@@ -0,0 +1,140 @@
|
||||
package admin
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/pagination"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/usagestats"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// adminUsageRepoCapture is a UsageLogRepository stub that records the filters
// passed to List/Stats so tests can assert on the handler's query parsing.
type adminUsageRepoCapture struct {
	service.UsageLogRepository
	listFilters  usagestats.UsageLogFilters
	statsFilters usagestats.UsageLogFilters
}

// ListWithFilters captures the filters and returns an empty page.
func (s *adminUsageRepoCapture) ListWithFilters(ctx context.Context, params pagination.PaginationParams, filters usagestats.UsageLogFilters) ([]service.UsageLog, *pagination.PaginationResult, error) {
	s.listFilters = filters
	return []service.UsageLog{}, &pagination.PaginationResult{
		Total:    0,
		Page:     params.Page,
		PageSize: params.PageSize,
		Pages:    0,
	}, nil
}

// GetStatsWithFilters captures the filters and returns empty stats.
func (s *adminUsageRepoCapture) GetStatsWithFilters(ctx context.Context, filters usagestats.UsageLogFilters) (*usagestats.UsageStats, error) {
	s.statsFilters = filters
	return &usagestats.UsageStats{}, nil
}

// newAdminUsageRequestTypeTestRouter wires a test-mode gin router with the
// usage List and Stats handlers backed by the capturing repository stub.
func newAdminUsageRequestTypeTestRouter(repo *adminUsageRepoCapture) *gin.Engine {
	gin.SetMode(gin.TestMode)
	usageSvc := service.NewUsageService(repo, nil, nil, nil)
	handler := NewUsageHandler(usageSvc, nil, nil, nil)
	router := gin.New()
	router.GET("/admin/usage", handler.List)
	router.GET("/admin/usage/stats", handler.Stats)
	return router
}

// Verifies request_type wins over the legacy stream query parameter on List.
func TestAdminUsageListRequestTypePriority(t *testing.T) {
	repo := &adminUsageRepoCapture{}
	router := newAdminUsageRequestTypeTestRouter(repo)

	req := httptest.NewRequest(http.MethodGet, "/admin/usage?request_type=ws_v2&stream=false", nil)
	rec := httptest.NewRecorder()
	router.ServeHTTP(rec, req)

	require.Equal(t, http.StatusOK, rec.Code)
	require.NotNil(t, repo.listFilters.RequestType)
	require.Equal(t, int16(service.RequestTypeWSV2), *repo.listFilters.RequestType)
	require.Nil(t, repo.listFilters.Stream)
}

// Verifies an unknown request_type on List is rejected with 400.
func TestAdminUsageListInvalidRequestType(t *testing.T) {
	repo := &adminUsageRepoCapture{}
	router := newAdminUsageRequestTypeTestRouter(repo)

	req := httptest.NewRequest(http.MethodGet, "/admin/usage?request_type=bad", nil)
	rec := httptest.NewRecorder()
	router.ServeHTTP(rec, req)

	require.Equal(t, http.StatusBadRequest, rec.Code)
}

// Verifies a non-boolean stream value on List is rejected with 400.
func TestAdminUsageListInvalidStream(t *testing.T) {
	repo := &adminUsageRepoCapture{}
	router := newAdminUsageRequestTypeTestRouter(repo)

	req := httptest.NewRequest(http.MethodGet, "/admin/usage?stream=bad", nil)
	rec := httptest.NewRecorder()
	router.ServeHTTP(rec, req)

	require.Equal(t, http.StatusBadRequest, rec.Code)
}

// Verifies exact_total=true is propagated into the list filters.
func TestAdminUsageListExactTotalTrue(t *testing.T) {
	repo := &adminUsageRepoCapture{}
	router := newAdminUsageRequestTypeTestRouter(repo)

	req := httptest.NewRequest(http.MethodGet, "/admin/usage?exact_total=true", nil)
	rec := httptest.NewRecorder()
	router.ServeHTTP(rec, req)

	require.Equal(t, http.StatusOK, rec.Code)
	require.True(t, repo.listFilters.ExactTotal)
}

// Verifies a non-boolean exact_total value is rejected with 400.
func TestAdminUsageListInvalidExactTotal(t *testing.T) {
	repo := &adminUsageRepoCapture{}
	router := newAdminUsageRequestTypeTestRouter(repo)

	req := httptest.NewRequest(http.MethodGet, "/admin/usage?exact_total=oops", nil)
	rec := httptest.NewRecorder()
	router.ServeHTTP(rec, req)

	require.Equal(t, http.StatusBadRequest, rec.Code)
}

// Verifies request_type wins on Stats even when the stream value is invalid,
// because the stream parameter is ignored once request_type is present.
func TestAdminUsageStatsRequestTypePriority(t *testing.T) {
	repo := &adminUsageRepoCapture{}
	router := newAdminUsageRequestTypeTestRouter(repo)

	req := httptest.NewRequest(http.MethodGet, "/admin/usage/stats?request_type=stream&stream=bad", nil)
	rec := httptest.NewRecorder()
	router.ServeHTTP(rec, req)

	require.Equal(t, http.StatusOK, rec.Code)
	require.NotNil(t, repo.statsFilters.RequestType)
	require.Equal(t, int16(service.RequestTypeStream), *repo.statsFilters.RequestType)
	require.Nil(t, repo.statsFilters.Stream)
}

// Verifies an unknown request_type on Stats is rejected with 400.
func TestAdminUsageStatsInvalidRequestType(t *testing.T) {
	repo := &adminUsageRepoCapture{}
	router := newAdminUsageRequestTypeTestRouter(repo)

	req := httptest.NewRequest(http.MethodGet, "/admin/usage/stats?request_type=oops", nil)
	rec := httptest.NewRecorder()
	router.ServeHTTP(rec, req)

	require.Equal(t, http.StatusBadRequest, rec.Code)
}

// Verifies a non-boolean stream value on Stats is rejected with 400.
func TestAdminUsageStatsInvalidStream(t *testing.T) {
	repo := &adminUsageRepoCapture{}
	router := newAdminUsageRequestTypeTestRouter(repo)

	req := httptest.NewRequest(http.MethodGet, "/admin/usage/stats?stream=oops", nil)
	rec := httptest.NewRecorder()
	router.ServeHTTP(rec, req)

	require.Equal(t, http.StatusBadRequest, rec.Code)
}
|
||||
@@ -1,7 +1,9 @@
|
||||
package admin
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/response"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
@@ -67,6 +69,8 @@ type BatchUserAttributesResponse struct {
|
||||
Attributes map[int64]map[int64]string `json:"attributes"`
|
||||
}
|
||||
|
||||
var userAttributesBatchCache = newSnapshotCache(30 * time.Second)
|
||||
|
||||
// AttributeDefinitionResponse represents attribute definition response
|
||||
type AttributeDefinitionResponse struct {
|
||||
ID int64 `json:"id"`
|
||||
@@ -327,16 +331,32 @@ func (h *UserAttributeHandler) GetBatchUserAttributes(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
if len(req.UserIDs) == 0 {
|
||||
userIDs := normalizeInt64IDList(req.UserIDs)
|
||||
if len(userIDs) == 0 {
|
||||
response.Success(c, BatchUserAttributesResponse{Attributes: map[int64]map[int64]string{}})
|
||||
return
|
||||
}
|
||||
|
||||
attrs, err := h.attrService.GetBatchUserAttributes(c.Request.Context(), req.UserIDs)
|
||||
keyRaw, _ := json.Marshal(struct {
|
||||
UserIDs []int64 `json:"user_ids"`
|
||||
}{
|
||||
UserIDs: userIDs,
|
||||
})
|
||||
cacheKey := string(keyRaw)
|
||||
if cached, ok := userAttributesBatchCache.Get(cacheKey); ok {
|
||||
c.Header("X-Snapshot-Cache", "hit")
|
||||
response.Success(c, cached.Payload)
|
||||
return
|
||||
}
|
||||
|
||||
attrs, err := h.attrService.GetBatchUserAttributes(c.Request.Context(), userIDs)
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
|
||||
response.Success(c, BatchUserAttributesResponse{Attributes: attrs})
|
||||
payload := BatchUserAttributesResponse{Attributes: attrs}
|
||||
userAttributesBatchCache.Set(cacheKey, payload)
|
||||
c.Header("X-Snapshot-Cache", "miss")
|
||||
response.Success(c, payload)
|
||||
}
|
||||
|
||||
@@ -34,13 +34,14 @@ func NewUserHandler(adminService service.AdminService, concurrencyService *servi
|
||||
|
||||
// CreateUserRequest represents admin create user request
|
||||
type CreateUserRequest struct {
|
||||
Email string `json:"email" binding:"required,email"`
|
||||
Password string `json:"password" binding:"required,min=6"`
|
||||
Username string `json:"username"`
|
||||
Notes string `json:"notes"`
|
||||
Balance float64 `json:"balance"`
|
||||
Concurrency int `json:"concurrency"`
|
||||
AllowedGroups []int64 `json:"allowed_groups"`
|
||||
Email string `json:"email" binding:"required,email"`
|
||||
Password string `json:"password" binding:"required,min=6"`
|
||||
Username string `json:"username"`
|
||||
Notes string `json:"notes"`
|
||||
Balance float64 `json:"balance"`
|
||||
Concurrency int `json:"concurrency"`
|
||||
AllowedGroups []int64 `json:"allowed_groups"`
|
||||
SoraStorageQuotaBytes int64 `json:"sora_storage_quota_bytes"`
|
||||
}
|
||||
|
||||
// UpdateUserRequest represents admin update user request
|
||||
@@ -56,7 +57,8 @@ type UpdateUserRequest struct {
|
||||
AllowedGroups *[]int64 `json:"allowed_groups"`
|
||||
// GroupRates 用户专属分组倍率配置
|
||||
// map[groupID]*rate,nil 表示删除该分组的专属倍率
|
||||
GroupRates map[int64]*float64 `json:"group_rates"`
|
||||
GroupRates map[int64]*float64 `json:"group_rates"`
|
||||
SoraStorageQuotaBytes *int64 `json:"sora_storage_quota_bytes"`
|
||||
}
|
||||
|
||||
// UpdateBalanceRequest represents balance update request
|
||||
@@ -89,6 +91,10 @@ func (h *UserHandler) List(c *gin.Context) {
|
||||
Search: search,
|
||||
Attributes: parseAttributeFilters(c),
|
||||
}
|
||||
if raw, ok := c.GetQuery("include_subscriptions"); ok {
|
||||
includeSubscriptions := parseBoolQueryWithDefault(raw, true)
|
||||
filters.IncludeSubscriptions = &includeSubscriptions
|
||||
}
|
||||
|
||||
users, total, err := h.adminService.ListUsers(c.Request.Context(), page, pageSize, filters)
|
||||
if err != nil {
|
||||
@@ -174,13 +180,14 @@ func (h *UserHandler) Create(c *gin.Context) {
|
||||
}
|
||||
|
||||
user, err := h.adminService.CreateUser(c.Request.Context(), &service.CreateUserInput{
|
||||
Email: req.Email,
|
||||
Password: req.Password,
|
||||
Username: req.Username,
|
||||
Notes: req.Notes,
|
||||
Balance: req.Balance,
|
||||
Concurrency: req.Concurrency,
|
||||
AllowedGroups: req.AllowedGroups,
|
||||
Email: req.Email,
|
||||
Password: req.Password,
|
||||
Username: req.Username,
|
||||
Notes: req.Notes,
|
||||
Balance: req.Balance,
|
||||
Concurrency: req.Concurrency,
|
||||
AllowedGroups: req.AllowedGroups,
|
||||
SoraStorageQuotaBytes: req.SoraStorageQuotaBytes,
|
||||
})
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
@@ -207,15 +214,16 @@ func (h *UserHandler) Update(c *gin.Context) {
|
||||
|
||||
// 使用指针类型直接传递,nil 表示未提供该字段
|
||||
user, err := h.adminService.UpdateUser(c.Request.Context(), userID, &service.UpdateUserInput{
|
||||
Email: req.Email,
|
||||
Password: req.Password,
|
||||
Username: req.Username,
|
||||
Notes: req.Notes,
|
||||
Balance: req.Balance,
|
||||
Concurrency: req.Concurrency,
|
||||
Status: req.Status,
|
||||
AllowedGroups: req.AllowedGroups,
|
||||
GroupRates: req.GroupRates,
|
||||
Email: req.Email,
|
||||
Password: req.Password,
|
||||
Username: req.Username,
|
||||
Notes: req.Notes,
|
||||
Balance: req.Balance,
|
||||
Concurrency: req.Concurrency,
|
||||
Status: req.Status,
|
||||
AllowedGroups: req.AllowedGroups,
|
||||
GroupRates: req.GroupRates,
|
||||
SoraStorageQuotaBytes: req.SoraStorageQuotaBytes,
|
||||
})
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
|
||||
@@ -4,6 +4,7 @@ package handler
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/handler/dto"
|
||||
@@ -36,6 +37,11 @@ type CreateAPIKeyRequest struct {
|
||||
IPBlacklist []string `json:"ip_blacklist"` // IP 黑名单
|
||||
Quota *float64 `json:"quota"` // 配额限制 (USD)
|
||||
ExpiresInDays *int `json:"expires_in_days"` // 过期天数
|
||||
|
||||
// Rate limit fields (0 = unlimited)
|
||||
RateLimit5h *float64 `json:"rate_limit_5h"`
|
||||
RateLimit1d *float64 `json:"rate_limit_1d"`
|
||||
RateLimit7d *float64 `json:"rate_limit_7d"`
|
||||
}
|
||||
|
||||
// UpdateAPIKeyRequest represents the update API key request payload
|
||||
@@ -48,6 +54,12 @@ type UpdateAPIKeyRequest struct {
|
||||
Quota *float64 `json:"quota"` // 配额限制 (USD), 0=无限制
|
||||
ExpiresAt *string `json:"expires_at"` // 过期时间 (ISO 8601)
|
||||
ResetQuota *bool `json:"reset_quota"` // 重置已用配额
|
||||
|
||||
// Rate limit fields (nil = no change, 0 = unlimited)
|
||||
RateLimit5h *float64 `json:"rate_limit_5h"`
|
||||
RateLimit1d *float64 `json:"rate_limit_1d"`
|
||||
RateLimit7d *float64 `json:"rate_limit_7d"`
|
||||
ResetRateLimitUsage *bool `json:"reset_rate_limit_usage"` // 重置限速用量
|
||||
}
|
||||
|
||||
// List handles listing user's API keys with pagination
|
||||
@@ -62,7 +74,23 @@ func (h *APIKeyHandler) List(c *gin.Context) {
|
||||
page, pageSize := response.ParsePagination(c)
|
||||
params := pagination.PaginationParams{Page: page, PageSize: pageSize}
|
||||
|
||||
keys, result, err := h.apiKeyService.List(c.Request.Context(), subject.UserID, params)
|
||||
// Parse filter parameters
|
||||
var filters service.APIKeyListFilters
|
||||
if search := strings.TrimSpace(c.Query("search")); search != "" {
|
||||
if len(search) > 100 {
|
||||
search = search[:100]
|
||||
}
|
||||
filters.Search = search
|
||||
}
|
||||
filters.Status = c.Query("status")
|
||||
if groupIDStr := c.Query("group_id"); groupIDStr != "" {
|
||||
gid, err := strconv.ParseInt(groupIDStr, 10, 64)
|
||||
if err == nil {
|
||||
filters.GroupID = &gid
|
||||
}
|
||||
}
|
||||
|
||||
keys, result, err := h.apiKeyService.List(c.Request.Context(), subject.UserID, params, filters)
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
@@ -131,6 +159,15 @@ func (h *APIKeyHandler) Create(c *gin.Context) {
|
||||
if req.Quota != nil {
|
||||
svcReq.Quota = *req.Quota
|
||||
}
|
||||
if req.RateLimit5h != nil {
|
||||
svcReq.RateLimit5h = *req.RateLimit5h
|
||||
}
|
||||
if req.RateLimit1d != nil {
|
||||
svcReq.RateLimit1d = *req.RateLimit1d
|
||||
}
|
||||
if req.RateLimit7d != nil {
|
||||
svcReq.RateLimit7d = *req.RateLimit7d
|
||||
}
|
||||
|
||||
executeUserIdempotentJSON(c, "user.api_keys.create", req, service.DefaultWriteIdempotencyTTL(), func(ctx context.Context) (any, error) {
|
||||
key, err := h.apiKeyService.Create(ctx, subject.UserID, svcReq)
|
||||
@@ -163,10 +200,14 @@ func (h *APIKeyHandler) Update(c *gin.Context) {
|
||||
}
|
||||
|
||||
svcReq := service.UpdateAPIKeyRequest{
|
||||
IPWhitelist: req.IPWhitelist,
|
||||
IPBlacklist: req.IPBlacklist,
|
||||
Quota: req.Quota,
|
||||
ResetQuota: req.ResetQuota,
|
||||
IPWhitelist: req.IPWhitelist,
|
||||
IPBlacklist: req.IPBlacklist,
|
||||
Quota: req.Quota,
|
||||
ResetQuota: req.ResetQuota,
|
||||
RateLimit5h: req.RateLimit5h,
|
||||
RateLimit1d: req.RateLimit1d,
|
||||
RateLimit7d: req.RateLimit7d,
|
||||
ResetRateLimitUsage: req.ResetRateLimitUsage,
|
||||
}
|
||||
if req.Name != "" {
|
||||
svcReq.Name = &req.Name
|
||||
|
||||
@@ -59,9 +59,11 @@ func UserFromServiceAdmin(u *service.User) *AdminUser {
|
||||
return nil
|
||||
}
|
||||
return &AdminUser{
|
||||
User: *base,
|
||||
Notes: u.Notes,
|
||||
GroupRates: u.GroupRates,
|
||||
User: *base,
|
||||
Notes: u.Notes,
|
||||
GroupRates: u.GroupRates,
|
||||
SoraStorageQuotaBytes: u.SoraStorageQuotaBytes,
|
||||
SoraStorageUsedBytes: u.SoraStorageUsedBytes,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -70,22 +72,31 @@ func APIKeyFromService(k *service.APIKey) *APIKey {
|
||||
return nil
|
||||
}
|
||||
return &APIKey{
|
||||
ID: k.ID,
|
||||
UserID: k.UserID,
|
||||
Key: k.Key,
|
||||
Name: k.Name,
|
||||
GroupID: k.GroupID,
|
||||
Status: k.Status,
|
||||
IPWhitelist: k.IPWhitelist,
|
||||
IPBlacklist: k.IPBlacklist,
|
||||
LastUsedAt: k.LastUsedAt,
|
||||
Quota: k.Quota,
|
||||
QuotaUsed: k.QuotaUsed,
|
||||
ExpiresAt: k.ExpiresAt,
|
||||
CreatedAt: k.CreatedAt,
|
||||
UpdatedAt: k.UpdatedAt,
|
||||
User: UserFromServiceShallow(k.User),
|
||||
Group: GroupFromServiceShallow(k.Group),
|
||||
ID: k.ID,
|
||||
UserID: k.UserID,
|
||||
Key: k.Key,
|
||||
Name: k.Name,
|
||||
GroupID: k.GroupID,
|
||||
Status: k.Status,
|
||||
IPWhitelist: k.IPWhitelist,
|
||||
IPBlacklist: k.IPBlacklist,
|
||||
LastUsedAt: k.LastUsedAt,
|
||||
Quota: k.Quota,
|
||||
QuotaUsed: k.QuotaUsed,
|
||||
ExpiresAt: k.ExpiresAt,
|
||||
CreatedAt: k.CreatedAt,
|
||||
UpdatedAt: k.UpdatedAt,
|
||||
RateLimit5h: k.RateLimit5h,
|
||||
RateLimit1d: k.RateLimit1d,
|
||||
RateLimit7d: k.RateLimit7d,
|
||||
Usage5h: k.Usage5h,
|
||||
Usage1d: k.Usage1d,
|
||||
Usage7d: k.Usage7d,
|
||||
Window5hStart: k.Window5hStart,
|
||||
Window1dStart: k.Window1dStart,
|
||||
Window7dStart: k.Window7dStart,
|
||||
User: UserFromServiceShallow(k.User),
|
||||
Group: GroupFromServiceShallow(k.Group),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -153,6 +164,7 @@ func groupFromServiceBase(g *service.Group) Group {
|
||||
ClaudeCodeOnly: g.ClaudeCodeOnly,
|
||||
FallbackGroupID: g.FallbackGroupID,
|
||||
FallbackGroupIDOnInvalidRequest: g.FallbackGroupIDOnInvalidRequest,
|
||||
SoraStorageQuotaBytes: g.SoraStorageQuotaBytes,
|
||||
CreatedAt: g.CreatedAt,
|
||||
UpdatedAt: g.UpdatedAt,
|
||||
}
|
||||
@@ -207,6 +219,17 @@ func AccountFromServiceShallow(a *service.Account) *Account {
|
||||
if idleTimeout := a.GetSessionIdleTimeoutMinutes(); idleTimeout > 0 {
|
||||
out.SessionIdleTimeoutMin = &idleTimeout
|
||||
}
|
||||
if rpm := a.GetBaseRPM(); rpm > 0 {
|
||||
out.BaseRPM = &rpm
|
||||
strategy := a.GetRPMStrategy()
|
||||
out.RPMStrategy = &strategy
|
||||
buffer := a.GetRPMStickyBuffer()
|
||||
out.RPMStickyBuffer = &buffer
|
||||
}
|
||||
// 用户消息队列模式
|
||||
if mode := a.GetUserMsgQueueMode(); mode != "" {
|
||||
out.UserMsgQueueMode = &mode
|
||||
}
|
||||
// TLS指纹伪装开关
|
||||
if a.IsTLSFingerprintEnabled() {
|
||||
enabled := true
|
||||
@@ -284,7 +307,6 @@ func ProxyFromService(p *service.Proxy) *Proxy {
|
||||
Host: p.Host,
|
||||
Port: p.Port,
|
||||
Username: p.Username,
|
||||
Password: p.Password,
|
||||
Status: p.Status,
|
||||
CreatedAt: p.CreatedAt,
|
||||
UpdatedAt: p.UpdatedAt,
|
||||
@@ -314,6 +336,51 @@ func ProxyWithAccountCountFromService(p *service.ProxyWithAccountCount) *ProxyWi
|
||||
}
|
||||
}
|
||||
|
||||
// ProxyFromServiceAdmin converts a service Proxy to AdminProxy DTO for admin users.
|
||||
// It includes the password field - user-facing endpoints must not use this.
|
||||
func ProxyFromServiceAdmin(p *service.Proxy) *AdminProxy {
|
||||
if p == nil {
|
||||
return nil
|
||||
}
|
||||
base := ProxyFromService(p)
|
||||
if base == nil {
|
||||
return nil
|
||||
}
|
||||
return &AdminProxy{
|
||||
Proxy: *base,
|
||||
Password: p.Password,
|
||||
}
|
||||
}
|
||||
|
||||
// ProxyWithAccountCountFromServiceAdmin converts a service ProxyWithAccountCount to AdminProxyWithAccountCount DTO.
|
||||
// It includes the password field - user-facing endpoints must not use this.
|
||||
func ProxyWithAccountCountFromServiceAdmin(p *service.ProxyWithAccountCount) *AdminProxyWithAccountCount {
|
||||
if p == nil {
|
||||
return nil
|
||||
}
|
||||
admin := ProxyFromServiceAdmin(&p.Proxy)
|
||||
if admin == nil {
|
||||
return nil
|
||||
}
|
||||
return &AdminProxyWithAccountCount{
|
||||
AdminProxy: *admin,
|
||||
AccountCount: p.AccountCount,
|
||||
LatencyMs: p.LatencyMs,
|
||||
LatencyStatus: p.LatencyStatus,
|
||||
LatencyMessage: p.LatencyMessage,
|
||||
IPAddress: p.IPAddress,
|
||||
Country: p.Country,
|
||||
CountryCode: p.CountryCode,
|
||||
Region: p.Region,
|
||||
City: p.City,
|
||||
QualityStatus: p.QualityStatus,
|
||||
QualityScore: p.QualityScore,
|
||||
QualityGrade: p.QualityGrade,
|
||||
QualitySummary: p.QualitySummary,
|
||||
QualityChecked: p.QualityChecked,
|
||||
}
|
||||
}
|
||||
|
||||
func ProxyAccountSummaryFromService(a *service.ProxyAccountSummary) *ProxyAccountSummary {
|
||||
if a == nil {
|
||||
return nil
|
||||
@@ -386,6 +453,8 @@ func AccountSummaryFromService(a *service.Account) *AccountSummary {
|
||||
|
||||
func usageLogFromServiceUser(l *service.UsageLog) UsageLog {
|
||||
// 普通用户 DTO:严禁包含管理员字段(例如 account_rate_multiplier、ip_address、account)。
|
||||
requestType := l.EffectiveRequestType()
|
||||
stream, openAIWSMode := service.ApplyLegacyRequestFields(requestType, l.Stream, l.OpenAIWSMode)
|
||||
return UsageLog{
|
||||
ID: l.ID,
|
||||
UserID: l.UserID,
|
||||
@@ -410,7 +479,9 @@ func usageLogFromServiceUser(l *service.UsageLog) UsageLog {
|
||||
ActualCost: l.ActualCost,
|
||||
RateMultiplier: l.RateMultiplier,
|
||||
BillingType: l.BillingType,
|
||||
Stream: l.Stream,
|
||||
RequestType: requestType.String(),
|
||||
Stream: stream,
|
||||
OpenAIWSMode: openAIWSMode,
|
||||
DurationMs: l.DurationMs,
|
||||
FirstTokenMs: l.FirstTokenMs,
|
||||
ImageCount: l.ImageCount,
|
||||
@@ -465,6 +536,7 @@ func UsageCleanupTaskFromService(task *service.UsageCleanupTask) *UsageCleanupTa
|
||||
AccountID: task.Filters.AccountID,
|
||||
GroupID: task.Filters.GroupID,
|
||||
Model: task.Filters.Model,
|
||||
RequestType: requestTypeStringPtr(task.Filters.RequestType),
|
||||
Stream: task.Filters.Stream,
|
||||
BillingType: task.Filters.BillingType,
|
||||
},
|
||||
@@ -480,6 +552,14 @@ func UsageCleanupTaskFromService(task *service.UsageCleanupTask) *UsageCleanupTa
|
||||
}
|
||||
}
|
||||
|
||||
func requestTypeStringPtr(requestType *int16) *string {
|
||||
if requestType == nil {
|
||||
return nil
|
||||
}
|
||||
value := service.RequestTypeFromInt16(*requestType).String()
|
||||
return &value
|
||||
}
|
||||
|
||||
func SettingFromService(s *service.Setting) *Setting {
|
||||
if s == nil {
|
||||
return nil
|
||||
|
||||
73
backend/internal/handler/dto/mappers_usage_test.go
Normal file
73
backend/internal/handler/dto/mappers_usage_test.go
Normal file
@@ -0,0 +1,73 @@
|
||||
package dto
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestUsageLogFromService_IncludesOpenAIWSMode(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
wsLog := &service.UsageLog{
|
||||
RequestID: "req_1",
|
||||
Model: "gpt-5.3-codex",
|
||||
OpenAIWSMode: true,
|
||||
}
|
||||
httpLog := &service.UsageLog{
|
||||
RequestID: "resp_1",
|
||||
Model: "gpt-5.3-codex",
|
||||
OpenAIWSMode: false,
|
||||
}
|
||||
|
||||
require.True(t, UsageLogFromService(wsLog).OpenAIWSMode)
|
||||
require.False(t, UsageLogFromService(httpLog).OpenAIWSMode)
|
||||
require.True(t, UsageLogFromServiceAdmin(wsLog).OpenAIWSMode)
|
||||
require.False(t, UsageLogFromServiceAdmin(httpLog).OpenAIWSMode)
|
||||
}
|
||||
|
||||
func TestUsageLogFromService_PrefersRequestTypeForLegacyFields(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
log := &service.UsageLog{
|
||||
RequestID: "req_2",
|
||||
Model: "gpt-5.3-codex",
|
||||
RequestType: service.RequestTypeWSV2,
|
||||
Stream: false,
|
||||
OpenAIWSMode: false,
|
||||
}
|
||||
|
||||
userDTO := UsageLogFromService(log)
|
||||
adminDTO := UsageLogFromServiceAdmin(log)
|
||||
|
||||
require.Equal(t, "ws_v2", userDTO.RequestType)
|
||||
require.True(t, userDTO.Stream)
|
||||
require.True(t, userDTO.OpenAIWSMode)
|
||||
require.Equal(t, "ws_v2", adminDTO.RequestType)
|
||||
require.True(t, adminDTO.Stream)
|
||||
require.True(t, adminDTO.OpenAIWSMode)
|
||||
}
|
||||
|
||||
func TestUsageCleanupTaskFromService_RequestTypeMapping(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
requestType := int16(service.RequestTypeStream)
|
||||
task := &service.UsageCleanupTask{
|
||||
ID: 1,
|
||||
Status: service.UsageCleanupStatusPending,
|
||||
Filters: service.UsageCleanupFilters{
|
||||
RequestType: &requestType,
|
||||
},
|
||||
}
|
||||
|
||||
dtoTask := UsageCleanupTaskFromService(task)
|
||||
require.NotNil(t, dtoTask)
|
||||
require.NotNil(t, dtoTask.Filters.RequestType)
|
||||
require.Equal(t, "stream", *dtoTask.Filters.RequestType)
|
||||
}
|
||||
|
||||
func TestRequestTypeStringPtrNil(t *testing.T) {
|
||||
t.Parallel()
|
||||
require.Nil(t, requestTypeStringPtr(nil))
|
||||
}
|
||||
@@ -1,14 +1,30 @@
|
||||
package dto
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// CustomMenuItem represents a user-configured custom menu entry.
|
||||
type CustomMenuItem struct {
|
||||
ID string `json:"id"`
|
||||
Label string `json:"label"`
|
||||
IconSVG string `json:"icon_svg"`
|
||||
URL string `json:"url"`
|
||||
Visibility string `json:"visibility"` // "user" or "admin"
|
||||
SortOrder int `json:"sort_order"`
|
||||
}
|
||||
|
||||
// SystemSettings represents the admin settings API response payload.
|
||||
type SystemSettings struct {
|
||||
RegistrationEnabled bool `json:"registration_enabled"`
|
||||
EmailVerifyEnabled bool `json:"email_verify_enabled"`
|
||||
PromoCodeEnabled bool `json:"promo_code_enabled"`
|
||||
PasswordResetEnabled bool `json:"password_reset_enabled"`
|
||||
InvitationCodeEnabled bool `json:"invitation_code_enabled"`
|
||||
TotpEnabled bool `json:"totp_enabled"` // TOTP 双因素认证
|
||||
TotpEncryptionKeyConfigured bool `json:"totp_encryption_key_configured"` // TOTP 加密密钥是否已配置
|
||||
RegistrationEnabled bool `json:"registration_enabled"`
|
||||
EmailVerifyEnabled bool `json:"email_verify_enabled"`
|
||||
RegistrationEmailSuffixWhitelist []string `json:"registration_email_suffix_whitelist"`
|
||||
PromoCodeEnabled bool `json:"promo_code_enabled"`
|
||||
PasswordResetEnabled bool `json:"password_reset_enabled"`
|
||||
InvitationCodeEnabled bool `json:"invitation_code_enabled"`
|
||||
TotpEnabled bool `json:"totp_enabled"` // TOTP 双因素认证
|
||||
TotpEncryptionKeyConfigured bool `json:"totp_encryption_key_configured"` // TOTP 加密密钥是否已配置
|
||||
|
||||
SMTPHost string `json:"smtp_host"`
|
||||
SMTPPort int `json:"smtp_port"`
|
||||
@@ -27,19 +43,22 @@ type SystemSettings struct {
|
||||
LinuxDoConnectClientSecretConfigured bool `json:"linuxdo_connect_client_secret_configured"`
|
||||
LinuxDoConnectRedirectURL string `json:"linuxdo_connect_redirect_url"`
|
||||
|
||||
SiteName string `json:"site_name"`
|
||||
SiteLogo string `json:"site_logo"`
|
||||
SiteSubtitle string `json:"site_subtitle"`
|
||||
APIBaseURL string `json:"api_base_url"`
|
||||
ContactInfo string `json:"contact_info"`
|
||||
DocURL string `json:"doc_url"`
|
||||
HomeContent string `json:"home_content"`
|
||||
HideCcsImportButton bool `json:"hide_ccs_import_button"`
|
||||
PurchaseSubscriptionEnabled bool `json:"purchase_subscription_enabled"`
|
||||
PurchaseSubscriptionURL string `json:"purchase_subscription_url"`
|
||||
SiteName string `json:"site_name"`
|
||||
SiteLogo string `json:"site_logo"`
|
||||
SiteSubtitle string `json:"site_subtitle"`
|
||||
APIBaseURL string `json:"api_base_url"`
|
||||
ContactInfo string `json:"contact_info"`
|
||||
DocURL string `json:"doc_url"`
|
||||
HomeContent string `json:"home_content"`
|
||||
HideCcsImportButton bool `json:"hide_ccs_import_button"`
|
||||
PurchaseSubscriptionEnabled bool `json:"purchase_subscription_enabled"`
|
||||
PurchaseSubscriptionURL string `json:"purchase_subscription_url"`
|
||||
SoraClientEnabled bool `json:"sora_client_enabled"`
|
||||
CustomMenuItems []CustomMenuItem `json:"custom_menu_items"`
|
||||
|
||||
DefaultConcurrency int `json:"default_concurrency"`
|
||||
DefaultBalance float64 `json:"default_balance"`
|
||||
DefaultConcurrency int `json:"default_concurrency"`
|
||||
DefaultBalance float64 `json:"default_balance"`
|
||||
DefaultSubscriptions []DefaultSubscriptionSetting `json:"default_subscriptions"`
|
||||
|
||||
// Model fallback configuration
|
||||
EnableModelFallback bool `json:"enable_model_fallback"`
|
||||
@@ -57,29 +76,80 @@ type SystemSettings struct {
|
||||
OpsRealtimeMonitoringEnabled bool `json:"ops_realtime_monitoring_enabled"`
|
||||
OpsQueryModeDefault string `json:"ops_query_mode_default"`
|
||||
OpsMetricsIntervalSeconds int `json:"ops_metrics_interval_seconds"`
|
||||
|
||||
MinClaudeCodeVersion string `json:"min_claude_code_version"`
|
||||
|
||||
// 分组隔离
|
||||
AllowUngroupedKeyScheduling bool `json:"allow_ungrouped_key_scheduling"`
|
||||
}
|
||||
|
||||
type DefaultSubscriptionSetting struct {
|
||||
GroupID int64 `json:"group_id"`
|
||||
ValidityDays int `json:"validity_days"`
|
||||
}
|
||||
|
||||
type PublicSettings struct {
|
||||
RegistrationEnabled bool `json:"registration_enabled"`
|
||||
EmailVerifyEnabled bool `json:"email_verify_enabled"`
|
||||
PromoCodeEnabled bool `json:"promo_code_enabled"`
|
||||
PasswordResetEnabled bool `json:"password_reset_enabled"`
|
||||
InvitationCodeEnabled bool `json:"invitation_code_enabled"`
|
||||
TotpEnabled bool `json:"totp_enabled"` // TOTP 双因素认证
|
||||
TurnstileEnabled bool `json:"turnstile_enabled"`
|
||||
TurnstileSiteKey string `json:"turnstile_site_key"`
|
||||
SiteName string `json:"site_name"`
|
||||
SiteLogo string `json:"site_logo"`
|
||||
SiteSubtitle string `json:"site_subtitle"`
|
||||
APIBaseURL string `json:"api_base_url"`
|
||||
ContactInfo string `json:"contact_info"`
|
||||
DocURL string `json:"doc_url"`
|
||||
HomeContent string `json:"home_content"`
|
||||
HideCcsImportButton bool `json:"hide_ccs_import_button"`
|
||||
PurchaseSubscriptionEnabled bool `json:"purchase_subscription_enabled"`
|
||||
PurchaseSubscriptionURL string `json:"purchase_subscription_url"`
|
||||
LinuxDoOAuthEnabled bool `json:"linuxdo_oauth_enabled"`
|
||||
Version string `json:"version"`
|
||||
RegistrationEnabled bool `json:"registration_enabled"`
|
||||
EmailVerifyEnabled bool `json:"email_verify_enabled"`
|
||||
RegistrationEmailSuffixWhitelist []string `json:"registration_email_suffix_whitelist"`
|
||||
PromoCodeEnabled bool `json:"promo_code_enabled"`
|
||||
PasswordResetEnabled bool `json:"password_reset_enabled"`
|
||||
InvitationCodeEnabled bool `json:"invitation_code_enabled"`
|
||||
TotpEnabled bool `json:"totp_enabled"` // TOTP 双因素认证
|
||||
TurnstileEnabled bool `json:"turnstile_enabled"`
|
||||
TurnstileSiteKey string `json:"turnstile_site_key"`
|
||||
SiteName string `json:"site_name"`
|
||||
SiteLogo string `json:"site_logo"`
|
||||
SiteSubtitle string `json:"site_subtitle"`
|
||||
APIBaseURL string `json:"api_base_url"`
|
||||
ContactInfo string `json:"contact_info"`
|
||||
DocURL string `json:"doc_url"`
|
||||
HomeContent string `json:"home_content"`
|
||||
HideCcsImportButton bool `json:"hide_ccs_import_button"`
|
||||
PurchaseSubscriptionEnabled bool `json:"purchase_subscription_enabled"`
|
||||
PurchaseSubscriptionURL string `json:"purchase_subscription_url"`
|
||||
CustomMenuItems []CustomMenuItem `json:"custom_menu_items"`
|
||||
LinuxDoOAuthEnabled bool `json:"linuxdo_oauth_enabled"`
|
||||
SoraClientEnabled bool `json:"sora_client_enabled"`
|
||||
Version string `json:"version"`
|
||||
}
|
||||
|
||||
// SoraS3Settings Sora S3 存储配置 DTO(响应用,不含敏感字段)
|
||||
type SoraS3Settings struct {
|
||||
Enabled bool `json:"enabled"`
|
||||
Endpoint string `json:"endpoint"`
|
||||
Region string `json:"region"`
|
||||
Bucket string `json:"bucket"`
|
||||
AccessKeyID string `json:"access_key_id"`
|
||||
SecretAccessKeyConfigured bool `json:"secret_access_key_configured"`
|
||||
Prefix string `json:"prefix"`
|
||||
ForcePathStyle bool `json:"force_path_style"`
|
||||
CDNURL string `json:"cdn_url"`
|
||||
DefaultStorageQuotaBytes int64 `json:"default_storage_quota_bytes"`
|
||||
}
|
||||
|
||||
// SoraS3Profile Sora S3 存储配置项 DTO(响应用,不含敏感字段)
|
||||
type SoraS3Profile struct {
|
||||
ProfileID string `json:"profile_id"`
|
||||
Name string `json:"name"`
|
||||
IsActive bool `json:"is_active"`
|
||||
Enabled bool `json:"enabled"`
|
||||
Endpoint string `json:"endpoint"`
|
||||
Region string `json:"region"`
|
||||
Bucket string `json:"bucket"`
|
||||
AccessKeyID string `json:"access_key_id"`
|
||||
SecretAccessKeyConfigured bool `json:"secret_access_key_configured"`
|
||||
Prefix string `json:"prefix"`
|
||||
ForcePathStyle bool `json:"force_path_style"`
|
||||
CDNURL string `json:"cdn_url"`
|
||||
DefaultStorageQuotaBytes int64 `json:"default_storage_quota_bytes"`
|
||||
UpdatedAt string `json:"updated_at"`
|
||||
}
|
||||
|
||||
// ListSoraS3ProfilesResponse Sora S3 配置列表响应
|
||||
type ListSoraS3ProfilesResponse struct {
|
||||
ActiveProfileID string `json:"active_profile_id"`
|
||||
Items []SoraS3Profile `json:"items"`
|
||||
}
|
||||
|
||||
// StreamTimeoutSettings 流超时处理配置 DTO
|
||||
@@ -90,3 +160,29 @@ type StreamTimeoutSettings struct {
|
||||
ThresholdCount int `json:"threshold_count"`
|
||||
ThresholdWindowMinutes int `json:"threshold_window_minutes"`
|
||||
}
|
||||
|
||||
// ParseCustomMenuItems parses a JSON string into a slice of CustomMenuItem.
|
||||
// Returns empty slice on empty/invalid input.
|
||||
func ParseCustomMenuItems(raw string) []CustomMenuItem {
|
||||
raw = strings.TrimSpace(raw)
|
||||
if raw == "" || raw == "[]" {
|
||||
return []CustomMenuItem{}
|
||||
}
|
||||
var items []CustomMenuItem
|
||||
if err := json.Unmarshal([]byte(raw), &items); err != nil {
|
||||
return []CustomMenuItem{}
|
||||
}
|
||||
return items
|
||||
}
|
||||
|
||||
// ParseUserVisibleMenuItems parses custom menu items and filters out admin-only entries.
|
||||
func ParseUserVisibleMenuItems(raw string) []CustomMenuItem {
|
||||
items := ParseCustomMenuItems(raw)
|
||||
filtered := make([]CustomMenuItem, 0, len(items))
|
||||
for _, item := range items {
|
||||
if item.Visibility != "admin" {
|
||||
filtered = append(filtered, item)
|
||||
}
|
||||
}
|
||||
return filtered
|
||||
}
|
||||
|
||||
@@ -26,7 +26,9 @@ type AdminUser struct {
|
||||
Notes string `json:"notes"`
|
||||
// GroupRates 用户专属分组倍率配置
|
||||
// map[groupID]rateMultiplier
|
||||
GroupRates map[int64]float64 `json:"group_rates,omitempty"`
|
||||
GroupRates map[int64]float64 `json:"group_rates,omitempty"`
|
||||
SoraStorageQuotaBytes int64 `json:"sora_storage_quota_bytes"`
|
||||
SoraStorageUsedBytes int64 `json:"sora_storage_used_bytes"`
|
||||
}
|
||||
|
||||
type APIKey struct {
|
||||
@@ -45,6 +47,17 @@ type APIKey struct {
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
UpdatedAt time.Time `json:"updated_at"`
|
||||
|
||||
// Rate limit fields
|
||||
RateLimit5h float64 `json:"rate_limit_5h"`
|
||||
RateLimit1d float64 `json:"rate_limit_1d"`
|
||||
RateLimit7d float64 `json:"rate_limit_7d"`
|
||||
Usage5h float64 `json:"usage_5h"`
|
||||
Usage1d float64 `json:"usage_1d"`
|
||||
Usage7d float64 `json:"usage_7d"`
|
||||
Window5hStart *time.Time `json:"window_5h_start"`
|
||||
Window1dStart *time.Time `json:"window_1d_start"`
|
||||
Window7dStart *time.Time `json:"window_7d_start"`
|
||||
|
||||
User *User `json:"user,omitempty"`
|
||||
Group *Group `json:"group,omitempty"`
|
||||
}
|
||||
@@ -80,6 +93,9 @@ type Group struct {
|
||||
// 无效请求兜底分组
|
||||
FallbackGroupIDOnInvalidRequest *int64 `json:"fallback_group_id_on_invalid_request"`
|
||||
|
||||
// Sora 存储配额
|
||||
SoraStorageQuotaBytes int64 `json:"sora_storage_quota_bytes"`
|
||||
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
UpdatedAt time.Time `json:"updated_at"`
|
||||
}
|
||||
@@ -150,6 +166,13 @@ type Account struct {
|
||||
MaxSessions *int `json:"max_sessions,omitempty"`
|
||||
SessionIdleTimeoutMin *int `json:"session_idle_timeout_minutes,omitempty"`
|
||||
|
||||
// RPM 限制(仅 Anthropic OAuth/SetupToken 账号有效)
|
||||
// 从 extra 字段提取,方便前端显示和编辑
|
||||
BaseRPM *int `json:"base_rpm,omitempty"`
|
||||
RPMStrategy *string `json:"rpm_strategy,omitempty"`
|
||||
RPMStickyBuffer *int `json:"rpm_sticky_buffer,omitempty"`
|
||||
UserMsgQueueMode *string `json:"user_msg_queue_mode,omitempty"`
|
||||
|
||||
// TLS指纹伪装(仅 Anthropic OAuth/SetupToken 账号有效)
|
||||
// 从 extra 字段提取,方便前端显示和编辑
|
||||
EnableTLSFingerprint *bool `json:"enable_tls_fingerprint,omitempty"`
|
||||
@@ -212,6 +235,32 @@ type ProxyWithAccountCount struct {
|
||||
QualityChecked *int64 `json:"quality_checked,omitempty"`
|
||||
}
|
||||
|
||||
// AdminProxy 是管理员接口使用的 proxy DTO(包含密码等敏感字段)。
|
||||
// 注意:普通接口不得使用此 DTO。
|
||||
type AdminProxy struct {
|
||||
Proxy
|
||||
Password string `json:"password,omitempty"`
|
||||
}
|
||||
|
||||
// AdminProxyWithAccountCount 是管理员接口使用的带账号统计的 proxy DTO。
|
||||
type AdminProxyWithAccountCount struct {
|
||||
AdminProxy
|
||||
AccountCount int64 `json:"account_count"`
|
||||
LatencyMs *int64 `json:"latency_ms,omitempty"`
|
||||
LatencyStatus string `json:"latency_status,omitempty"`
|
||||
LatencyMessage string `json:"latency_message,omitempty"`
|
||||
IPAddress string `json:"ip_address,omitempty"`
|
||||
Country string `json:"country,omitempty"`
|
||||
CountryCode string `json:"country_code,omitempty"`
|
||||
Region string `json:"region,omitempty"`
|
||||
City string `json:"city,omitempty"`
|
||||
QualityStatus string `json:"quality_status,omitempty"`
|
||||
QualityScore *int `json:"quality_score,omitempty"`
|
||||
QualityGrade string `json:"quality_grade,omitempty"`
|
||||
QualitySummary string `json:"quality_summary,omitempty"`
|
||||
QualityChecked *int64 `json:"quality_checked,omitempty"`
|
||||
}
|
||||
|
||||
type ProxyAccountSummary struct {
|
||||
ID int64 `json:"id"`
|
||||
Name string `json:"name"`
|
||||
@@ -280,10 +329,12 @@ type UsageLog struct {
|
||||
ActualCost float64 `json:"actual_cost"`
|
||||
RateMultiplier float64 `json:"rate_multiplier"`
|
||||
|
||||
BillingType int8 `json:"billing_type"`
|
||||
Stream bool `json:"stream"`
|
||||
DurationMs *int `json:"duration_ms"`
|
||||
FirstTokenMs *int `json:"first_token_ms"`
|
||||
BillingType int8 `json:"billing_type"`
|
||||
RequestType string `json:"request_type"`
|
||||
Stream bool `json:"stream"`
|
||||
OpenAIWSMode bool `json:"openai_ws_mode"`
|
||||
DurationMs *int `json:"duration_ms"`
|
||||
FirstTokenMs *int `json:"first_token_ms"`
|
||||
|
||||
// 图片生成字段
|
||||
ImageCount int `json:"image_count"`
|
||||
@@ -326,6 +377,7 @@ type UsageCleanupFilters struct {
|
||||
AccountID *int64 `json:"account_id,omitempty"`
|
||||
GroupID *int64 `json:"group_id,omitempty"`
|
||||
Model *string `json:"model,omitempty"`
|
||||
RequestType *string `json:"request_type,omitempty"`
|
||||
Stream *bool `json:"stream,omitempty"`
|
||||
BillingType *int8 `json:"billing_type,omitempty"`
|
||||
}
|
||||
|
||||
@@ -2,11 +2,12 @@ package handler
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/logger"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// TempUnscheduler 用于 HandleFailoverError 中同账号重试耗尽后的临时封禁。
|
||||
@@ -78,8 +79,12 @@ func (s *FailoverState) HandleFailoverError(
|
||||
// 同账号重试:对 RetryableOnSameAccount 的临时性错误,先在同一账号上重试
|
||||
if failoverErr.RetryableOnSameAccount && s.SameAccountRetryCount[accountID] < maxSameAccountRetries {
|
||||
s.SameAccountRetryCount[accountID]++
|
||||
log.Printf("Account %d: retryable error %d, same-account retry %d/%d",
|
||||
accountID, failoverErr.StatusCode, s.SameAccountRetryCount[accountID], maxSameAccountRetries)
|
||||
logger.FromContext(ctx).Warn("gateway.failover_same_account_retry",
|
||||
zap.Int64("account_id", accountID),
|
||||
zap.Int("upstream_status", failoverErr.StatusCode),
|
||||
zap.Int("same_account_retry_count", s.SameAccountRetryCount[accountID]),
|
||||
zap.Int("same_account_retry_max", maxSameAccountRetries),
|
||||
)
|
||||
if !sleepWithContext(ctx, sameAccountRetryDelay) {
|
||||
return FailoverCanceled
|
||||
}
|
||||
@@ -101,8 +106,12 @@ func (s *FailoverState) HandleFailoverError(
|
||||
|
||||
// 递增切换计数
|
||||
s.SwitchCount++
|
||||
log.Printf("Account %d: upstream error %d, switching account %d/%d",
|
||||
accountID, failoverErr.StatusCode, s.SwitchCount, s.MaxSwitches)
|
||||
logger.FromContext(ctx).Warn("gateway.failover_switch_account",
|
||||
zap.Int64("account_id", accountID),
|
||||
zap.Int("upstream_status", failoverErr.StatusCode),
|
||||
zap.Int("switch_count", s.SwitchCount),
|
||||
zap.Int("max_switches", s.MaxSwitches),
|
||||
)
|
||||
|
||||
// Antigravity 平台换号线性递增延时
|
||||
if platform == service.PlatformAntigravity {
|
||||
@@ -127,13 +136,18 @@ func (s *FailoverState) HandleSelectionExhausted(ctx context.Context) FailoverAc
|
||||
s.LastFailoverErr.StatusCode == http.StatusServiceUnavailable &&
|
||||
s.SwitchCount <= s.MaxSwitches {
|
||||
|
||||
log.Printf("Antigravity single-account 503 backoff: waiting %v before retry (attempt %d)",
|
||||
singleAccountBackoffDelay, s.SwitchCount)
|
||||
logger.FromContext(ctx).Warn("gateway.failover_single_account_backoff",
|
||||
zap.Duration("backoff_delay", singleAccountBackoffDelay),
|
||||
zap.Int("switch_count", s.SwitchCount),
|
||||
zap.Int("max_switches", s.MaxSwitches),
|
||||
)
|
||||
if !sleepWithContext(ctx, singleAccountBackoffDelay) {
|
||||
return FailoverCanceled
|
||||
}
|
||||
log.Printf("Antigravity single-account 503 retry: clearing failed accounts, retry %d/%d",
|
||||
s.SwitchCount, s.MaxSwitches)
|
||||
logger.FromContext(ctx).Warn("gateway.failover_single_account_retry",
|
||||
zap.Int("switch_count", s.SwitchCount),
|
||||
zap.Int("max_switches", s.MaxSwitches),
|
||||
)
|
||||
s.FailedAccountIDs = make(map[int64]struct{})
|
||||
return FailoverContinue
|
||||
}
|
||||
|
||||
@@ -6,9 +6,10 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/config"
|
||||
@@ -17,9 +18,11 @@ import (
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/claude"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey"
|
||||
pkgerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors"
|
||||
pkghttputil "github.com/Wei-Shaw/sub2api/internal/pkg/httputil"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/ip"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/logger"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/openai"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/timezone"
|
||||
middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
|
||||
@@ -27,6 +30,10 @@ import (
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const gatewayCompatibilityMetricsLogInterval = 1024
|
||||
|
||||
var gatewayCompatibilityMetricsLogCounter atomic.Uint64
|
||||
|
||||
// GatewayHandler handles API gateway requests
|
||||
type GatewayHandler struct {
|
||||
gatewayService *service.GatewayService
|
||||
@@ -39,9 +46,11 @@ type GatewayHandler struct {
|
||||
usageRecordWorkerPool *service.UsageRecordWorkerPool
|
||||
errorPassthroughService *service.ErrorPassthroughService
|
||||
concurrencyHelper *ConcurrencyHelper
|
||||
userMsgQueueHelper *UserMsgQueueHelper
|
||||
maxAccountSwitches int
|
||||
maxAccountSwitchesGemini int
|
||||
cfg *config.Config
|
||||
settingService *service.SettingService
|
||||
}
|
||||
|
||||
// NewGatewayHandler creates a new GatewayHandler
|
||||
@@ -56,7 +65,9 @@ func NewGatewayHandler(
|
||||
apiKeyService *service.APIKeyService,
|
||||
usageRecordWorkerPool *service.UsageRecordWorkerPool,
|
||||
errorPassthroughService *service.ErrorPassthroughService,
|
||||
userMsgQueueService *service.UserMessageQueueService,
|
||||
cfg *config.Config,
|
||||
settingService *service.SettingService,
|
||||
) *GatewayHandler {
|
||||
pingInterval := time.Duration(0)
|
||||
maxAccountSwitches := 10
|
||||
@@ -70,6 +81,13 @@ func NewGatewayHandler(
|
||||
maxAccountSwitchesGemini = cfg.Gateway.MaxAccountSwitchesGemini
|
||||
}
|
||||
}
|
||||
|
||||
// 初始化用户消息串行队列 helper
|
||||
var umqHelper *UserMsgQueueHelper
|
||||
if userMsgQueueService != nil && cfg != nil {
|
||||
umqHelper = NewUserMsgQueueHelper(userMsgQueueService, SSEPingFormatClaude, pingInterval)
|
||||
}
|
||||
|
||||
return &GatewayHandler{
|
||||
gatewayService: gatewayService,
|
||||
geminiCompatService: geminiCompatService,
|
||||
@@ -81,9 +99,11 @@ func NewGatewayHandler(
|
||||
usageRecordWorkerPool: usageRecordWorkerPool,
|
||||
errorPassthroughService: errorPassthroughService,
|
||||
concurrencyHelper: NewConcurrencyHelper(concurrencyService, SSEPingFormatClaude, pingInterval),
|
||||
userMsgQueueHelper: umqHelper,
|
||||
maxAccountSwitches: maxAccountSwitches,
|
||||
maxAccountSwitchesGemini: maxAccountSwitchesGemini,
|
||||
cfg: cfg,
|
||||
settingService: settingService,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -109,9 +129,10 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
||||
zap.Int64("api_key_id", apiKey.ID),
|
||||
zap.Any("group_id", apiKey.GroupID),
|
||||
)
|
||||
defer h.maybeLogCompatibilityFallbackMetrics(reqLog)
|
||||
|
||||
// 读取请求体
|
||||
body, err := io.ReadAll(c.Request.Body)
|
||||
body, err := pkghttputil.ReadRequestBodyWithPrealloc(c.Request)
|
||||
if err != nil {
|
||||
if maxErr, ok := extractMaxBytesError(err); ok {
|
||||
h.errorResponse(c, http.StatusRequestEntityTooLarge, "invalid_request_error", buildBodyTooLargeMessage(maxErr.Limit))
|
||||
@@ -140,16 +161,21 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
||||
// 设置 max_tokens=1 + haiku 探测请求标识到 context 中
|
||||
// 必须在 SetClaudeCodeClientContext 之前设置,因为 ClaudeCodeValidator 需要读取此标识进行绕过判断
|
||||
if isMaxTokensOneHaikuRequest(reqModel, parsedReq.MaxTokens, reqStream) {
|
||||
ctx := context.WithValue(c.Request.Context(), ctxkey.IsMaxTokensOneHaikuRequest, true)
|
||||
ctx := service.WithIsMaxTokensOneHaikuRequest(c.Request.Context(), true, h.metadataBridgeEnabled())
|
||||
c.Request = c.Request.WithContext(ctx)
|
||||
}
|
||||
|
||||
// 检查是否为 Claude Code 客户端,设置到 context 中
|
||||
SetClaudeCodeClientContext(c, body)
|
||||
// 检查是否为 Claude Code 客户端,设置到 context 中(复用已解析请求,避免二次反序列化)。
|
||||
SetClaudeCodeClientContext(c, body, parsedReq)
|
||||
isClaudeCodeClient := service.IsClaudeCodeClient(c.Request.Context())
|
||||
|
||||
// 版本检查:仅对 Claude Code 客户端,拒绝低于最低版本的请求
|
||||
if !h.checkClaudeCodeVersion(c) {
|
||||
return
|
||||
}
|
||||
|
||||
// 在请求上下文中记录 thinking 状态,供 Antigravity 最终模型 key 推导/模型维度限流使用
|
||||
c.Request = c.Request.WithContext(context.WithValue(c.Request.Context(), ctxkey.ThinkingEnabled, parsedReq.ThinkingEnabled))
|
||||
c.Request = c.Request.WithContext(service.WithThinkingEnabled(c.Request.Context(), parsedReq.ThinkingEnabled, h.metadataBridgeEnabled()))
|
||||
|
||||
setOpsRequestContext(c, reqModel, reqStream, body)
|
||||
|
||||
@@ -247,8 +273,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
||||
if apiKey.GroupID != nil {
|
||||
prefetchedGroupID = *apiKey.GroupID
|
||||
}
|
||||
ctx := context.WithValue(c.Request.Context(), ctxkey.PrefetchedStickyAccountID, sessionBoundAccountID)
|
||||
ctx = context.WithValue(ctx, ctxkey.PrefetchedStickyGroupID, prefetchedGroupID)
|
||||
ctx := service.WithPrefetchedStickySession(c.Request.Context(), sessionBoundAccountID, prefetchedGroupID, h.metadataBridgeEnabled())
|
||||
c.Request = c.Request.WithContext(ctx)
|
||||
}
|
||||
}
|
||||
@@ -261,7 +286,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
||||
// 单账号分组提前设置 SingleAccountRetry 标记,让 Service 层首次 503 就不设模型限流标记。
|
||||
// 避免单账号分组收到 503 (MODEL_CAPACITY_EXHAUSTED) 时设 29s 限流,导致后续请求连续快速失败。
|
||||
if h.gatewayService.IsSingleAntigravityAccountGroup(c.Request.Context(), apiKey.GroupID) {
|
||||
ctx := context.WithValue(c.Request.Context(), ctxkey.SingleAccountRetry, true)
|
||||
ctx := service.WithSingleAccountRetry(c.Request.Context(), true, h.metadataBridgeEnabled())
|
||||
c.Request = c.Request.WithContext(ctx)
|
||||
}
|
||||
|
||||
@@ -275,7 +300,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
||||
action := fs.HandleSelectionExhausted(c.Request.Context())
|
||||
switch action {
|
||||
case FailoverContinue:
|
||||
ctx := context.WithValue(c.Request.Context(), ctxkey.SingleAccountRetry, true)
|
||||
ctx := service.WithSingleAccountRetry(c.Request.Context(), true, h.metadataBridgeEnabled())
|
||||
c.Request = c.Request.WithContext(ctx)
|
||||
continue
|
||||
case FailoverCanceled:
|
||||
@@ -364,7 +389,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
||||
var result *service.ForwardResult
|
||||
requestCtx := c.Request.Context()
|
||||
if fs.SwitchCount > 0 {
|
||||
requestCtx = context.WithValue(requestCtx, ctxkey.AccountSwitchCount, fs.SwitchCount)
|
||||
requestCtx = service.WithAccountSwitchCount(requestCtx, fs.SwitchCount, h.metadataBridgeEnabled())
|
||||
}
|
||||
if account.Platform == service.PlatformAntigravity {
|
||||
result, err = h.antigravityGatewayService.ForwardGemini(requestCtx, c, account, reqModel, "generateContent", reqStream, body, hasBoundSession)
|
||||
@@ -397,6 +422,15 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
// RPM 计数递增(Forward 成功后)
|
||||
// 注意:TOCTOU 竞态是已知且可接受的设计权衡,与 WindowCost 一致的 soft-limit 模式。
|
||||
// 在高并发下可能短暂超出 RPM 限制,但不会导致请求失败。
|
||||
if account.IsAnthropicOAuthOrSetupToken() && account.GetBaseRPM() > 0 {
|
||||
if err := h.gatewayService.IncrementAccountRPM(c.Request.Context(), account.ID); err != nil {
|
||||
reqLog.Warn("gateway.rpm_increment_failed", zap.Int64("account_id", account.ID), zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
// 捕获请求信息(用于异步记录,避免在 goroutine 中访问 gin.Context)
|
||||
userAgent := c.GetHeader("User-Agent")
|
||||
clientIP := ip.GetClientIP(c)
|
||||
@@ -440,7 +474,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
||||
// 单账号分组提前设置 SingleAccountRetry 标记,让 Service 层首次 503 就不设模型限流标记。
|
||||
// 避免单账号分组收到 503 (MODEL_CAPACITY_EXHAUSTED) 时设 29s 限流,导致后续请求连续快速失败。
|
||||
if h.gatewayService.IsSingleAntigravityAccountGroup(c.Request.Context(), currentAPIKey.GroupID) {
|
||||
ctx := context.WithValue(c.Request.Context(), ctxkey.SingleAccountRetry, true)
|
||||
ctx := service.WithSingleAccountRetry(c.Request.Context(), true, h.metadataBridgeEnabled())
|
||||
c.Request = c.Request.WithContext(ctx)
|
||||
}
|
||||
|
||||
@@ -459,7 +493,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
||||
action := fs.HandleSelectionExhausted(c.Request.Context())
|
||||
switch action {
|
||||
case FailoverContinue:
|
||||
ctx := context.WithValue(c.Request.Context(), ctxkey.SingleAccountRetry, true)
|
||||
ctx := service.WithSingleAccountRetry(c.Request.Context(), true, h.metadataBridgeEnabled())
|
||||
c.Request = c.Request.WithContext(ctx)
|
||||
continue
|
||||
case FailoverCanceled:
|
||||
@@ -544,18 +578,78 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
||||
// 账号槽位/等待计数需要在超时或断开时安全回收
|
||||
accountReleaseFunc = wrapReleaseOnDone(c.Request.Context(), accountReleaseFunc)
|
||||
|
||||
// ===== 用户消息串行队列 START =====
|
||||
var queueRelease func()
|
||||
umqMode := h.getUserMsgQueueMode(account, parsedReq)
|
||||
|
||||
switch umqMode {
|
||||
case config.UMQModeSerialize:
|
||||
// 串行模式:获取锁 + RPM 延迟 + 释放(当前行为不变)
|
||||
baseRPM := account.GetBaseRPM()
|
||||
release, qErr := h.userMsgQueueHelper.AcquireWithWait(
|
||||
c, account.ID, baseRPM, reqStream, &streamStarted,
|
||||
h.cfg.Gateway.UserMessageQueue.WaitTimeout(),
|
||||
reqLog,
|
||||
)
|
||||
if qErr != nil {
|
||||
// fail-open: 记录 warn,不阻止请求
|
||||
reqLog.Warn("gateway.umq_acquire_failed",
|
||||
zap.Int64("account_id", account.ID),
|
||||
zap.Error(qErr),
|
||||
)
|
||||
} else {
|
||||
queueRelease = release
|
||||
}
|
||||
|
||||
case config.UMQModeThrottle:
|
||||
// 软性限速:仅施加 RPM 自适应延迟,不阻塞并发
|
||||
baseRPM := account.GetBaseRPM()
|
||||
if tErr := h.userMsgQueueHelper.ThrottleWithPing(
|
||||
c, account.ID, baseRPM, reqStream, &streamStarted,
|
||||
h.cfg.Gateway.UserMessageQueue.WaitTimeout(),
|
||||
reqLog,
|
||||
); tErr != nil {
|
||||
reqLog.Warn("gateway.umq_throttle_failed",
|
||||
zap.Int64("account_id", account.ID),
|
||||
zap.Error(tErr),
|
||||
)
|
||||
}
|
||||
|
||||
default:
|
||||
if umqMode != "" {
|
||||
reqLog.Warn("gateway.umq_unknown_mode",
|
||||
zap.String("mode", umqMode),
|
||||
zap.Int64("account_id", account.ID),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// 用 wrapReleaseOnDone 确保 context 取消时自动释放(仅 serialize 模式有 queueRelease)
|
||||
queueRelease = wrapReleaseOnDone(c.Request.Context(), queueRelease)
|
||||
// 注入回调到 ParsedRequest:使用外层 wrapper 以便提前清理 AfterFunc
|
||||
parsedReq.OnUpstreamAccepted = queueRelease
|
||||
// ===== 用户消息串行队列 END =====
|
||||
|
||||
// 转发请求 - 根据账号平台分流
|
||||
c.Set("parsed_request", parsedReq)
|
||||
var result *service.ForwardResult
|
||||
requestCtx := c.Request.Context()
|
||||
if fs.SwitchCount > 0 {
|
||||
requestCtx = context.WithValue(requestCtx, ctxkey.AccountSwitchCount, fs.SwitchCount)
|
||||
requestCtx = service.WithAccountSwitchCount(requestCtx, fs.SwitchCount, h.metadataBridgeEnabled())
|
||||
}
|
||||
if account.Platform == service.PlatformAntigravity && account.Type != service.AccountTypeAPIKey {
|
||||
result, err = h.antigravityGatewayService.Forward(requestCtx, c, account, body, hasBoundSession)
|
||||
} else {
|
||||
result, err = h.gatewayService.Forward(requestCtx, c, account, parsedReq)
|
||||
}
|
||||
|
||||
// 兜底释放串行锁(正常情况已通过回调提前释放)
|
||||
if queueRelease != nil {
|
||||
queueRelease()
|
||||
}
|
||||
// 清理回调引用,防止 failover 重试时旧回调被错误调用
|
||||
parsedReq.OnUpstreamAccepted = nil
|
||||
|
||||
if accountReleaseFunc != nil {
|
||||
accountReleaseFunc()
|
||||
}
|
||||
@@ -591,7 +685,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
||||
h.handleStreamingAwareError(c, status, code, message, streamStarted)
|
||||
return
|
||||
}
|
||||
// 兜底重试按“直接请求兜底分组”处理:清除强制平台,允许按分组平台调度
|
||||
// 兜底重试按"直接请求兜底分组"处理:清除强制平台,允许按分组平台调度
|
||||
ctx := context.WithValue(c.Request.Context(), ctxkey.ForcePlatform, "")
|
||||
c.Request = c.Request.WithContext(ctx)
|
||||
currentAPIKey = fallbackAPIKey
|
||||
@@ -625,6 +719,15 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
// RPM 计数递增(Forward 成功后)
|
||||
// 注意:TOCTOU 竞态是已知且可接受的设计权衡,与 WindowCost 一致的 soft-limit 模式。
|
||||
// 在高并发下可能短暂超出 RPM 限制,但不会导致请求失败。
|
||||
if account.IsAnthropicOAuthOrSetupToken() && account.GetBaseRPM() > 0 {
|
||||
if err := h.gatewayService.IncrementAccountRPM(c.Request.Context(), account.ID); err != nil {
|
||||
reqLog.Warn("gateway.rpm_increment_failed", zap.Int64("account_id", account.ID), zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
// 捕获请求信息(用于异步记录,避免在 goroutine 中访问 gin.Context)
|
||||
userAgent := c.GetHeader("User-Agent")
|
||||
clientIP := ip.GetClientIP(c)
|
||||
@@ -745,6 +848,10 @@ func cloneAPIKeyWithGroup(apiKey *service.APIKey, group *service.Group) *service
|
||||
|
||||
// Usage handles getting account balance and usage statistics for CC Switch integration
|
||||
// GET /v1/usage
|
||||
//
|
||||
// Two modes:
|
||||
// - quota_limited: API Key has quota or rate limits configured. Returns key-level limits/usage.
|
||||
// - unrestricted: No key-level limits. Returns subscription or wallet balance info.
|
||||
func (h *GatewayHandler) Usage(c *gin.Context) {
|
||||
apiKey, ok := middleware2.GetAPIKeyFromContext(c)
|
||||
if !ok {
|
||||
@@ -758,54 +865,183 @@ func (h *GatewayHandler) Usage(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
ctx := c.Request.Context()
|
||||
|
||||
// 解析可选的日期范围参数(用于 model_stats 查询)
|
||||
startTime, endTime := h.parseUsageDateRange(c)
|
||||
|
||||
// Best-effort: 获取用量统计(按当前 API Key 过滤),失败不影响基础响应
|
||||
var usageData gin.H
|
||||
usageData := h.buildUsageData(ctx, apiKey.ID)
|
||||
|
||||
// Best-effort: 获取模型统计
|
||||
var modelStats any
|
||||
if h.usageService != nil {
|
||||
dashStats, err := h.usageService.GetAPIKeyDashboardStats(c.Request.Context(), apiKey.ID)
|
||||
if err == nil && dashStats != nil {
|
||||
usageData = gin.H{
|
||||
"today": gin.H{
|
||||
"requests": dashStats.TodayRequests,
|
||||
"input_tokens": dashStats.TodayInputTokens,
|
||||
"output_tokens": dashStats.TodayOutputTokens,
|
||||
"cache_creation_tokens": dashStats.TodayCacheCreationTokens,
|
||||
"cache_read_tokens": dashStats.TodayCacheReadTokens,
|
||||
"total_tokens": dashStats.TodayTokens,
|
||||
"cost": dashStats.TodayCost,
|
||||
"actual_cost": dashStats.TodayActualCost,
|
||||
},
|
||||
"total": gin.H{
|
||||
"requests": dashStats.TotalRequests,
|
||||
"input_tokens": dashStats.TotalInputTokens,
|
||||
"output_tokens": dashStats.TotalOutputTokens,
|
||||
"cache_creation_tokens": dashStats.TotalCacheCreationTokens,
|
||||
"cache_read_tokens": dashStats.TotalCacheReadTokens,
|
||||
"total_tokens": dashStats.TotalTokens,
|
||||
"cost": dashStats.TotalCost,
|
||||
"actual_cost": dashStats.TotalActualCost,
|
||||
},
|
||||
"average_duration_ms": dashStats.AverageDurationMs,
|
||||
"rpm": dashStats.Rpm,
|
||||
"tpm": dashStats.Tpm,
|
||||
if stats, err := h.usageService.GetAPIKeyModelStats(ctx, apiKey.ID, startTime, endTime); err == nil && len(stats) > 0 {
|
||||
modelStats = stats
|
||||
}
|
||||
}
|
||||
|
||||
// 判断模式: key 有总额度或速率限制 → quota_limited,否则 → unrestricted
|
||||
isQuotaLimited := apiKey.Quota > 0 || apiKey.HasRateLimits()
|
||||
|
||||
if isQuotaLimited {
|
||||
h.usageQuotaLimited(c, ctx, apiKey, usageData, modelStats)
|
||||
return
|
||||
}
|
||||
|
||||
h.usageUnrestricted(c, ctx, apiKey, subject, usageData, modelStats)
|
||||
}
|
||||
|
||||
// parseUsageDateRange 解析 start_date / end_date query params,默认返回近 30 天范围
|
||||
func (h *GatewayHandler) parseUsageDateRange(c *gin.Context) (time.Time, time.Time) {
|
||||
now := timezone.Now()
|
||||
endTime := now
|
||||
startTime := now.AddDate(0, 0, -30)
|
||||
|
||||
if s := c.Query("start_date"); s != "" {
|
||||
if t, err := timezone.ParseInLocation("2006-01-02", s); err == nil {
|
||||
startTime = t
|
||||
}
|
||||
}
|
||||
if s := c.Query("end_date"); s != "" {
|
||||
if t, err := timezone.ParseInLocation("2006-01-02", s); err == nil {
|
||||
endTime = t.Add(24*time.Hour - time.Second) // end of day
|
||||
}
|
||||
}
|
||||
return startTime, endTime
|
||||
}
|
||||
|
||||
// buildUsageData 构建 today/total 用量摘要
|
||||
func (h *GatewayHandler) buildUsageData(ctx context.Context, apiKeyID int64) gin.H {
|
||||
if h.usageService == nil {
|
||||
return nil
|
||||
}
|
||||
dashStats, err := h.usageService.GetAPIKeyDashboardStats(ctx, apiKeyID)
|
||||
if err != nil || dashStats == nil {
|
||||
return nil
|
||||
}
|
||||
return gin.H{
|
||||
"today": gin.H{
|
||||
"requests": dashStats.TodayRequests,
|
||||
"input_tokens": dashStats.TodayInputTokens,
|
||||
"output_tokens": dashStats.TodayOutputTokens,
|
||||
"cache_creation_tokens": dashStats.TodayCacheCreationTokens,
|
||||
"cache_read_tokens": dashStats.TodayCacheReadTokens,
|
||||
"total_tokens": dashStats.TodayTokens,
|
||||
"cost": dashStats.TodayCost,
|
||||
"actual_cost": dashStats.TodayActualCost,
|
||||
},
|
||||
"total": gin.H{
|
||||
"requests": dashStats.TotalRequests,
|
||||
"input_tokens": dashStats.TotalInputTokens,
|
||||
"output_tokens": dashStats.TotalOutputTokens,
|
||||
"cache_creation_tokens": dashStats.TotalCacheCreationTokens,
|
||||
"cache_read_tokens": dashStats.TotalCacheReadTokens,
|
||||
"total_tokens": dashStats.TotalTokens,
|
||||
"cost": dashStats.TotalCost,
|
||||
"actual_cost": dashStats.TotalActualCost,
|
||||
},
|
||||
"average_duration_ms": dashStats.AverageDurationMs,
|
||||
"rpm": dashStats.Rpm,
|
||||
"tpm": dashStats.Tpm,
|
||||
}
|
||||
}
|
||||
|
||||
// usageQuotaLimited 处理 quota_limited 模式的响应
|
||||
func (h *GatewayHandler) usageQuotaLimited(c *gin.Context, ctx context.Context, apiKey *service.APIKey, usageData gin.H, modelStats any) {
|
||||
resp := gin.H{
|
||||
"mode": "quota_limited",
|
||||
"isValid": apiKey.Status == service.StatusAPIKeyActive || apiKey.Status == service.StatusAPIKeyQuotaExhausted || apiKey.Status == service.StatusAPIKeyExpired,
|
||||
"status": apiKey.Status,
|
||||
}
|
||||
|
||||
// 总额度信息
|
||||
if apiKey.Quota > 0 {
|
||||
remaining := apiKey.GetQuotaRemaining()
|
||||
resp["quota"] = gin.H{
|
||||
"limit": apiKey.Quota,
|
||||
"used": apiKey.QuotaUsed,
|
||||
"remaining": remaining,
|
||||
"unit": "USD",
|
||||
}
|
||||
resp["remaining"] = remaining
|
||||
resp["unit"] = "USD"
|
||||
}
|
||||
|
||||
// 速率限制信息(从 DB 获取实时用量)
|
||||
if apiKey.HasRateLimits() && h.apiKeyService != nil {
|
||||
rateLimitData, err := h.apiKeyService.GetRateLimitData(ctx, apiKey.ID)
|
||||
if err == nil && rateLimitData != nil {
|
||||
var rateLimits []gin.H
|
||||
if apiKey.RateLimit5h > 0 {
|
||||
used := rateLimitData.Usage5h
|
||||
rateLimits = append(rateLimits, gin.H{
|
||||
"window": "5h",
|
||||
"limit": apiKey.RateLimit5h,
|
||||
"used": used,
|
||||
"remaining": max(0, apiKey.RateLimit5h-used),
|
||||
"window_start": rateLimitData.Window5hStart,
|
||||
})
|
||||
}
|
||||
if apiKey.RateLimit1d > 0 {
|
||||
used := rateLimitData.Usage1d
|
||||
rateLimits = append(rateLimits, gin.H{
|
||||
"window": "1d",
|
||||
"limit": apiKey.RateLimit1d,
|
||||
"used": used,
|
||||
"remaining": max(0, apiKey.RateLimit1d-used),
|
||||
"window_start": rateLimitData.Window1dStart,
|
||||
})
|
||||
}
|
||||
if apiKey.RateLimit7d > 0 {
|
||||
used := rateLimitData.Usage7d
|
||||
rateLimits = append(rateLimits, gin.H{
|
||||
"window": "7d",
|
||||
"limit": apiKey.RateLimit7d,
|
||||
"used": used,
|
||||
"remaining": max(0, apiKey.RateLimit7d-used),
|
||||
"window_start": rateLimitData.Window7dStart,
|
||||
})
|
||||
}
|
||||
if len(rateLimits) > 0 {
|
||||
resp["rate_limits"] = rateLimits
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 订阅模式:返回订阅限额信息 + 用量统计
|
||||
// 过期时间
|
||||
if apiKey.ExpiresAt != nil {
|
||||
resp["expires_at"] = apiKey.ExpiresAt
|
||||
resp["days_until_expiry"] = apiKey.GetDaysUntilExpiry()
|
||||
}
|
||||
|
||||
if usageData != nil {
|
||||
resp["usage"] = usageData
|
||||
}
|
||||
if modelStats != nil {
|
||||
resp["model_stats"] = modelStats
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, resp)
|
||||
}
|
||||
|
||||
// usageUnrestricted 处理 unrestricted 模式的响应(向后兼容)
|
||||
func (h *GatewayHandler) usageUnrestricted(c *gin.Context, ctx context.Context, apiKey *service.APIKey, subject middleware2.AuthSubject, usageData gin.H, modelStats any) {
|
||||
// 订阅模式
|
||||
if apiKey.Group != nil && apiKey.Group.IsSubscriptionType() {
|
||||
subscription, ok := middleware2.GetSubscriptionFromContext(c)
|
||||
if !ok {
|
||||
h.errorResponse(c, http.StatusForbidden, "subscription_error", "No active subscription")
|
||||
return
|
||||
resp := gin.H{
|
||||
"mode": "unrestricted",
|
||||
"isValid": true,
|
||||
"planName": apiKey.Group.Name,
|
||||
"unit": "USD",
|
||||
}
|
||||
|
||||
remaining := h.calculateSubscriptionRemaining(apiKey.Group, subscription)
|
||||
resp := gin.H{
|
||||
"isValid": true,
|
||||
"planName": apiKey.Group.Name,
|
||||
"remaining": remaining,
|
||||
"unit": "USD",
|
||||
"subscription": gin.H{
|
||||
// 订阅信息可能不在 context 中(/v1/usage 路径跳过了中间件的计费检查)
|
||||
subscription, ok := middleware2.GetSubscriptionFromContext(c)
|
||||
if ok {
|
||||
remaining := h.calculateSubscriptionRemaining(apiKey.Group, subscription)
|
||||
resp["remaining"] = remaining
|
||||
resp["subscription"] = gin.H{
|
||||
"daily_usage_usd": subscription.DailyUsageUSD,
|
||||
"weekly_usage_usd": subscription.WeeklyUsageUSD,
|
||||
"monthly_usage_usd": subscription.MonthlyUsageUSD,
|
||||
@@ -813,23 +1049,28 @@ func (h *GatewayHandler) Usage(c *gin.Context) {
|
||||
"weekly_limit_usd": apiKey.Group.WeeklyLimitUSD,
|
||||
"monthly_limit_usd": apiKey.Group.MonthlyLimitUSD,
|
||||
"expires_at": subscription.ExpiresAt,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
if usageData != nil {
|
||||
resp["usage"] = usageData
|
||||
}
|
||||
if modelStats != nil {
|
||||
resp["model_stats"] = modelStats
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
}
|
||||
|
||||
// 余额模式:返回钱包余额 + 用量统计
|
||||
latestUser, err := h.userService.GetByID(c.Request.Context(), subject.UserID)
|
||||
// 余额模式
|
||||
latestUser, err := h.userService.GetByID(ctx, subject.UserID)
|
||||
if err != nil {
|
||||
h.errorResponse(c, http.StatusInternalServerError, "api_error", "Failed to get user info")
|
||||
return
|
||||
}
|
||||
|
||||
resp := gin.H{
|
||||
"mode": "unrestricted",
|
||||
"isValid": true,
|
||||
"planName": "钱包余额",
|
||||
"remaining": latestUser.Balance,
|
||||
@@ -839,6 +1080,9 @@ func (h *GatewayHandler) Usage(c *gin.Context) {
|
||||
if usageData != nil {
|
||||
resp["usage"] = usageData
|
||||
}
|
||||
if modelStats != nil {
|
||||
resp["model_stats"] = modelStats
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
}
|
||||
|
||||
@@ -959,20 +1203,8 @@ func (h *GatewayHandler) handleStreamingAwareError(c *gin.Context, status int, e
|
||||
// Stream already started, send error as SSE event then close
|
||||
flusher, ok := c.Writer.(http.Flusher)
|
||||
if ok {
|
||||
// Send error event in SSE format with proper JSON marshaling
|
||||
errorData := map[string]any{
|
||||
"type": "error",
|
||||
"error": map[string]string{
|
||||
"type": errType,
|
||||
"message": message,
|
||||
},
|
||||
}
|
||||
jsonBytes, err := json.Marshal(errorData)
|
||||
if err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
errorEvent := fmt.Sprintf("data: %s\n\n", string(jsonBytes))
|
||||
// SSE 错误事件固定 schema,使用 Quote 直拼可避免额外 Marshal 分配。
|
||||
errorEvent := `data: {"type":"error","error":{"type":` + strconv.Quote(errType) + `,"message":` + strconv.Quote(message) + `}}` + "\n\n"
|
||||
if _, err := fmt.Fprint(c.Writer, errorEvent); err != nil {
|
||||
_ = c.Error(err)
|
||||
}
|
||||
@@ -994,6 +1226,41 @@ func (h *GatewayHandler) ensureForwardErrorResponse(c *gin.Context, streamStarte
|
||||
return true
|
||||
}
|
||||
|
||||
// checkClaudeCodeVersion 检查 Claude Code 客户端版本是否满足最低要求
|
||||
// 仅对已识别的 Claude Code 客户端执行,count_tokens 路径除外
|
||||
func (h *GatewayHandler) checkClaudeCodeVersion(c *gin.Context) bool {
|
||||
ctx := c.Request.Context()
|
||||
if !service.IsClaudeCodeClient(ctx) {
|
||||
return true
|
||||
}
|
||||
|
||||
// 排除 count_tokens 子路径
|
||||
if strings.HasSuffix(c.Request.URL.Path, "/count_tokens") {
|
||||
return true
|
||||
}
|
||||
|
||||
minVersion := h.settingService.GetMinClaudeCodeVersion(ctx)
|
||||
if minVersion == "" {
|
||||
return true // 未设置,不检查
|
||||
}
|
||||
|
||||
clientVersion := service.GetClaudeCodeVersion(ctx)
|
||||
if clientVersion == "" {
|
||||
h.errorResponse(c, http.StatusBadRequest, "invalid_request_error",
|
||||
"Unable to determine Claude Code version. Please update Claude Code: npm update -g @anthropic-ai/claude-code")
|
||||
return false
|
||||
}
|
||||
|
||||
if service.CompareVersions(clientVersion, minVersion) < 0 {
|
||||
h.errorResponse(c, http.StatusBadRequest, "invalid_request_error",
|
||||
fmt.Sprintf("Your Claude Code version (%s) is below the minimum required version (%s). Please update: npm update -g @anthropic-ai/claude-code",
|
||||
clientVersion, minVersion))
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// errorResponse 返回Claude API格式的错误响应
|
||||
func (h *GatewayHandler) errorResponse(c *gin.Context, status int, errType, message string) {
|
||||
c.JSON(status, gin.H{
|
||||
@@ -1027,9 +1294,10 @@ func (h *GatewayHandler) CountTokens(c *gin.Context) {
|
||||
zap.Int64("api_key_id", apiKey.ID),
|
||||
zap.Any("group_id", apiKey.GroupID),
|
||||
)
|
||||
defer h.maybeLogCompatibilityFallbackMetrics(reqLog)
|
||||
|
||||
// 读取请求体
|
||||
body, err := io.ReadAll(c.Request.Body)
|
||||
body, err := pkghttputil.ReadRequestBodyWithPrealloc(c.Request)
|
||||
if err != nil {
|
||||
if maxErr, ok := extractMaxBytesError(err); ok {
|
||||
h.errorResponse(c, http.StatusRequestEntityTooLarge, "invalid_request_error", buildBodyTooLargeMessage(maxErr.Limit))
|
||||
@@ -1044,9 +1312,6 @@ func (h *GatewayHandler) CountTokens(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
// 检查是否为 Claude Code 客户端,设置到 context 中
|
||||
SetClaudeCodeClientContext(c, body)
|
||||
|
||||
setOpsRequestContext(c, "", false, body)
|
||||
|
||||
parsedReq, err := service.ParseGatewayRequest(body, domain.PlatformAnthropic)
|
||||
@@ -1054,9 +1319,11 @@ func (h *GatewayHandler) CountTokens(c *gin.Context) {
|
||||
h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "Failed to parse request body")
|
||||
return
|
||||
}
|
||||
// count_tokens 走 messages 严格校验时,复用已解析请求,避免二次反序列化。
|
||||
SetClaudeCodeClientContext(c, body, parsedReq)
|
||||
reqLog = reqLog.With(zap.String("model", parsedReq.Model), zap.Bool("stream", parsedReq.Stream))
|
||||
// 在请求上下文中记录 thinking 状态,供 Antigravity 最终模型 key 推导/模型维度限流使用
|
||||
c.Request = c.Request.WithContext(context.WithValue(c.Request.Context(), ctxkey.ThinkingEnabled, parsedReq.ThinkingEnabled))
|
||||
c.Request = c.Request.WithContext(service.WithThinkingEnabled(c.Request.Context(), parsedReq.ThinkingEnabled, h.metadataBridgeEnabled()))
|
||||
|
||||
// 验证 model 必填
|
||||
if parsedReq.Model == "" {
|
||||
@@ -1220,24 +1487,8 @@ func sendMockInterceptStream(c *gin.Context, model string, interceptType Interce
|
||||
textDeltas = []string{"New", " Conversation"}
|
||||
}
|
||||
|
||||
// Build message_start event with proper JSON marshaling
|
||||
messageStart := map[string]any{
|
||||
"type": "message_start",
|
||||
"message": map[string]any{
|
||||
"id": msgID,
|
||||
"type": "message",
|
||||
"role": "assistant",
|
||||
"model": model,
|
||||
"content": []any{},
|
||||
"stop_reason": nil,
|
||||
"stop_sequence": nil,
|
||||
"usage": map[string]int{
|
||||
"input_tokens": 10,
|
||||
"output_tokens": 0,
|
||||
},
|
||||
},
|
||||
}
|
||||
messageStartJSON, _ := json.Marshal(messageStart)
|
||||
// Build message_start event with fixed schema.
|
||||
messageStartJSON := `{"type":"message_start","message":{"id":` + strconv.Quote(msgID) + `,"type":"message","role":"assistant","model":` + strconv.Quote(model) + `,"content":[],"stop_reason":null,"stop_sequence":null,"usage":{"input_tokens":10,"output_tokens":0}}}`
|
||||
|
||||
// Build events
|
||||
events := []string{
|
||||
@@ -1247,31 +1498,12 @@ func sendMockInterceptStream(c *gin.Context, model string, interceptType Interce
|
||||
|
||||
// Add text deltas
|
||||
for _, text := range textDeltas {
|
||||
delta := map[string]any{
|
||||
"type": "content_block_delta",
|
||||
"index": 0,
|
||||
"delta": map[string]string{
|
||||
"type": "text_delta",
|
||||
"text": text,
|
||||
},
|
||||
}
|
||||
deltaJSON, _ := json.Marshal(delta)
|
||||
deltaJSON := `{"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":` + strconv.Quote(text) + `}}`
|
||||
events = append(events, `event: content_block_delta`+"\n"+`data: `+string(deltaJSON))
|
||||
}
|
||||
|
||||
// Add final events
|
||||
messageDelta := map[string]any{
|
||||
"type": "message_delta",
|
||||
"delta": map[string]any{
|
||||
"stop_reason": "end_turn",
|
||||
"stop_sequence": nil,
|
||||
},
|
||||
"usage": map[string]int{
|
||||
"input_tokens": 10,
|
||||
"output_tokens": outputTokens,
|
||||
},
|
||||
}
|
||||
messageDeltaJSON, _ := json.Marshal(messageDelta)
|
||||
messageDeltaJSON := `{"type":"message_delta","delta":{"stop_reason":"end_turn","stop_sequence":null},"usage":{"input_tokens":10,"output_tokens":` + strconv.Itoa(outputTokens) + `}}`
|
||||
|
||||
events = append(events,
|
||||
`event: content_block_stop`+"\n"+`data: {"index":0,"type":"content_block_stop"}`,
|
||||
@@ -1358,6 +1590,18 @@ func billingErrorDetails(err error) (status int, code, message string) {
|
||||
}
|
||||
return http.StatusServiceUnavailable, "billing_service_error", msg
|
||||
}
|
||||
if errors.Is(err, service.ErrAPIKeyRateLimit5hExceeded) {
|
||||
msg := pkgerrors.Message(err)
|
||||
return http.StatusTooManyRequests, "rate_limit_exceeded", msg
|
||||
}
|
||||
if errors.Is(err, service.ErrAPIKeyRateLimit1dExceeded) {
|
||||
msg := pkgerrors.Message(err)
|
||||
return http.StatusTooManyRequests, "rate_limit_exceeded", msg
|
||||
}
|
||||
if errors.Is(err, service.ErrAPIKeyRateLimit7dExceeded) {
|
||||
msg := pkgerrors.Message(err)
|
||||
return http.StatusTooManyRequests, "rate_limit_exceeded", msg
|
||||
}
|
||||
msg := pkgerrors.Message(err)
|
||||
if msg == "" {
|
||||
logger.L().With(
|
||||
@@ -1369,6 +1613,30 @@ func billingErrorDetails(err error) (status int, code, message string) {
|
||||
return http.StatusForbidden, "billing_error", msg
|
||||
}
|
||||
|
||||
func (h *GatewayHandler) metadataBridgeEnabled() bool {
|
||||
if h == nil || h.cfg == nil {
|
||||
return true
|
||||
}
|
||||
return h.cfg.Gateway.OpenAIWS.MetadataBridgeEnabled
|
||||
}
|
||||
|
||||
func (h *GatewayHandler) maybeLogCompatibilityFallbackMetrics(reqLog *zap.Logger) {
|
||||
if reqLog == nil {
|
||||
return
|
||||
}
|
||||
if gatewayCompatibilityMetricsLogCounter.Add(1)%gatewayCompatibilityMetricsLogInterval != 0 {
|
||||
return
|
||||
}
|
||||
metrics := service.SnapshotOpenAICompatibilityFallbackMetrics()
|
||||
reqLog.Info("gateway.compatibility_fallback_metrics",
|
||||
zap.Int64("session_hash_legacy_read_fallback_total", metrics.SessionHashLegacyReadFallbackTotal),
|
||||
zap.Int64("session_hash_legacy_read_fallback_hit", metrics.SessionHashLegacyReadFallbackHit),
|
||||
zap.Int64("session_hash_legacy_dual_write_total", metrics.SessionHashLegacyDualWriteTotal),
|
||||
zap.Float64("session_hash_legacy_read_hit_rate", metrics.SessionHashLegacyReadHitRate),
|
||||
zap.Int64("metadata_legacy_fallback_total", metrics.MetadataLegacyFallbackTotal),
|
||||
)
|
||||
}
|
||||
|
||||
func (h *GatewayHandler) submitUsageRecordTask(task service.UsageRecordTask) {
|
||||
if task == nil {
|
||||
return
|
||||
@@ -1380,5 +1648,34 @@ func (h *GatewayHandler) submitUsageRecordTask(task service.UsageRecordTask) {
|
||||
// 回退路径:worker 池未注入时同步执行,避免退回到无界 goroutine 模式。
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
defer func() {
|
||||
if recovered := recover(); recovered != nil {
|
||||
logger.L().With(
|
||||
zap.String("component", "handler.gateway.messages"),
|
||||
zap.Any("panic", recovered),
|
||||
).Error("gateway.usage_record_task_panic_recovered")
|
||||
}
|
||||
}()
|
||||
task(ctx)
|
||||
}
|
||||
|
||||
// getUserMsgQueueMode 获取当前请求的 UMQ 模式
|
||||
// 返回 "serialize" | "throttle" | ""
|
||||
func (h *GatewayHandler) getUserMsgQueueMode(account *service.Account, parsed *service.ParsedRequest) string {
|
||||
if h.userMsgQueueHelper == nil {
|
||||
return ""
|
||||
}
|
||||
// 仅适用于 Anthropic OAuth/SetupToken 账号
|
||||
if !account.IsAnthropicOAuthOrSetupToken() {
|
||||
return ""
|
||||
}
|
||||
if !service.IsRealUserMessage(parsed) {
|
||||
return ""
|
||||
}
|
||||
// 账号级模式优先,fallback 到全局配置
|
||||
mode := account.GetUserMsgQueueMode()
|
||||
if mode == "" {
|
||||
mode = h.cfg.Gateway.UserMessageQueue.GetEffectiveMode()
|
||||
}
|
||||
return mode
|
||||
}
|
||||
|
||||
@@ -119,6 +119,13 @@ func (f *fakeConcurrencyCache) GetAccountsLoadBatch(context.Context, []service.A
|
||||
func (f *fakeConcurrencyCache) GetUsersLoadBatch(context.Context, []service.UserWithConcurrency) (map[int64]*service.UserLoadInfo, error) {
|
||||
return map[int64]*service.UserLoadInfo{}, nil
|
||||
}
|
||||
func (f *fakeConcurrencyCache) GetAccountConcurrencyBatch(_ context.Context, accountIDs []int64) (map[int64]int, error) {
|
||||
result := make(map[int64]int, len(accountIDs))
|
||||
for _, id := range accountIDs {
|
||||
result[id] = 0
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
func (f *fakeConcurrencyCache) CleanupExpiredAccountSlots(context.Context, int64) error { return nil }
|
||||
|
||||
func newTestGatewayHandler(t *testing.T, group *service.Group, accounts []*service.Account) (*GatewayHandler, func()) {
|
||||
@@ -146,12 +153,13 @@ func newTestGatewayHandler(t *testing.T, group *service.Group, accounts []*servi
|
||||
nil, // deferredService
|
||||
nil, // claudeTokenProvider
|
||||
nil, // sessionLimitCache
|
||||
nil, // rpmCache
|
||||
nil, // digestStore
|
||||
)
|
||||
|
||||
// RunModeSimple:跳过计费检查,避免引入 repo/cache 依赖。
|
||||
cfg := &config.Config{RunMode: config.RunModeSimple}
|
||||
billingCacheSvc := service.NewBillingCacheService(nil, nil, nil, cfg)
|
||||
billingCacheSvc := service.NewBillingCacheService(nil, nil, nil, nil, cfg)
|
||||
|
||||
concurrencySvc := service.NewConcurrencyService(&fakeConcurrencyCache{})
|
||||
concurrencyHelper := NewConcurrencyHelper(concurrencySvc, SSEPingFormatClaude, 0)
|
||||
|
||||
@@ -18,14 +18,21 @@ import (
|
||||
// claudeCodeValidator is a singleton validator for Claude Code client detection
|
||||
var claudeCodeValidator = service.NewClaudeCodeValidator()
|
||||
|
||||
const claudeCodeParsedRequestContextKey = "claude_code_parsed_request"
|
||||
|
||||
// SetClaudeCodeClientContext 检查请求是否来自 Claude Code 客户端,并设置到 context 中
|
||||
// 返回更新后的 context
|
||||
func SetClaudeCodeClientContext(c *gin.Context, body []byte) {
|
||||
func SetClaudeCodeClientContext(c *gin.Context, body []byte, parsedReq *service.ParsedRequest) {
|
||||
if c == nil || c.Request == nil {
|
||||
return
|
||||
}
|
||||
if parsedReq != nil {
|
||||
c.Set(claudeCodeParsedRequestContextKey, parsedReq)
|
||||
}
|
||||
|
||||
ua := c.GetHeader("User-Agent")
|
||||
// Fast path:非 Claude CLI UA 直接判定 false,避免热路径二次 JSON 反序列化。
|
||||
if !claudeCodeValidator.ValidateUserAgent(c.GetHeader("User-Agent")) {
|
||||
if !claudeCodeValidator.ValidateUserAgent(ua) {
|
||||
ctx := service.SetClaudeCodeClient(c.Request.Context(), false)
|
||||
c.Request = c.Request.WithContext(ctx)
|
||||
return
|
||||
@@ -37,8 +44,11 @@ func SetClaudeCodeClientContext(c *gin.Context, body []byte) {
|
||||
isClaudeCode = true
|
||||
} else {
|
||||
// 仅在确认为 Claude CLI 且 messages 路径时再做 body 解析。
|
||||
var bodyMap map[string]any
|
||||
if len(body) > 0 {
|
||||
bodyMap := claudeCodeBodyMapFromParsedRequest(parsedReq)
|
||||
if bodyMap == nil {
|
||||
bodyMap = claudeCodeBodyMapFromContextCache(c)
|
||||
}
|
||||
if bodyMap == nil && len(body) > 0 {
|
||||
_ = json.Unmarshal(body, &bodyMap)
|
||||
}
|
||||
isClaudeCode = claudeCodeValidator.Validate(c.Request, bodyMap)
|
||||
@@ -46,9 +56,53 @@ func SetClaudeCodeClientContext(c *gin.Context, body []byte) {
|
||||
|
||||
// 更新 request context
|
||||
ctx := service.SetClaudeCodeClient(c.Request.Context(), isClaudeCode)
|
||||
|
||||
// 仅在确认为 Claude Code 客户端时提取版本号写入 context
|
||||
if isClaudeCode {
|
||||
if version := claudeCodeValidator.ExtractVersion(ua); version != "" {
|
||||
ctx = service.SetClaudeCodeVersion(ctx, version)
|
||||
}
|
||||
}
|
||||
|
||||
c.Request = c.Request.WithContext(ctx)
|
||||
}
|
||||
|
||||
func claudeCodeBodyMapFromParsedRequest(parsedReq *service.ParsedRequest) map[string]any {
|
||||
if parsedReq == nil {
|
||||
return nil
|
||||
}
|
||||
bodyMap := map[string]any{
|
||||
"model": parsedReq.Model,
|
||||
}
|
||||
if parsedReq.System != nil || parsedReq.HasSystem {
|
||||
bodyMap["system"] = parsedReq.System
|
||||
}
|
||||
if parsedReq.MetadataUserID != "" {
|
||||
bodyMap["metadata"] = map[string]any{"user_id": parsedReq.MetadataUserID}
|
||||
}
|
||||
return bodyMap
|
||||
}
|
||||
|
||||
func claudeCodeBodyMapFromContextCache(c *gin.Context) map[string]any {
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
if cached, ok := c.Get(service.OpenAIParsedRequestBodyKey); ok {
|
||||
if bodyMap, ok := cached.(map[string]any); ok {
|
||||
return bodyMap
|
||||
}
|
||||
}
|
||||
if cached, ok := c.Get(claudeCodeParsedRequestContextKey); ok {
|
||||
switch v := cached.(type) {
|
||||
case *service.ParsedRequest:
|
||||
return claudeCodeBodyMapFromParsedRequest(v)
|
||||
case service.ParsedRequest:
|
||||
return claudeCodeBodyMapFromParsedRequest(&v)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// 并发槽位等待相关常量
|
||||
//
|
||||
// 性能优化说明:
|
||||
|
||||
@@ -33,6 +33,14 @@ func (m *concurrencyCacheMock) GetAccountConcurrency(ctx context.Context, accoun
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (m *concurrencyCacheMock) GetAccountConcurrencyBatch(ctx context.Context, accountIDs []int64) (map[int64]int, error) {
|
||||
result := make(map[int64]int, len(accountIDs))
|
||||
for _, accountID := range accountIDs {
|
||||
result[accountID] = 0
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (m *concurrencyCacheMock) IncrementAccountWaitCount(ctx context.Context, accountID int64, maxWait int) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
@@ -49,6 +49,14 @@ func (s *helperConcurrencyCacheStub) GetAccountConcurrency(ctx context.Context,
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (s *helperConcurrencyCacheStub) GetAccountConcurrencyBatch(ctx context.Context, accountIDs []int64) (map[int64]int, error) {
|
||||
out := make(map[int64]int, len(accountIDs))
|
||||
for _, accountID := range accountIDs {
|
||||
out[accountID] = 0
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (s *helperConcurrencyCacheStub) IncrementAccountWaitCount(ctx context.Context, accountID int64, maxWait int) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
@@ -133,7 +141,7 @@ func TestSetClaudeCodeClientContext_FastPathAndStrictPath(t *testing.T) {
|
||||
c, _ := newHelperTestContext(http.MethodPost, "/v1/messages")
|
||||
c.Request.Header.Set("User-Agent", "curl/8.6.0")
|
||||
|
||||
SetClaudeCodeClientContext(c, validClaudeCodeBodyJSON())
|
||||
SetClaudeCodeClientContext(c, validClaudeCodeBodyJSON(), nil)
|
||||
require.False(t, service.IsClaudeCodeClient(c.Request.Context()))
|
||||
})
|
||||
|
||||
@@ -141,7 +149,7 @@ func TestSetClaudeCodeClientContext_FastPathAndStrictPath(t *testing.T) {
|
||||
c, _ := newHelperTestContext(http.MethodGet, "/v1/models")
|
||||
c.Request.Header.Set("User-Agent", "claude-cli/1.0.1")
|
||||
|
||||
SetClaudeCodeClientContext(c, nil)
|
||||
SetClaudeCodeClientContext(c, nil, nil)
|
||||
require.True(t, service.IsClaudeCodeClient(c.Request.Context()))
|
||||
})
|
||||
|
||||
@@ -152,7 +160,7 @@ func TestSetClaudeCodeClientContext_FastPathAndStrictPath(t *testing.T) {
|
||||
c.Request.Header.Set("anthropic-beta", "message-batches-2024-09-24")
|
||||
c.Request.Header.Set("anthropic-version", "2023-06-01")
|
||||
|
||||
SetClaudeCodeClientContext(c, validClaudeCodeBodyJSON())
|
||||
SetClaudeCodeClientContext(c, validClaudeCodeBodyJSON(), nil)
|
||||
require.True(t, service.IsClaudeCodeClient(c.Request.Context()))
|
||||
})
|
||||
|
||||
@@ -160,11 +168,51 @@ func TestSetClaudeCodeClientContext_FastPathAndStrictPath(t *testing.T) {
|
||||
c, _ := newHelperTestContext(http.MethodPost, "/v1/messages")
|
||||
c.Request.Header.Set("User-Agent", "claude-cli/1.0.1")
|
||||
// 缺少严格校验所需 header + body 字段
|
||||
SetClaudeCodeClientContext(c, []byte(`{"model":"x"}`))
|
||||
SetClaudeCodeClientContext(c, []byte(`{"model":"x"}`), nil)
|
||||
require.False(t, service.IsClaudeCodeClient(c.Request.Context()))
|
||||
})
|
||||
}
|
||||
|
||||
func TestSetClaudeCodeClientContext_ReuseParsedRequestAndContextCache(t *testing.T) {
|
||||
t.Run("reuse parsed request without body unmarshal", func(t *testing.T) {
|
||||
c, _ := newHelperTestContext(http.MethodPost, "/v1/messages")
|
||||
c.Request.Header.Set("User-Agent", "claude-cli/1.0.1")
|
||||
c.Request.Header.Set("X-App", "claude-code")
|
||||
c.Request.Header.Set("anthropic-beta", "message-batches-2024-09-24")
|
||||
c.Request.Header.Set("anthropic-version", "2023-06-01")
|
||||
|
||||
parsedReq := &service.ParsedRequest{
|
||||
Model: "claude-3-5-sonnet-20241022",
|
||||
System: []any{
|
||||
map[string]any{"text": "You are Claude Code, Anthropic's official CLI for Claude."},
|
||||
},
|
||||
MetadataUserID: "user_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa_account__session_abc-123",
|
||||
}
|
||||
|
||||
// body 非法 JSON,如果函数复用 parsedReq 成功则仍应判定为 Claude Code。
|
||||
SetClaudeCodeClientContext(c, []byte(`{invalid`), parsedReq)
|
||||
require.True(t, service.IsClaudeCodeClient(c.Request.Context()))
|
||||
})
|
||||
|
||||
t.Run("reuse context cache without body unmarshal", func(t *testing.T) {
|
||||
c, _ := newHelperTestContext(http.MethodPost, "/v1/messages")
|
||||
c.Request.Header.Set("User-Agent", "claude-cli/1.0.1")
|
||||
c.Request.Header.Set("X-App", "claude-code")
|
||||
c.Request.Header.Set("anthropic-beta", "message-batches-2024-09-24")
|
||||
c.Request.Header.Set("anthropic-version", "2023-06-01")
|
||||
c.Set(service.OpenAIParsedRequestBodyKey, map[string]any{
|
||||
"model": "claude-3-5-sonnet-20241022",
|
||||
"system": []any{
|
||||
map[string]any{"text": "You are Claude Code, Anthropic's official CLI for Claude."},
|
||||
},
|
||||
"metadata": map[string]any{"user_id": "user_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa_account__session_abc-123"},
|
||||
})
|
||||
|
||||
SetClaudeCodeClientContext(c, []byte(`{invalid`), nil)
|
||||
require.True(t, service.IsClaudeCodeClient(c.Request.Context()))
|
||||
})
|
||||
}
|
||||
|
||||
func TestWaitForSlotWithPingTimeout_AccountAndUserAcquire(t *testing.T) {
|
||||
cache := &helperConcurrencyCacheStub{
|
||||
accountSeq: []bool{false, true},
|
||||
|
||||
@@ -7,16 +7,15 @@ import (
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/domain"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/antigravity"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/gemini"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/googleapi"
|
||||
pkghttputil "github.com/Wei-Shaw/sub2api/internal/pkg/httputil"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/ip"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/logger"
|
||||
"github.com/Wei-Shaw/sub2api/internal/server/middleware"
|
||||
@@ -168,7 +167,7 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) {
|
||||
stream := action == "streamGenerateContent"
|
||||
reqLog = reqLog.With(zap.String("model", modelName), zap.String("action", action), zap.Bool("stream", stream))
|
||||
|
||||
body, err := io.ReadAll(c.Request.Body)
|
||||
body, err := pkghttputil.ReadRequestBodyWithPrealloc(c.Request)
|
||||
if err != nil {
|
||||
if maxErr, ok := extractMaxBytesError(err); ok {
|
||||
googleError(c, http.StatusRequestEntityTooLarge, buildBodyTooLargeMessage(maxErr.Limit))
|
||||
@@ -268,8 +267,7 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) {
|
||||
if apiKey.GroupID != nil {
|
||||
prefetchedGroupID = *apiKey.GroupID
|
||||
}
|
||||
ctx := context.WithValue(c.Request.Context(), ctxkey.PrefetchedStickyAccountID, sessionBoundAccountID)
|
||||
ctx = context.WithValue(ctx, ctxkey.PrefetchedStickyGroupID, prefetchedGroupID)
|
||||
ctx := service.WithPrefetchedStickySession(c.Request.Context(), sessionBoundAccountID, prefetchedGroupID, h.metadataBridgeEnabled())
|
||||
c.Request = c.Request.WithContext(ctx)
|
||||
}
|
||||
}
|
||||
@@ -349,7 +347,7 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) {
|
||||
// 单账号分组提前设置 SingleAccountRetry 标记,让 Service 层首次 503 就不设模型限流标记。
|
||||
// 避免单账号分组收到 503 (MODEL_CAPACITY_EXHAUSTED) 时设 29s 限流,导致后续请求连续快速失败。
|
||||
if h.gatewayService.IsSingleAntigravityAccountGroup(c.Request.Context(), apiKey.GroupID) {
|
||||
ctx := context.WithValue(c.Request.Context(), ctxkey.SingleAccountRetry, true)
|
||||
ctx := service.WithSingleAccountRetry(c.Request.Context(), true, h.metadataBridgeEnabled())
|
||||
c.Request = c.Request.WithContext(ctx)
|
||||
}
|
||||
|
||||
@@ -363,7 +361,7 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) {
|
||||
action := fs.HandleSelectionExhausted(c.Request.Context())
|
||||
switch action {
|
||||
case FailoverContinue:
|
||||
ctx := context.WithValue(c.Request.Context(), ctxkey.SingleAccountRetry, true)
|
||||
ctx := service.WithSingleAccountRetry(c.Request.Context(), true, h.metadataBridgeEnabled())
|
||||
c.Request = c.Request.WithContext(ctx)
|
||||
continue
|
||||
case FailoverCanceled:
|
||||
@@ -456,7 +454,7 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) {
|
||||
var result *service.ForwardResult
|
||||
requestCtx := c.Request.Context()
|
||||
if fs.SwitchCount > 0 {
|
||||
requestCtx = context.WithValue(requestCtx, ctxkey.AccountSwitchCount, fs.SwitchCount)
|
||||
requestCtx = service.WithAccountSwitchCount(requestCtx, fs.SwitchCount, h.metadataBridgeEnabled())
|
||||
}
|
||||
if account.Platform == service.PlatformAntigravity && account.Type != service.AccountTypeAPIKey {
|
||||
result, err = h.antigravityGatewayService.ForwardGemini(requestCtx, c, account, modelName, action, stream, body, hasBoundSession)
|
||||
|
||||
@@ -11,6 +11,7 @@ type AdminHandlers struct {
|
||||
Group *admin.GroupHandler
|
||||
Account *admin.AccountHandler
|
||||
Announcement *admin.AnnouncementHandler
|
||||
DataManagement *admin.DataManagementHandler
|
||||
OAuth *admin.OAuthHandler
|
||||
OpenAIOAuth *admin.OpenAIOAuthHandler
|
||||
GeminiOAuth *admin.GeminiOAuthHandler
|
||||
@@ -25,6 +26,7 @@ type AdminHandlers struct {
|
||||
Usage *admin.UsageHandler
|
||||
UserAttribute *admin.UserAttributeHandler
|
||||
ErrorPassthrough *admin.ErrorPassthroughHandler
|
||||
APIKey *admin.AdminAPIKeyHandler
|
||||
}
|
||||
|
||||
// Handlers contains all HTTP handlers
|
||||
@@ -40,6 +42,7 @@ type Handlers struct {
|
||||
Gateway *GatewayHandler
|
||||
OpenAIGateway *OpenAIGatewayHandler
|
||||
SoraGateway *SoraGatewayHandler
|
||||
SoraClient *SoraClientHandler
|
||||
Setting *SettingHandler
|
||||
Totp *TotpHandler
|
||||
}
|
||||
|
||||
@@ -5,17 +5,20 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"runtime/debug"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/config"
|
||||
pkghttputil "github.com/Wei-Shaw/sub2api/internal/pkg/httputil"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/ip"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/logger"
|
||||
middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
|
||||
coderws "github.com/coder/websocket"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/tidwall/gjson"
|
||||
"go.uber.org/zap"
|
||||
@@ -64,6 +67,11 @@ func NewOpenAIGatewayHandler(
|
||||
// Responses handles OpenAI Responses API endpoint
|
||||
// POST /openai/v1/responses
|
||||
func (h *OpenAIGatewayHandler) Responses(c *gin.Context) {
|
||||
// 局部兜底:确保该 handler 内部任何 panic 都不会击穿到进程级。
|
||||
streamStarted := false
|
||||
defer h.recoverResponsesPanic(c, &streamStarted)
|
||||
setOpenAIClientTransportHTTP(c)
|
||||
|
||||
requestStart := time.Now()
|
||||
|
||||
// Get apiKey and user from context (set by ApiKeyAuth middleware)
|
||||
@@ -85,9 +93,12 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) {
|
||||
zap.Int64("api_key_id", apiKey.ID),
|
||||
zap.Any("group_id", apiKey.GroupID),
|
||||
)
|
||||
if !h.ensureResponsesDependencies(c, reqLog) {
|
||||
return
|
||||
}
|
||||
|
||||
// Read request body
|
||||
body, err := io.ReadAll(c.Request.Body)
|
||||
body, err := pkghttputil.ReadRequestBodyWithPrealloc(c.Request)
|
||||
if err != nil {
|
||||
if maxErr, ok := extractMaxBytesError(err); ok {
|
||||
h.errorResponse(c, http.StatusRequestEntityTooLarge, "invalid_request_error", buildBodyTooLargeMessage(maxErr.Limit))
|
||||
@@ -125,43 +136,30 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) {
|
||||
}
|
||||
reqStream := streamResult.Bool()
|
||||
reqLog = reqLog.With(zap.String("model", reqModel), zap.Bool("stream", reqStream))
|
||||
previousResponseID := strings.TrimSpace(gjson.GetBytes(body, "previous_response_id").String())
|
||||
if previousResponseID != "" {
|
||||
previousResponseIDKind := service.ClassifyOpenAIPreviousResponseIDKind(previousResponseID)
|
||||
reqLog = reqLog.With(
|
||||
zap.Bool("has_previous_response_id", true),
|
||||
zap.String("previous_response_id_kind", previousResponseIDKind),
|
||||
zap.Int("previous_response_id_len", len(previousResponseID)),
|
||||
)
|
||||
if previousResponseIDKind == service.OpenAIPreviousResponseIDKindMessageID {
|
||||
reqLog.Warn("openai.request_validation_failed",
|
||||
zap.String("reason", "previous_response_id_looks_like_message_id"),
|
||||
)
|
||||
h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "previous_response_id must be a response.id (resp_*), not a message id")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
setOpsRequestContext(c, reqModel, reqStream, body)
|
||||
|
||||
// 提前校验 function_call_output 是否具备可关联上下文,避免上游 400。
|
||||
// 要求 previous_response_id,或 input 内存在带 call_id 的 tool_call/function_call,
|
||||
// 或带 id 且与 call_id 匹配的 item_reference。
|
||||
// 此路径需要遍历 input 数组做 call_id 关联检查,保留 Unmarshal
|
||||
if gjson.GetBytes(body, `input.#(type=="function_call_output")`).Exists() {
|
||||
var reqBody map[string]any
|
||||
if err := json.Unmarshal(body, &reqBody); err == nil {
|
||||
c.Set(service.OpenAIParsedRequestBodyKey, reqBody)
|
||||
if service.HasFunctionCallOutput(reqBody) {
|
||||
previousResponseID, _ := reqBody["previous_response_id"].(string)
|
||||
if strings.TrimSpace(previousResponseID) == "" && !service.HasToolCallContext(reqBody) {
|
||||
if service.HasFunctionCallOutputMissingCallID(reqBody) {
|
||||
reqLog.Warn("openai.request_validation_failed",
|
||||
zap.String("reason", "function_call_output_missing_call_id"),
|
||||
)
|
||||
h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "function_call_output requires call_id or previous_response_id; if relying on history, ensure store=true and reuse previous_response_id")
|
||||
return
|
||||
}
|
||||
callIDs := service.FunctionCallOutputCallIDs(reqBody)
|
||||
if !service.HasItemReferenceForCallIDs(reqBody, callIDs) {
|
||||
reqLog.Warn("openai.request_validation_failed",
|
||||
zap.String("reason", "function_call_output_missing_item_reference"),
|
||||
)
|
||||
h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "function_call_output requires item_reference ids matching each call_id, or previous_response_id/tool_call context; if relying on history, ensure store=true and reuse previous_response_id")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if !h.validateFunctionCallOutputRequest(c, body, reqLog) {
|
||||
return
|
||||
}
|
||||
|
||||
// Track if we've started streaming (for error handling)
|
||||
streamStarted := false
|
||||
|
||||
// 绑定错误透传服务,允许 service 层在非 failover 错误场景复用规则。
|
||||
if h.errorPassthroughService != nil {
|
||||
service.BindErrorPassthroughService(c, h.errorPassthroughService)
|
||||
@@ -173,51 +171,11 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) {
|
||||
service.SetOpsLatencyMs(c, service.OpsAuthLatencyMsKey, time.Since(requestStart).Milliseconds())
|
||||
routingStart := time.Now()
|
||||
|
||||
// 0. 先尝试直接抢占用户槽位(快速路径)
|
||||
userReleaseFunc, userAcquired, err := h.concurrencyHelper.TryAcquireUserSlot(c.Request.Context(), subject.UserID, subject.Concurrency)
|
||||
if err != nil {
|
||||
reqLog.Warn("openai.user_slot_acquire_failed", zap.Error(err))
|
||||
h.handleConcurrencyError(c, err, "user", streamStarted)
|
||||
userReleaseFunc, acquired := h.acquireResponsesUserSlot(c, subject.UserID, subject.Concurrency, reqStream, &streamStarted, reqLog)
|
||||
if !acquired {
|
||||
return
|
||||
}
|
||||
|
||||
waitCounted := false
|
||||
if !userAcquired {
|
||||
// 仅在抢槽失败时才进入等待队列,减少常态请求 Redis 写入。
|
||||
maxWait := service.CalculateMaxWait(subject.Concurrency)
|
||||
canWait, waitErr := h.concurrencyHelper.IncrementWaitCount(c.Request.Context(), subject.UserID, maxWait)
|
||||
if waitErr != nil {
|
||||
reqLog.Warn("openai.user_wait_counter_increment_failed", zap.Error(waitErr))
|
||||
// 按现有降级语义:等待计数异常时放行后续抢槽流程
|
||||
} else if !canWait {
|
||||
reqLog.Info("openai.user_wait_queue_full", zap.Int("max_wait", maxWait))
|
||||
h.errorResponse(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later")
|
||||
return
|
||||
}
|
||||
if waitErr == nil && canWait {
|
||||
waitCounted = true
|
||||
}
|
||||
defer func() {
|
||||
if waitCounted {
|
||||
h.concurrencyHelper.DecrementWaitCount(c.Request.Context(), subject.UserID)
|
||||
}
|
||||
}()
|
||||
|
||||
userReleaseFunc, err = h.concurrencyHelper.AcquireUserSlotWithWait(c, subject.UserID, subject.Concurrency, reqStream, &streamStarted)
|
||||
if err != nil {
|
||||
reqLog.Warn("openai.user_slot_acquire_failed_after_wait", zap.Error(err))
|
||||
h.handleConcurrencyError(c, err, "user", streamStarted)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// 用户槽位已获取:退出等待队列计数。
|
||||
if waitCounted {
|
||||
h.concurrencyHelper.DecrementWaitCount(c.Request.Context(), subject.UserID)
|
||||
waitCounted = false
|
||||
}
|
||||
// 确保请求取消时也会释放槽位,避免长连接被动中断造成泄漏
|
||||
userReleaseFunc = wrapReleaseOnDone(c.Request.Context(), userReleaseFunc)
|
||||
if userReleaseFunc != nil {
|
||||
defer userReleaseFunc()
|
||||
}
|
||||
@@ -241,7 +199,15 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) {
|
||||
for {
|
||||
// Select account supporting the requested model
|
||||
reqLog.Debug("openai.account_selecting", zap.Int("excluded_account_count", len(failedAccountIDs)))
|
||||
selection, err := h.gatewayService.SelectAccountWithLoadAwareness(c.Request.Context(), apiKey.GroupID, sessionHash, reqModel, failedAccountIDs)
|
||||
selection, scheduleDecision, err := h.gatewayService.SelectAccountWithScheduler(
|
||||
c.Request.Context(),
|
||||
apiKey.GroupID,
|
||||
previousResponseID,
|
||||
sessionHash,
|
||||
reqModel,
|
||||
failedAccountIDs,
|
||||
service.OpenAIUpstreamTransportAny,
|
||||
)
|
||||
if err != nil {
|
||||
reqLog.Warn("openai.account_select_failed",
|
||||
zap.Error(err),
|
||||
@@ -258,80 +224,30 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) {
|
||||
}
|
||||
return
|
||||
}
|
||||
if selection == nil || selection.Account == nil {
|
||||
h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts", streamStarted)
|
||||
return
|
||||
}
|
||||
if previousResponseID != "" && selection != nil && selection.Account != nil {
|
||||
reqLog.Debug("openai.account_selected_with_previous_response_id", zap.Int64("account_id", selection.Account.ID))
|
||||
}
|
||||
reqLog.Debug("openai.account_schedule_decision",
|
||||
zap.String("layer", scheduleDecision.Layer),
|
||||
zap.Bool("sticky_previous_hit", scheduleDecision.StickyPreviousHit),
|
||||
zap.Bool("sticky_session_hit", scheduleDecision.StickySessionHit),
|
||||
zap.Int("candidate_count", scheduleDecision.CandidateCount),
|
||||
zap.Int("top_k", scheduleDecision.TopK),
|
||||
zap.Int64("latency_ms", scheduleDecision.LatencyMs),
|
||||
zap.Float64("load_skew", scheduleDecision.LoadSkew),
|
||||
)
|
||||
account := selection.Account
|
||||
reqLog.Debug("openai.account_selected", zap.Int64("account_id", account.ID), zap.String("account_name", account.Name))
|
||||
setOpsSelectedAccount(c, account.ID, account.Platform)
|
||||
|
||||
// 3. Acquire account concurrency slot
|
||||
accountReleaseFunc := selection.ReleaseFunc
|
||||
if !selection.Acquired {
|
||||
if selection.WaitPlan == nil {
|
||||
h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts", streamStarted)
|
||||
return
|
||||
}
|
||||
|
||||
// 先快速尝试一次账号槽位,命中则跳过等待计数写入。
|
||||
fastReleaseFunc, fastAcquired, err := h.concurrencyHelper.TryAcquireAccountSlot(
|
||||
c.Request.Context(),
|
||||
account.ID,
|
||||
selection.WaitPlan.MaxConcurrency,
|
||||
)
|
||||
if err != nil {
|
||||
reqLog.Warn("openai.account_slot_quick_acquire_failed", zap.Int64("account_id", account.ID), zap.Error(err))
|
||||
h.handleConcurrencyError(c, err, "account", streamStarted)
|
||||
return
|
||||
}
|
||||
if fastAcquired {
|
||||
accountReleaseFunc = fastReleaseFunc
|
||||
if err := h.gatewayService.BindStickySession(c.Request.Context(), apiKey.GroupID, sessionHash, account.ID); err != nil {
|
||||
reqLog.Warn("openai.bind_sticky_session_failed", zap.Int64("account_id", account.ID), zap.Error(err))
|
||||
}
|
||||
} else {
|
||||
accountWaitCounted := false
|
||||
canWait, err := h.concurrencyHelper.IncrementAccountWaitCount(c.Request.Context(), account.ID, selection.WaitPlan.MaxWaiting)
|
||||
if err != nil {
|
||||
reqLog.Warn("openai.account_wait_counter_increment_failed", zap.Int64("account_id", account.ID), zap.Error(err))
|
||||
} else if !canWait {
|
||||
reqLog.Info("openai.account_wait_queue_full",
|
||||
zap.Int64("account_id", account.ID),
|
||||
zap.Int("max_waiting", selection.WaitPlan.MaxWaiting),
|
||||
)
|
||||
h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later", streamStarted)
|
||||
return
|
||||
}
|
||||
if err == nil && canWait {
|
||||
accountWaitCounted = true
|
||||
}
|
||||
releaseWait := func() {
|
||||
if accountWaitCounted {
|
||||
h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID)
|
||||
accountWaitCounted = false
|
||||
}
|
||||
}
|
||||
|
||||
accountReleaseFunc, err = h.concurrencyHelper.AcquireAccountSlotWithWaitTimeout(
|
||||
c,
|
||||
account.ID,
|
||||
selection.WaitPlan.MaxConcurrency,
|
||||
selection.WaitPlan.Timeout,
|
||||
reqStream,
|
||||
&streamStarted,
|
||||
)
|
||||
if err != nil {
|
||||
reqLog.Warn("openai.account_slot_acquire_failed", zap.Int64("account_id", account.ID), zap.Error(err))
|
||||
releaseWait()
|
||||
h.handleConcurrencyError(c, err, "account", streamStarted)
|
||||
return
|
||||
}
|
||||
// Slot acquired: no longer waiting in queue.
|
||||
releaseWait()
|
||||
if err := h.gatewayService.BindStickySession(c.Request.Context(), apiKey.GroupID, sessionHash, account.ID); err != nil {
|
||||
reqLog.Warn("openai.bind_sticky_session_failed", zap.Int64("account_id", account.ID), zap.Error(err))
|
||||
}
|
||||
}
|
||||
accountReleaseFunc, acquired := h.acquireResponsesAccountSlot(c, apiKey.GroupID, sessionHash, selection, reqStream, &streamStarted, reqLog)
|
||||
if !acquired {
|
||||
return
|
||||
}
|
||||
// 账号槽位/等待计数需要在超时或断开时安全回收
|
||||
accountReleaseFunc = wrapReleaseOnDone(c.Request.Context(), accountReleaseFunc)
|
||||
|
||||
// Forward request
|
||||
service.SetOpsLatencyMs(c, service.OpsRoutingLatencyMsKey, time.Since(routingStart).Milliseconds())
|
||||
@@ -353,6 +269,8 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) {
|
||||
if err != nil {
|
||||
var failoverErr *service.UpstreamFailoverError
|
||||
if errors.As(err, &failoverErr) {
|
||||
h.gatewayService.ReportOpenAIAccountScheduleResult(account.ID, false, nil)
|
||||
h.gatewayService.RecordOpenAIAccountSwitch()
|
||||
failedAccountIDs[account.ID] = struct{}{}
|
||||
lastFailoverErr = failoverErr
|
||||
if switchCount >= maxAccountSwitches {
|
||||
@@ -368,14 +286,25 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) {
|
||||
)
|
||||
continue
|
||||
}
|
||||
h.gatewayService.ReportOpenAIAccountScheduleResult(account.ID, false, nil)
|
||||
wroteFallback := h.ensureForwardErrorResponse(c, streamStarted)
|
||||
reqLog.Error("openai.forward_failed",
|
||||
fields := []zap.Field{
|
||||
zap.Int64("account_id", account.ID),
|
||||
zap.Bool("fallback_error_response_written", wroteFallback),
|
||||
zap.Error(err),
|
||||
)
|
||||
}
|
||||
if shouldLogOpenAIForwardFailureAsWarn(c, wroteFallback) {
|
||||
reqLog.Warn("openai.forward_failed", fields...)
|
||||
return
|
||||
}
|
||||
reqLog.Error("openai.forward_failed", fields...)
|
||||
return
|
||||
}
|
||||
if result != nil {
|
||||
h.gatewayService.ReportOpenAIAccountScheduleResult(account.ID, true, result.FirstTokenMs)
|
||||
} else {
|
||||
h.gatewayService.ReportOpenAIAccountScheduleResult(account.ID, true, nil)
|
||||
}
|
||||
|
||||
// 捕获请求信息(用于异步记录,避免在 goroutine 中访问 gin.Context)
|
||||
userAgent := c.GetHeader("User-Agent")
|
||||
@@ -411,6 +340,525 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) {
|
||||
}
|
||||
}
|
||||
|
||||
func (h *OpenAIGatewayHandler) validateFunctionCallOutputRequest(c *gin.Context, body []byte, reqLog *zap.Logger) bool {
|
||||
if !gjson.GetBytes(body, `input.#(type=="function_call_output")`).Exists() {
|
||||
return true
|
||||
}
|
||||
|
||||
var reqBody map[string]any
|
||||
if err := json.Unmarshal(body, &reqBody); err != nil {
|
||||
// 保持原有容错语义:解析失败时跳过预校验,沿用后续上游校验结果。
|
||||
return true
|
||||
}
|
||||
|
||||
c.Set(service.OpenAIParsedRequestBodyKey, reqBody)
|
||||
validation := service.ValidateFunctionCallOutputContext(reqBody)
|
||||
if !validation.HasFunctionCallOutput {
|
||||
return true
|
||||
}
|
||||
|
||||
previousResponseID, _ := reqBody["previous_response_id"].(string)
|
||||
if strings.TrimSpace(previousResponseID) != "" || validation.HasToolCallContext {
|
||||
return true
|
||||
}
|
||||
|
||||
if validation.HasFunctionCallOutputMissingCallID {
|
||||
reqLog.Warn("openai.request_validation_failed",
|
||||
zap.String("reason", "function_call_output_missing_call_id"),
|
||||
)
|
||||
h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "function_call_output requires call_id or previous_response_id; if relying on history, ensure store=true and reuse previous_response_id")
|
||||
return false
|
||||
}
|
||||
if validation.HasItemReferenceForAllCallIDs {
|
||||
return true
|
||||
}
|
||||
|
||||
reqLog.Warn("openai.request_validation_failed",
|
||||
zap.String("reason", "function_call_output_missing_item_reference"),
|
||||
)
|
||||
h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "function_call_output requires item_reference ids matching each call_id, or previous_response_id/tool_call context; if relying on history, ensure store=true and reuse previous_response_id")
|
||||
return false
|
||||
}
|
||||
|
||||
func (h *OpenAIGatewayHandler) acquireResponsesUserSlot(
|
||||
c *gin.Context,
|
||||
userID int64,
|
||||
userConcurrency int,
|
||||
reqStream bool,
|
||||
streamStarted *bool,
|
||||
reqLog *zap.Logger,
|
||||
) (func(), bool) {
|
||||
ctx := c.Request.Context()
|
||||
userReleaseFunc, userAcquired, err := h.concurrencyHelper.TryAcquireUserSlot(ctx, userID, userConcurrency)
|
||||
if err != nil {
|
||||
reqLog.Warn("openai.user_slot_acquire_failed", zap.Error(err))
|
||||
h.handleConcurrencyError(c, err, "user", *streamStarted)
|
||||
return nil, false
|
||||
}
|
||||
if userAcquired {
|
||||
return wrapReleaseOnDone(ctx, userReleaseFunc), true
|
||||
}
|
||||
|
||||
maxWait := service.CalculateMaxWait(userConcurrency)
|
||||
canWait, waitErr := h.concurrencyHelper.IncrementWaitCount(ctx, userID, maxWait)
|
||||
if waitErr != nil {
|
||||
reqLog.Warn("openai.user_wait_counter_increment_failed", zap.Error(waitErr))
|
||||
// 按现有降级语义:等待计数异常时放行后续抢槽流程
|
||||
} else if !canWait {
|
||||
reqLog.Info("openai.user_wait_queue_full", zap.Int("max_wait", maxWait))
|
||||
h.errorResponse(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later")
|
||||
return nil, false
|
||||
}
|
||||
|
||||
waitCounted := waitErr == nil && canWait
|
||||
defer func() {
|
||||
if waitCounted {
|
||||
h.concurrencyHelper.DecrementWaitCount(ctx, userID)
|
||||
}
|
||||
}()
|
||||
|
||||
userReleaseFunc, err = h.concurrencyHelper.AcquireUserSlotWithWait(c, userID, userConcurrency, reqStream, streamStarted)
|
||||
if err != nil {
|
||||
reqLog.Warn("openai.user_slot_acquire_failed_after_wait", zap.Error(err))
|
||||
h.handleConcurrencyError(c, err, "user", *streamStarted)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// 槽位获取成功后,立刻退出等待计数。
|
||||
if waitCounted {
|
||||
h.concurrencyHelper.DecrementWaitCount(ctx, userID)
|
||||
waitCounted = false
|
||||
}
|
||||
return wrapReleaseOnDone(ctx, userReleaseFunc), true
|
||||
}
|
||||
|
||||
func (h *OpenAIGatewayHandler) acquireResponsesAccountSlot(
|
||||
c *gin.Context,
|
||||
groupID *int64,
|
||||
sessionHash string,
|
||||
selection *service.AccountSelectionResult,
|
||||
reqStream bool,
|
||||
streamStarted *bool,
|
||||
reqLog *zap.Logger,
|
||||
) (func(), bool) {
|
||||
if selection == nil || selection.Account == nil {
|
||||
h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts", *streamStarted)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
ctx := c.Request.Context()
|
||||
account := selection.Account
|
||||
if selection.Acquired {
|
||||
return wrapReleaseOnDone(ctx, selection.ReleaseFunc), true
|
||||
}
|
||||
if selection.WaitPlan == nil {
|
||||
h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts", *streamStarted)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
fastReleaseFunc, fastAcquired, err := h.concurrencyHelper.TryAcquireAccountSlot(
|
||||
ctx,
|
||||
account.ID,
|
||||
selection.WaitPlan.MaxConcurrency,
|
||||
)
|
||||
if err != nil {
|
||||
reqLog.Warn("openai.account_slot_quick_acquire_failed", zap.Int64("account_id", account.ID), zap.Error(err))
|
||||
h.handleConcurrencyError(c, err, "account", *streamStarted)
|
||||
return nil, false
|
||||
}
|
||||
if fastAcquired {
|
||||
if err := h.gatewayService.BindStickySession(ctx, groupID, sessionHash, account.ID); err != nil {
|
||||
reqLog.Warn("openai.bind_sticky_session_failed", zap.Int64("account_id", account.ID), zap.Error(err))
|
||||
}
|
||||
return wrapReleaseOnDone(ctx, fastReleaseFunc), true
|
||||
}
|
||||
|
||||
canWait, waitErr := h.concurrencyHelper.IncrementAccountWaitCount(ctx, account.ID, selection.WaitPlan.MaxWaiting)
|
||||
if waitErr != nil {
|
||||
reqLog.Warn("openai.account_wait_counter_increment_failed", zap.Int64("account_id", account.ID), zap.Error(waitErr))
|
||||
} else if !canWait {
|
||||
reqLog.Info("openai.account_wait_queue_full",
|
||||
zap.Int64("account_id", account.ID),
|
||||
zap.Int("max_waiting", selection.WaitPlan.MaxWaiting),
|
||||
)
|
||||
h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later", *streamStarted)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
accountWaitCounted := waitErr == nil && canWait
|
||||
releaseWait := func() {
|
||||
if accountWaitCounted {
|
||||
h.concurrencyHelper.DecrementAccountWaitCount(ctx, account.ID)
|
||||
accountWaitCounted = false
|
||||
}
|
||||
}
|
||||
defer releaseWait()
|
||||
|
||||
accountReleaseFunc, err := h.concurrencyHelper.AcquireAccountSlotWithWaitTimeout(
|
||||
c,
|
||||
account.ID,
|
||||
selection.WaitPlan.MaxConcurrency,
|
||||
selection.WaitPlan.Timeout,
|
||||
reqStream,
|
||||
streamStarted,
|
||||
)
|
||||
if err != nil {
|
||||
reqLog.Warn("openai.account_slot_acquire_failed", zap.Int64("account_id", account.ID), zap.Error(err))
|
||||
h.handleConcurrencyError(c, err, "account", *streamStarted)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Slot acquired: no longer waiting in queue.
|
||||
releaseWait()
|
||||
if err := h.gatewayService.BindStickySession(ctx, groupID, sessionHash, account.ID); err != nil {
|
||||
reqLog.Warn("openai.bind_sticky_session_failed", zap.Int64("account_id", account.ID), zap.Error(err))
|
||||
}
|
||||
return wrapReleaseOnDone(ctx, accountReleaseFunc), true
|
||||
}
|
||||
|
||||
// ResponsesWebSocket handles OpenAI Responses API WebSocket ingress endpoint
|
||||
// GET /openai/v1/responses (Upgrade: websocket)
|
||||
func (h *OpenAIGatewayHandler) ResponsesWebSocket(c *gin.Context) {
|
||||
if !isOpenAIWSUpgradeRequest(c.Request) {
|
||||
h.errorResponse(c, http.StatusUpgradeRequired, "invalid_request_error", "WebSocket upgrade required (Upgrade: websocket)")
|
||||
return
|
||||
}
|
||||
setOpenAIClientTransportWS(c)
|
||||
|
||||
apiKey, ok := middleware2.GetAPIKeyFromContext(c)
|
||||
if !ok {
|
||||
h.errorResponse(c, http.StatusUnauthorized, "authentication_error", "Invalid API key")
|
||||
return
|
||||
}
|
||||
subject, ok := middleware2.GetAuthSubjectFromContext(c)
|
||||
if !ok {
|
||||
h.errorResponse(c, http.StatusInternalServerError, "api_error", "User context not found")
|
||||
return
|
||||
}
|
||||
|
||||
reqLog := requestLogger(
|
||||
c,
|
||||
"handler.openai_gateway.responses_ws",
|
||||
zap.Int64("user_id", subject.UserID),
|
||||
zap.Int64("api_key_id", apiKey.ID),
|
||||
zap.Any("group_id", apiKey.GroupID),
|
||||
zap.Bool("openai_ws_mode", true),
|
||||
)
|
||||
if !h.ensureResponsesDependencies(c, reqLog) {
|
||||
return
|
||||
}
|
||||
reqLog.Info("openai.websocket_ingress_started")
|
||||
clientIP := ip.GetClientIP(c)
|
||||
userAgent := strings.TrimSpace(c.GetHeader("User-Agent"))
|
||||
|
||||
wsConn, err := coderws.Accept(c.Writer, c.Request, &coderws.AcceptOptions{
|
||||
CompressionMode: coderws.CompressionContextTakeover,
|
||||
})
|
||||
if err != nil {
|
||||
reqLog.Warn("openai.websocket_accept_failed",
|
||||
zap.Error(err),
|
||||
zap.String("client_ip", clientIP),
|
||||
zap.String("request_user_agent", userAgent),
|
||||
zap.String("upgrade_header", strings.TrimSpace(c.GetHeader("Upgrade"))),
|
||||
zap.String("connection_header", strings.TrimSpace(c.GetHeader("Connection"))),
|
||||
zap.String("sec_websocket_version", strings.TrimSpace(c.GetHeader("Sec-WebSocket-Version"))),
|
||||
zap.Bool("has_sec_websocket_key", strings.TrimSpace(c.GetHeader("Sec-WebSocket-Key")) != ""),
|
||||
)
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
_ = wsConn.CloseNow()
|
||||
}()
|
||||
wsConn.SetReadLimit(16 * 1024 * 1024)
|
||||
|
||||
ctx := c.Request.Context()
|
||||
readCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
|
||||
msgType, firstMessage, err := wsConn.Read(readCtx)
|
||||
cancel()
|
||||
if err != nil {
|
||||
closeStatus, closeReason := summarizeWSCloseErrorForLog(err)
|
||||
reqLog.Warn("openai.websocket_read_first_message_failed",
|
||||
zap.Error(err),
|
||||
zap.String("client_ip", clientIP),
|
||||
zap.String("close_status", closeStatus),
|
||||
zap.String("close_reason", closeReason),
|
||||
zap.Duration("read_timeout", 30*time.Second),
|
||||
)
|
||||
closeOpenAIClientWS(wsConn, coderws.StatusPolicyViolation, "missing first response.create message")
|
||||
return
|
||||
}
|
||||
if msgType != coderws.MessageText && msgType != coderws.MessageBinary {
|
||||
closeOpenAIClientWS(wsConn, coderws.StatusPolicyViolation, "unsupported websocket message type")
|
||||
return
|
||||
}
|
||||
if !gjson.ValidBytes(firstMessage) {
|
||||
closeOpenAIClientWS(wsConn, coderws.StatusPolicyViolation, "invalid JSON payload")
|
||||
return
|
||||
}
|
||||
|
||||
reqModel := strings.TrimSpace(gjson.GetBytes(firstMessage, "model").String())
|
||||
if reqModel == "" {
|
||||
closeOpenAIClientWS(wsConn, coderws.StatusPolicyViolation, "model is required in first response.create payload")
|
||||
return
|
||||
}
|
||||
previousResponseID := strings.TrimSpace(gjson.GetBytes(firstMessage, "previous_response_id").String())
|
||||
previousResponseIDKind := service.ClassifyOpenAIPreviousResponseIDKind(previousResponseID)
|
||||
if previousResponseID != "" && previousResponseIDKind == service.OpenAIPreviousResponseIDKindMessageID {
|
||||
closeOpenAIClientWS(wsConn, coderws.StatusPolicyViolation, "previous_response_id must be a response.id (resp_*), not a message id")
|
||||
return
|
||||
}
|
||||
reqLog = reqLog.With(
|
||||
zap.Bool("ws_ingress", true),
|
||||
zap.String("model", reqModel),
|
||||
zap.Bool("has_previous_response_id", previousResponseID != ""),
|
||||
zap.String("previous_response_id_kind", previousResponseIDKind),
|
||||
)
|
||||
setOpsRequestContext(c, reqModel, true, firstMessage)
|
||||
|
||||
var currentUserRelease func()
|
||||
var currentAccountRelease func()
|
||||
releaseTurnSlots := func() {
|
||||
if currentAccountRelease != nil {
|
||||
currentAccountRelease()
|
||||
currentAccountRelease = nil
|
||||
}
|
||||
if currentUserRelease != nil {
|
||||
currentUserRelease()
|
||||
currentUserRelease = nil
|
||||
}
|
||||
}
|
||||
// 必须尽早注册,确保任何 early return 都能释放已获取的并发槽位。
|
||||
defer releaseTurnSlots()
|
||||
|
||||
userReleaseFunc, userAcquired, err := h.concurrencyHelper.TryAcquireUserSlot(ctx, subject.UserID, subject.Concurrency)
|
||||
if err != nil {
|
||||
reqLog.Warn("openai.websocket_user_slot_acquire_failed", zap.Error(err))
|
||||
closeOpenAIClientWS(wsConn, coderws.StatusInternalError, "failed to acquire user concurrency slot")
|
||||
return
|
||||
}
|
||||
if !userAcquired {
|
||||
closeOpenAIClientWS(wsConn, coderws.StatusTryAgainLater, "too many concurrent requests, please retry later")
|
||||
return
|
||||
}
|
||||
currentUserRelease = wrapReleaseOnDone(ctx, userReleaseFunc)
|
||||
|
||||
subscription, _ := middleware2.GetSubscriptionFromContext(c)
|
||||
if err := h.billingCacheService.CheckBillingEligibility(ctx, apiKey.User, apiKey, apiKey.Group, subscription); err != nil {
|
||||
reqLog.Info("openai.websocket_billing_eligibility_check_failed", zap.Error(err))
|
||||
closeOpenAIClientWS(wsConn, coderws.StatusPolicyViolation, "billing check failed")
|
||||
return
|
||||
}
|
||||
|
||||
sessionHash := h.gatewayService.GenerateSessionHashWithFallback(
|
||||
c,
|
||||
firstMessage,
|
||||
openAIWSIngressFallbackSessionSeed(subject.UserID, apiKey.ID, apiKey.GroupID),
|
||||
)
|
||||
selection, scheduleDecision, err := h.gatewayService.SelectAccountWithScheduler(
|
||||
ctx,
|
||||
apiKey.GroupID,
|
||||
previousResponseID,
|
||||
sessionHash,
|
||||
reqModel,
|
||||
nil,
|
||||
service.OpenAIUpstreamTransportResponsesWebsocketV2,
|
||||
)
|
||||
if err != nil {
|
||||
reqLog.Warn("openai.websocket_account_select_failed", zap.Error(err))
|
||||
closeOpenAIClientWS(wsConn, coderws.StatusTryAgainLater, "no available account")
|
||||
return
|
||||
}
|
||||
if selection == nil || selection.Account == nil {
|
||||
closeOpenAIClientWS(wsConn, coderws.StatusTryAgainLater, "no available account")
|
||||
return
|
||||
}
|
||||
|
||||
account := selection.Account
|
||||
accountMaxConcurrency := account.Concurrency
|
||||
if selection.WaitPlan != nil && selection.WaitPlan.MaxConcurrency > 0 {
|
||||
accountMaxConcurrency = selection.WaitPlan.MaxConcurrency
|
||||
}
|
||||
accountReleaseFunc := selection.ReleaseFunc
|
||||
if !selection.Acquired {
|
||||
if selection.WaitPlan == nil {
|
||||
closeOpenAIClientWS(wsConn, coderws.StatusTryAgainLater, "account is busy, please retry later")
|
||||
return
|
||||
}
|
||||
fastReleaseFunc, fastAcquired, err := h.concurrencyHelper.TryAcquireAccountSlot(
|
||||
ctx,
|
||||
account.ID,
|
||||
selection.WaitPlan.MaxConcurrency,
|
||||
)
|
||||
if err != nil {
|
||||
reqLog.Warn("openai.websocket_account_slot_acquire_failed", zap.Int64("account_id", account.ID), zap.Error(err))
|
||||
closeOpenAIClientWS(wsConn, coderws.StatusInternalError, "failed to acquire account concurrency slot")
|
||||
return
|
||||
}
|
||||
if !fastAcquired {
|
||||
closeOpenAIClientWS(wsConn, coderws.StatusTryAgainLater, "account is busy, please retry later")
|
||||
return
|
||||
}
|
||||
accountReleaseFunc = fastReleaseFunc
|
||||
}
|
||||
currentAccountRelease = wrapReleaseOnDone(ctx, accountReleaseFunc)
|
||||
if err := h.gatewayService.BindStickySession(ctx, apiKey.GroupID, sessionHash, account.ID); err != nil {
|
||||
reqLog.Warn("openai.websocket_bind_sticky_session_failed", zap.Int64("account_id", account.ID), zap.Error(err))
|
||||
}
|
||||
|
||||
token, _, err := h.gatewayService.GetAccessToken(ctx, account)
|
||||
if err != nil {
|
||||
reqLog.Warn("openai.websocket_get_access_token_failed", zap.Int64("account_id", account.ID), zap.Error(err))
|
||||
closeOpenAIClientWS(wsConn, coderws.StatusInternalError, "failed to get access token")
|
||||
return
|
||||
}
|
||||
|
||||
reqLog.Debug("openai.websocket_account_selected",
|
||||
zap.Int64("account_id", account.ID),
|
||||
zap.String("account_name", account.Name),
|
||||
zap.String("schedule_layer", scheduleDecision.Layer),
|
||||
zap.Int("candidate_count", scheduleDecision.CandidateCount),
|
||||
)
|
||||
|
||||
hooks := &service.OpenAIWSIngressHooks{
|
||||
BeforeTurn: func(turn int) error {
|
||||
if turn == 1 {
|
||||
return nil
|
||||
}
|
||||
// 防御式清理:避免异常路径下旧槽位覆盖导致泄漏。
|
||||
releaseTurnSlots()
|
||||
// 非首轮 turn 需要重新抢占并发槽位,避免长连接空闲占槽。
|
||||
userReleaseFunc, userAcquired, err := h.concurrencyHelper.TryAcquireUserSlot(ctx, subject.UserID, subject.Concurrency)
|
||||
if err != nil {
|
||||
return service.NewOpenAIWSClientCloseError(coderws.StatusInternalError, "failed to acquire user concurrency slot", err)
|
||||
}
|
||||
if !userAcquired {
|
||||
return service.NewOpenAIWSClientCloseError(coderws.StatusTryAgainLater, "too many concurrent requests, please retry later", nil)
|
||||
}
|
||||
accountReleaseFunc, accountAcquired, err := h.concurrencyHelper.TryAcquireAccountSlot(ctx, account.ID, accountMaxConcurrency)
|
||||
if err != nil {
|
||||
if userReleaseFunc != nil {
|
||||
userReleaseFunc()
|
||||
}
|
||||
return service.NewOpenAIWSClientCloseError(coderws.StatusInternalError, "failed to acquire account concurrency slot", err)
|
||||
}
|
||||
if !accountAcquired {
|
||||
if userReleaseFunc != nil {
|
||||
userReleaseFunc()
|
||||
}
|
||||
return service.NewOpenAIWSClientCloseError(coderws.StatusTryAgainLater, "account is busy, please retry later", nil)
|
||||
}
|
||||
currentUserRelease = wrapReleaseOnDone(ctx, userReleaseFunc)
|
||||
currentAccountRelease = wrapReleaseOnDone(ctx, accountReleaseFunc)
|
||||
return nil
|
||||
},
|
||||
AfterTurn: func(turn int, result *service.OpenAIForwardResult, turnErr error) {
|
||||
releaseTurnSlots()
|
||||
if turnErr != nil || result == nil {
|
||||
return
|
||||
}
|
||||
h.gatewayService.ReportOpenAIAccountScheduleResult(account.ID, true, result.FirstTokenMs)
|
||||
h.submitUsageRecordTask(func(taskCtx context.Context) {
|
||||
if err := h.gatewayService.RecordUsage(taskCtx, &service.OpenAIRecordUsageInput{
|
||||
Result: result,
|
||||
APIKey: apiKey,
|
||||
User: apiKey.User,
|
||||
Account: account,
|
||||
Subscription: subscription,
|
||||
UserAgent: userAgent,
|
||||
IPAddress: clientIP,
|
||||
APIKeyService: h.apiKeyService,
|
||||
}); err != nil {
|
||||
reqLog.Error("openai.websocket_record_usage_failed",
|
||||
zap.Int64("account_id", account.ID),
|
||||
zap.String("request_id", result.RequestID),
|
||||
zap.Error(err),
|
||||
)
|
||||
}
|
||||
})
|
||||
},
|
||||
}
|
||||
|
||||
if err := h.gatewayService.ProxyResponsesWebSocketFromClient(ctx, c, wsConn, account, token, firstMessage, hooks); err != nil {
|
||||
h.gatewayService.ReportOpenAIAccountScheduleResult(account.ID, false, nil)
|
||||
closeStatus, closeReason := summarizeWSCloseErrorForLog(err)
|
||||
reqLog.Warn("openai.websocket_proxy_failed",
|
||||
zap.Int64("account_id", account.ID),
|
||||
zap.Error(err),
|
||||
zap.String("close_status", closeStatus),
|
||||
zap.String("close_reason", closeReason),
|
||||
)
|
||||
var closeErr *service.OpenAIWSClientCloseError
|
||||
if errors.As(err, &closeErr) {
|
||||
closeOpenAIClientWS(wsConn, closeErr.StatusCode(), closeErr.Reason())
|
||||
return
|
||||
}
|
||||
closeOpenAIClientWS(wsConn, coderws.StatusInternalError, "upstream websocket proxy failed")
|
||||
return
|
||||
}
|
||||
reqLog.Info("openai.websocket_ingress_closed", zap.Int64("account_id", account.ID))
|
||||
}
|
||||
|
||||
func (h *OpenAIGatewayHandler) recoverResponsesPanic(c *gin.Context, streamStarted *bool) {
|
||||
recovered := recover()
|
||||
if recovered == nil {
|
||||
return
|
||||
}
|
||||
|
||||
started := false
|
||||
if streamStarted != nil {
|
||||
started = *streamStarted
|
||||
}
|
||||
wroteFallback := h.ensureForwardErrorResponse(c, started)
|
||||
requestLogger(c, "handler.openai_gateway.responses").Error(
|
||||
"openai.responses_panic_recovered",
|
||||
zap.Bool("fallback_error_response_written", wroteFallback),
|
||||
zap.Any("panic", recovered),
|
||||
zap.ByteString("stack", debug.Stack()),
|
||||
)
|
||||
}
|
||||
|
||||
func (h *OpenAIGatewayHandler) ensureResponsesDependencies(c *gin.Context, reqLog *zap.Logger) bool {
|
||||
missing := h.missingResponsesDependencies()
|
||||
if len(missing) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
if reqLog == nil {
|
||||
reqLog = requestLogger(c, "handler.openai_gateway.responses")
|
||||
}
|
||||
reqLog.Error("openai.handler_dependencies_missing", zap.Strings("missing_dependencies", missing))
|
||||
|
||||
if c != nil && c.Writer != nil && !c.Writer.Written() {
|
||||
c.JSON(http.StatusServiceUnavailable, gin.H{
|
||||
"error": gin.H{
|
||||
"type": "api_error",
|
||||
"message": "Service temporarily unavailable",
|
||||
},
|
||||
})
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (h *OpenAIGatewayHandler) missingResponsesDependencies() []string {
|
||||
missing := make([]string, 0, 5)
|
||||
if h == nil {
|
||||
return append(missing, "handler")
|
||||
}
|
||||
if h.gatewayService == nil {
|
||||
missing = append(missing, "gatewayService")
|
||||
}
|
||||
if h.billingCacheService == nil {
|
||||
missing = append(missing, "billingCacheService")
|
||||
}
|
||||
if h.apiKeyService == nil {
|
||||
missing = append(missing, "apiKeyService")
|
||||
}
|
||||
if h.concurrencyHelper == nil || h.concurrencyHelper.concurrencyService == nil {
|
||||
missing = append(missing, "concurrencyHelper")
|
||||
}
|
||||
return missing
|
||||
}
|
||||
|
||||
func getContextInt64(c *gin.Context, key string) (int64, bool) {
|
||||
if c == nil || key == "" {
|
||||
return 0, false
|
||||
@@ -444,6 +892,14 @@ func (h *OpenAIGatewayHandler) submitUsageRecordTask(task service.UsageRecordTas
|
||||
// 回退路径:worker 池未注入时同步执行,避免退回到无界 goroutine 模式。
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
defer func() {
|
||||
if recovered := recover(); recovered != nil {
|
||||
logger.L().With(
|
||||
zap.String("component", "handler.openai_gateway.responses"),
|
||||
zap.Any("panic", recovered),
|
||||
).Error("openai.usage_record_task_panic_recovered")
|
||||
}
|
||||
}()
|
||||
task(ctx)
|
||||
}
|
||||
|
||||
@@ -515,19 +971,8 @@ func (h *OpenAIGatewayHandler) handleStreamingAwareError(c *gin.Context, status
|
||||
// Stream already started, send error as SSE event then close
|
||||
flusher, ok := c.Writer.(http.Flusher)
|
||||
if ok {
|
||||
// Send error event in OpenAI SSE format with proper JSON marshaling
|
||||
errorData := map[string]any{
|
||||
"error": map[string]string{
|
||||
"type": errType,
|
||||
"message": message,
|
||||
},
|
||||
}
|
||||
jsonBytes, err := json.Marshal(errorData)
|
||||
if err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
errorEvent := fmt.Sprintf("event: error\ndata: %s\n\n", string(jsonBytes))
|
||||
// SSE 错误事件固定 schema,使用 Quote 直拼可避免额外 Marshal 分配。
|
||||
errorEvent := "event: error\ndata: " + `{"error":{"type":` + strconv.Quote(errType) + `,"message":` + strconv.Quote(message) + `}}` + "\n\n"
|
||||
if _, err := fmt.Fprint(c.Writer, errorEvent); err != nil {
|
||||
_ = c.Error(err)
|
||||
}
|
||||
@@ -549,6 +994,16 @@ func (h *OpenAIGatewayHandler) ensureForwardErrorResponse(c *gin.Context, stream
|
||||
return true
|
||||
}
|
||||
|
||||
func shouldLogOpenAIForwardFailureAsWarn(c *gin.Context, wroteFallback bool) bool {
|
||||
if wroteFallback {
|
||||
return false
|
||||
}
|
||||
if c == nil || c.Writer == nil {
|
||||
return false
|
||||
}
|
||||
return c.Writer.Written()
|
||||
}
|
||||
|
||||
// errorResponse returns OpenAI API format error response
|
||||
func (h *OpenAIGatewayHandler) errorResponse(c *gin.Context, status int, errType, message string) {
|
||||
c.JSON(status, gin.H{
|
||||
@@ -558,3 +1013,61 @@ func (h *OpenAIGatewayHandler) errorResponse(c *gin.Context, status int, errType
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func setOpenAIClientTransportHTTP(c *gin.Context) {
|
||||
service.SetOpenAIClientTransport(c, service.OpenAIClientTransportHTTP)
|
||||
}
|
||||
|
||||
func setOpenAIClientTransportWS(c *gin.Context) {
|
||||
service.SetOpenAIClientTransport(c, service.OpenAIClientTransportWS)
|
||||
}
|
||||
|
||||
func openAIWSIngressFallbackSessionSeed(userID, apiKeyID int64, groupID *int64) string {
|
||||
gid := int64(0)
|
||||
if groupID != nil {
|
||||
gid = *groupID
|
||||
}
|
||||
return fmt.Sprintf("openai_ws_ingress:%d:%d:%d", gid, userID, apiKeyID)
|
||||
}
|
||||
|
||||
func isOpenAIWSUpgradeRequest(r *http.Request) bool {
|
||||
if r == nil {
|
||||
return false
|
||||
}
|
||||
if !strings.EqualFold(strings.TrimSpace(r.Header.Get("Upgrade")), "websocket") {
|
||||
return false
|
||||
}
|
||||
return strings.Contains(strings.ToLower(strings.TrimSpace(r.Header.Get("Connection"))), "upgrade")
|
||||
}
|
||||
|
||||
func closeOpenAIClientWS(conn *coderws.Conn, status coderws.StatusCode, reason string) {
|
||||
if conn == nil {
|
||||
return
|
||||
}
|
||||
reason = strings.TrimSpace(reason)
|
||||
if len(reason) > 120 {
|
||||
reason = reason[:120]
|
||||
}
|
||||
_ = conn.Close(status, reason)
|
||||
_ = conn.CloseNow()
|
||||
}
|
||||
|
||||
func summarizeWSCloseErrorForLog(err error) (string, string) {
|
||||
if err == nil {
|
||||
return "-", "-"
|
||||
}
|
||||
statusCode := coderws.CloseStatus(err)
|
||||
if statusCode == -1 {
|
||||
return "-", "-"
|
||||
}
|
||||
closeStatus := fmt.Sprintf("%d(%s)", int(statusCode), statusCode.String())
|
||||
closeReason := "-"
|
||||
var closeErr coderws.CloseError
|
||||
if errors.As(err, &closeErr) {
|
||||
reason := strings.TrimSpace(closeErr.Reason)
|
||||
if reason != "" {
|
||||
closeReason = reason
|
||||
}
|
||||
}
|
||||
return closeStatus, closeReason
|
||||
}
|
||||
|
||||
@@ -1,12 +1,19 @@
|
||||
package handler
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
pkghttputil "github.com/Wei-Shaw/sub2api/internal/pkg/httputil"
|
||||
"github.com/Wei-Shaw/sub2api/internal/server/middleware"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
coderws "github.com/coder/websocket"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
@@ -105,6 +112,27 @@ func TestOpenAIHandleStreamingAwareError_NonStreaming(t *testing.T) {
|
||||
assert.Equal(t, "test error", errorObj["message"])
|
||||
}
|
||||
|
||||
func TestReadRequestBodyWithPrealloc(t *testing.T) {
|
||||
payload := `{"model":"gpt-5","input":"hello"}`
|
||||
req := httptest.NewRequest(http.MethodPost, "/v1/responses", strings.NewReader(payload))
|
||||
req.ContentLength = int64(len(payload))
|
||||
|
||||
body, err := pkghttputil.ReadRequestBodyWithPrealloc(req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, payload, string(body))
|
||||
}
|
||||
|
||||
func TestReadRequestBodyWithPrealloc_MaxBytesError(t *testing.T) {
|
||||
rec := httptest.NewRecorder()
|
||||
req := httptest.NewRequest(http.MethodPost, "/v1/responses", strings.NewReader(strings.Repeat("x", 8)))
|
||||
req.Body = http.MaxBytesReader(rec, req.Body, 4)
|
||||
|
||||
_, err := pkghttputil.ReadRequestBodyWithPrealloc(req)
|
||||
require.Error(t, err)
|
||||
var maxErr *http.MaxBytesError
|
||||
require.ErrorAs(t, err, &maxErr)
|
||||
}
|
||||
|
||||
func TestOpenAIEnsureForwardErrorResponse_WritesFallbackWhenNotWritten(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
w := httptest.NewRecorder()
|
||||
@@ -141,6 +169,387 @@ func TestOpenAIEnsureForwardErrorResponse_DoesNotOverrideWrittenResponse(t *test
|
||||
assert.Equal(t, "already written", w.Body.String())
|
||||
}
|
||||
|
||||
func TestShouldLogOpenAIForwardFailureAsWarn(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
t.Run("fallback_written_should_not_downgrade", func(t *testing.T) {
|
||||
w := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
c.Request = httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
require.False(t, shouldLogOpenAIForwardFailureAsWarn(c, true))
|
||||
})
|
||||
|
||||
t.Run("context_nil_should_not_downgrade", func(t *testing.T) {
|
||||
require.False(t, shouldLogOpenAIForwardFailureAsWarn(nil, false))
|
||||
})
|
||||
|
||||
t.Run("response_not_written_should_not_downgrade", func(t *testing.T) {
|
||||
w := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
c.Request = httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
require.False(t, shouldLogOpenAIForwardFailureAsWarn(c, false))
|
||||
})
|
||||
|
||||
t.Run("response_already_written_should_downgrade", func(t *testing.T) {
|
||||
w := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
c.Request = httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
c.String(http.StatusForbidden, "already written")
|
||||
require.True(t, shouldLogOpenAIForwardFailureAsWarn(c, false))
|
||||
})
|
||||
}
|
||||
|
||||
func TestOpenAIRecoverResponsesPanic_WritesFallbackResponse(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
c.Request = httptest.NewRequest(http.MethodPost, "/v1/responses", nil)
|
||||
|
||||
h := &OpenAIGatewayHandler{}
|
||||
streamStarted := false
|
||||
require.NotPanics(t, func() {
|
||||
func() {
|
||||
defer h.recoverResponsesPanic(c, &streamStarted)
|
||||
panic("test panic")
|
||||
}()
|
||||
})
|
||||
|
||||
require.Equal(t, http.StatusBadGateway, w.Code)
|
||||
|
||||
var parsed map[string]any
|
||||
err := json.Unmarshal(w.Body.Bytes(), &parsed)
|
||||
require.NoError(t, err)
|
||||
|
||||
errorObj, ok := parsed["error"].(map[string]any)
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, "upstream_error", errorObj["type"])
|
||||
assert.Equal(t, "Upstream request failed", errorObj["message"])
|
||||
}
|
||||
|
||||
func TestOpenAIRecoverResponsesPanic_NoPanicNoWrite(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
c.Request = httptest.NewRequest(http.MethodPost, "/v1/responses", nil)
|
||||
|
||||
h := &OpenAIGatewayHandler{}
|
||||
streamStarted := false
|
||||
require.NotPanics(t, func() {
|
||||
func() {
|
||||
defer h.recoverResponsesPanic(c, &streamStarted)
|
||||
}()
|
||||
})
|
||||
|
||||
require.False(t, c.Writer.Written())
|
||||
assert.Equal(t, "", w.Body.String())
|
||||
}
|
||||
|
||||
func TestOpenAIRecoverResponsesPanic_DoesNotOverrideWrittenResponse(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
c.Request = httptest.NewRequest(http.MethodPost, "/v1/responses", nil)
|
||||
c.String(http.StatusTeapot, "already written")
|
||||
|
||||
h := &OpenAIGatewayHandler{}
|
||||
streamStarted := false
|
||||
require.NotPanics(t, func() {
|
||||
func() {
|
||||
defer h.recoverResponsesPanic(c, &streamStarted)
|
||||
panic("test panic")
|
||||
}()
|
||||
})
|
||||
|
||||
require.Equal(t, http.StatusTeapot, w.Code)
|
||||
assert.Equal(t, "already written", w.Body.String())
|
||||
}
|
||||
|
||||
func TestOpenAIMissingResponsesDependencies(t *testing.T) {
|
||||
t.Run("nil_handler", func(t *testing.T) {
|
||||
var h *OpenAIGatewayHandler
|
||||
require.Equal(t, []string{"handler"}, h.missingResponsesDependencies())
|
||||
})
|
||||
|
||||
t.Run("all_dependencies_missing", func(t *testing.T) {
|
||||
h := &OpenAIGatewayHandler{}
|
||||
require.Equal(t,
|
||||
[]string{"gatewayService", "billingCacheService", "apiKeyService", "concurrencyHelper"},
|
||||
h.missingResponsesDependencies(),
|
||||
)
|
||||
})
|
||||
|
||||
t.Run("all_dependencies_present", func(t *testing.T) {
|
||||
h := &OpenAIGatewayHandler{
|
||||
gatewayService: &service.OpenAIGatewayService{},
|
||||
billingCacheService: &service.BillingCacheService{},
|
||||
apiKeyService: &service.APIKeyService{},
|
||||
concurrencyHelper: &ConcurrencyHelper{
|
||||
concurrencyService: &service.ConcurrencyService{},
|
||||
},
|
||||
}
|
||||
require.Empty(t, h.missingResponsesDependencies())
|
||||
})
|
||||
}
|
||||
|
||||
func TestOpenAIEnsureResponsesDependencies(t *testing.T) {
|
||||
t.Run("missing_dependencies_returns_503", func(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
w := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
c.Request = httptest.NewRequest(http.MethodPost, "/v1/responses", nil)
|
||||
|
||||
h := &OpenAIGatewayHandler{}
|
||||
ok := h.ensureResponsesDependencies(c, nil)
|
||||
|
||||
require.False(t, ok)
|
||||
require.Equal(t, http.StatusServiceUnavailable, w.Code)
|
||||
var parsed map[string]any
|
||||
err := json.Unmarshal(w.Body.Bytes(), &parsed)
|
||||
require.NoError(t, err)
|
||||
errorObj, exists := parsed["error"].(map[string]any)
|
||||
require.True(t, exists)
|
||||
assert.Equal(t, "api_error", errorObj["type"])
|
||||
assert.Equal(t, "Service temporarily unavailable", errorObj["message"])
|
||||
})
|
||||
|
||||
t.Run("already_written_response_not_overridden", func(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
w := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
c.Request = httptest.NewRequest(http.MethodPost, "/v1/responses", nil)
|
||||
c.String(http.StatusTeapot, "already written")
|
||||
|
||||
h := &OpenAIGatewayHandler{}
|
||||
ok := h.ensureResponsesDependencies(c, nil)
|
||||
|
||||
require.False(t, ok)
|
||||
require.Equal(t, http.StatusTeapot, w.Code)
|
||||
assert.Equal(t, "already written", w.Body.String())
|
||||
})
|
||||
|
||||
t.Run("dependencies_ready_returns_true_and_no_write", func(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
w := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
c.Request = httptest.NewRequest(http.MethodPost, "/v1/responses", nil)
|
||||
|
||||
h := &OpenAIGatewayHandler{
|
||||
gatewayService: &service.OpenAIGatewayService{},
|
||||
billingCacheService: &service.BillingCacheService{},
|
||||
apiKeyService: &service.APIKeyService{},
|
||||
concurrencyHelper: &ConcurrencyHelper{
|
||||
concurrencyService: &service.ConcurrencyService{},
|
||||
},
|
||||
}
|
||||
ok := h.ensureResponsesDependencies(c, nil)
|
||||
|
||||
require.True(t, ok)
|
||||
require.False(t, c.Writer.Written())
|
||||
assert.Equal(t, "", w.Body.String())
|
||||
})
|
||||
}
|
||||
|
||||
func TestOpenAIResponses_MissingDependencies_ReturnsServiceUnavailable(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
c.Request = httptest.NewRequest(http.MethodPost, "/v1/responses", strings.NewReader(`{"model":"gpt-5","stream":false}`))
|
||||
c.Request.Header.Set("Content-Type", "application/json")
|
||||
|
||||
groupID := int64(2)
|
||||
c.Set(string(middleware.ContextKeyAPIKey), &service.APIKey{
|
||||
ID: 10,
|
||||
GroupID: &groupID,
|
||||
})
|
||||
c.Set(string(middleware.ContextKeyUser), middleware.AuthSubject{
|
||||
UserID: 1,
|
||||
Concurrency: 1,
|
||||
})
|
||||
|
||||
// 故意使用未初始化依赖,验证快速失败而不是崩溃。
|
||||
h := &OpenAIGatewayHandler{}
|
||||
require.NotPanics(t, func() {
|
||||
h.Responses(c)
|
||||
})
|
||||
|
||||
require.Equal(t, http.StatusServiceUnavailable, w.Code)
|
||||
|
||||
var parsed map[string]any
|
||||
err := json.Unmarshal(w.Body.Bytes(), &parsed)
|
||||
require.NoError(t, err)
|
||||
|
||||
errorObj, ok := parsed["error"].(map[string]any)
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, "api_error", errorObj["type"])
|
||||
assert.Equal(t, "Service temporarily unavailable", errorObj["message"])
|
||||
}
|
||||
|
||||
func TestOpenAIResponses_SetsClientTransportHTTP(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
c.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", strings.NewReader(`{"model":"gpt-5"}`))
|
||||
c.Request.Header.Set("Content-Type", "application/json")
|
||||
|
||||
h := &OpenAIGatewayHandler{}
|
||||
h.Responses(c)
|
||||
|
||||
require.Equal(t, http.StatusUnauthorized, w.Code)
|
||||
require.Equal(t, service.OpenAIClientTransportHTTP, service.GetOpenAIClientTransport(c))
|
||||
}
|
||||
|
||||
func TestOpenAIResponses_RejectsMessageIDAsPreviousResponseID(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
c.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", strings.NewReader(
|
||||
`{"model":"gpt-5.1","stream":false,"previous_response_id":"msg_123456","input":[{"type":"input_text","text":"hello"}]}`,
|
||||
))
|
||||
c.Request.Header.Set("Content-Type", "application/json")
|
||||
|
||||
groupID := int64(2)
|
||||
c.Set(string(middleware.ContextKeyAPIKey), &service.APIKey{
|
||||
ID: 101,
|
||||
GroupID: &groupID,
|
||||
User: &service.User{ID: 1},
|
||||
})
|
||||
c.Set(string(middleware.ContextKeyUser), middleware.AuthSubject{
|
||||
UserID: 1,
|
||||
Concurrency: 1,
|
||||
})
|
||||
|
||||
h := newOpenAIHandlerForPreviousResponseIDValidation(t, nil)
|
||||
h.Responses(c)
|
||||
|
||||
require.Equal(t, http.StatusBadRequest, w.Code)
|
||||
require.Contains(t, w.Body.String(), "previous_response_id must be a response.id")
|
||||
}
|
||||
|
||||
func TestOpenAIResponsesWebSocket_SetsClientTransportWSWhenUpgradeValid(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
c.Request = httptest.NewRequest(http.MethodGet, "/openai/v1/responses", nil)
|
||||
c.Request.Header.Set("Upgrade", "websocket")
|
||||
c.Request.Header.Set("Connection", "Upgrade")
|
||||
|
||||
h := &OpenAIGatewayHandler{}
|
||||
h.ResponsesWebSocket(c)
|
||||
|
||||
require.Equal(t, http.StatusUnauthorized, w.Code)
|
||||
require.Equal(t, service.OpenAIClientTransportWS, service.GetOpenAIClientTransport(c))
|
||||
}
|
||||
|
||||
func TestOpenAIResponsesWebSocket_InvalidUpgradeDoesNotSetTransport(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
c.Request = httptest.NewRequest(http.MethodGet, "/openai/v1/responses", nil)
|
||||
|
||||
h := &OpenAIGatewayHandler{}
|
||||
h.ResponsesWebSocket(c)
|
||||
|
||||
require.Equal(t, http.StatusUpgradeRequired, w.Code)
|
||||
require.Equal(t, service.OpenAIClientTransportUnknown, service.GetOpenAIClientTransport(c))
|
||||
}
|
||||
|
||||
func TestOpenAIResponsesWebSocket_RejectsMessageIDAsPreviousResponseID(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
h := newOpenAIHandlerForPreviousResponseIDValidation(t, nil)
|
||||
wsServer := newOpenAIWSHandlerTestServer(t, h, middleware.AuthSubject{UserID: 1, Concurrency: 1})
|
||||
defer wsServer.Close()
|
||||
|
||||
dialCtx, cancelDial := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
clientConn, _, err := coderws.Dial(dialCtx, "ws"+strings.TrimPrefix(wsServer.URL, "http")+"/openai/v1/responses", nil)
|
||||
cancelDial()
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
_ = clientConn.CloseNow()
|
||||
}()
|
||||
|
||||
writeCtx, cancelWrite := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
err = clientConn.Write(writeCtx, coderws.MessageText, []byte(
|
||||
`{"type":"response.create","model":"gpt-5.1","stream":false,"previous_response_id":"msg_abc123"}`,
|
||||
))
|
||||
cancelWrite()
|
||||
require.NoError(t, err)
|
||||
|
||||
readCtx, cancelRead := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
_, _, err = clientConn.Read(readCtx)
|
||||
cancelRead()
|
||||
require.Error(t, err)
|
||||
var closeErr coderws.CloseError
|
||||
require.ErrorAs(t, err, &closeErr)
|
||||
require.Equal(t, coderws.StatusPolicyViolation, closeErr.Code)
|
||||
require.Contains(t, strings.ToLower(closeErr.Reason), "previous_response_id")
|
||||
}
|
||||
|
||||
func TestOpenAIResponsesWebSocket_PreviousResponseIDKindLoggedBeforeAcquireFailure(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
cache := &concurrencyCacheMock{
|
||||
acquireUserSlotFn: func(ctx context.Context, userID int64, maxConcurrency int, requestID string) (bool, error) {
|
||||
return false, errors.New("user slot unavailable")
|
||||
},
|
||||
}
|
||||
h := newOpenAIHandlerForPreviousResponseIDValidation(t, cache)
|
||||
wsServer := newOpenAIWSHandlerTestServer(t, h, middleware.AuthSubject{UserID: 1, Concurrency: 1})
|
||||
defer wsServer.Close()
|
||||
|
||||
dialCtx, cancelDial := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
clientConn, _, err := coderws.Dial(dialCtx, "ws"+strings.TrimPrefix(wsServer.URL, "http")+"/openai/v1/responses", nil)
|
||||
cancelDial()
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
_ = clientConn.CloseNow()
|
||||
}()
|
||||
|
||||
writeCtx, cancelWrite := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
err = clientConn.Write(writeCtx, coderws.MessageText, []byte(
|
||||
`{"type":"response.create","model":"gpt-5.1","stream":false,"previous_response_id":"resp_prev_123"}`,
|
||||
))
|
||||
cancelWrite()
|
||||
require.NoError(t, err)
|
||||
|
||||
readCtx, cancelRead := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
_, _, err = clientConn.Read(readCtx)
|
||||
cancelRead()
|
||||
require.Error(t, err)
|
||||
var closeErr coderws.CloseError
|
||||
require.ErrorAs(t, err, &closeErr)
|
||||
require.Equal(t, coderws.StatusInternalError, closeErr.Code)
|
||||
require.Contains(t, strings.ToLower(closeErr.Reason), "failed to acquire user concurrency slot")
|
||||
}
|
||||
|
||||
func TestSetOpenAIClientTransportHTTP(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
|
||||
setOpenAIClientTransportHTTP(c)
|
||||
require.Equal(t, service.OpenAIClientTransportHTTP, service.GetOpenAIClientTransport(c))
|
||||
}
|
||||
|
||||
func TestSetOpenAIClientTransportWS(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
|
||||
setOpenAIClientTransportWS(c)
|
||||
require.Equal(t, service.OpenAIClientTransportWS, service.GetOpenAIClientTransport(c))
|
||||
}
|
||||
|
||||
// TestOpenAIHandler_GjsonExtraction 验证 gjson 从请求体中提取 model/stream 的正确性
|
||||
func TestOpenAIHandler_GjsonExtraction(t *testing.T) {
|
||||
tests := []struct {
|
||||
@@ -228,3 +637,41 @@ func TestOpenAIHandler_InstructionsInjection(t *testing.T) {
|
||||
require.NoError(t, setErr)
|
||||
require.True(t, gjson.ValidBytes(result))
|
||||
}
|
||||
|
||||
func newOpenAIHandlerForPreviousResponseIDValidation(t *testing.T, cache *concurrencyCacheMock) *OpenAIGatewayHandler {
|
||||
t.Helper()
|
||||
if cache == nil {
|
||||
cache = &concurrencyCacheMock{
|
||||
acquireUserSlotFn: func(ctx context.Context, userID int64, maxConcurrency int, requestID string) (bool, error) {
|
||||
return true, nil
|
||||
},
|
||||
acquireAccountSlotFn: func(ctx context.Context, accountID int64, maxConcurrency int, requestID string) (bool, error) {
|
||||
return true, nil
|
||||
},
|
||||
}
|
||||
}
|
||||
return &OpenAIGatewayHandler{
|
||||
gatewayService: &service.OpenAIGatewayService{},
|
||||
billingCacheService: &service.BillingCacheService{},
|
||||
apiKeyService: &service.APIKeyService{},
|
||||
concurrencyHelper: NewConcurrencyHelper(service.NewConcurrencyService(cache), SSEPingFormatNone, time.Second),
|
||||
}
|
||||
}
|
||||
|
||||
func newOpenAIWSHandlerTestServer(t *testing.T, h *OpenAIGatewayHandler, subject middleware.AuthSubject) *httptest.Server {
|
||||
t.Helper()
|
||||
groupID := int64(2)
|
||||
apiKey := &service.APIKey{
|
||||
ID: 101,
|
||||
GroupID: &groupID,
|
||||
User: &service.User{ID: subject.UserID},
|
||||
}
|
||||
router := gin.New()
|
||||
router.Use(func(c *gin.Context) {
|
||||
c.Set(string(middleware.ContextKeyAPIKey), apiKey)
|
||||
c.Set(string(middleware.ContextKeyUser), subject)
|
||||
c.Next()
|
||||
})
|
||||
router.GET("/openai/v1/responses", h.ResponsesWebSocket)
|
||||
return httptest.NewServer(router)
|
||||
}
|
||||
|
||||
@@ -311,6 +311,35 @@ type opsCaptureWriter struct {
|
||||
buf bytes.Buffer
|
||||
}
|
||||
|
||||
const opsCaptureWriterLimit = 64 * 1024
|
||||
|
||||
var opsCaptureWriterPool = sync.Pool{
|
||||
New: func() any {
|
||||
return &opsCaptureWriter{limit: opsCaptureWriterLimit}
|
||||
},
|
||||
}
|
||||
|
||||
func acquireOpsCaptureWriter(rw gin.ResponseWriter) *opsCaptureWriter {
|
||||
w, ok := opsCaptureWriterPool.Get().(*opsCaptureWriter)
|
||||
if !ok || w == nil {
|
||||
w = &opsCaptureWriter{}
|
||||
}
|
||||
w.ResponseWriter = rw
|
||||
w.limit = opsCaptureWriterLimit
|
||||
w.buf.Reset()
|
||||
return w
|
||||
}
|
||||
|
||||
func releaseOpsCaptureWriter(w *opsCaptureWriter) {
|
||||
if w == nil {
|
||||
return
|
||||
}
|
||||
w.ResponseWriter = nil
|
||||
w.limit = opsCaptureWriterLimit
|
||||
w.buf.Reset()
|
||||
opsCaptureWriterPool.Put(w)
|
||||
}
|
||||
|
||||
func (w *opsCaptureWriter) Write(b []byte) (int, error) {
|
||||
if w.Status() >= 400 && w.limit > 0 && w.buf.Len() < w.limit {
|
||||
remaining := w.limit - w.buf.Len()
|
||||
@@ -342,7 +371,16 @@ func (w *opsCaptureWriter) WriteString(s string) (int, error) {
|
||||
// - Streaming errors after the response has started (SSE) may still need explicit logging.
|
||||
func OpsErrorLoggerMiddleware(ops *service.OpsService) gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
w := &opsCaptureWriter{ResponseWriter: c.Writer, limit: 64 * 1024}
|
||||
originalWriter := c.Writer
|
||||
w := acquireOpsCaptureWriter(originalWriter)
|
||||
defer func() {
|
||||
// Restore the original writer before returning so outer middlewares
|
||||
// don't observe a pooled wrapper that has been released.
|
||||
if c.Writer == w {
|
||||
c.Writer = originalWriter
|
||||
}
|
||||
releaseOpsCaptureWriter(w)
|
||||
}()
|
||||
c.Writer = w
|
||||
c.Next()
|
||||
|
||||
@@ -624,8 +662,10 @@ func OpsErrorLoggerMiddleware(ops *service.OpsService) gin.HandlerFunc {
|
||||
requestID = c.Writer.Header().Get("x-request-id")
|
||||
}
|
||||
|
||||
phase := classifyOpsPhase(parsed.ErrorType, parsed.Message, parsed.Code)
|
||||
isBusinessLimited := classifyOpsIsBusinessLimited(parsed.ErrorType, phase, parsed.Code, status, parsed.Message)
|
||||
normalizedType := normalizeOpsErrorType(parsed.ErrorType, parsed.Code)
|
||||
|
||||
phase := classifyOpsPhase(normalizedType, parsed.Message, parsed.Code)
|
||||
isBusinessLimited := classifyOpsIsBusinessLimited(normalizedType, phase, parsed.Code, status, parsed.Message)
|
||||
|
||||
errorOwner := classifyOpsErrorOwner(phase, parsed.Message)
|
||||
errorSource := classifyOpsErrorSource(phase, parsed.Message)
|
||||
@@ -647,8 +687,8 @@ func OpsErrorLoggerMiddleware(ops *service.OpsService) gin.HandlerFunc {
|
||||
UserAgent: c.GetHeader("User-Agent"),
|
||||
|
||||
ErrorPhase: phase,
|
||||
ErrorType: normalizeOpsErrorType(parsed.ErrorType, parsed.Code),
|
||||
Severity: classifyOpsSeverity(parsed.ErrorType, status),
|
||||
ErrorType: normalizedType,
|
||||
Severity: classifyOpsSeverity(normalizedType, status),
|
||||
StatusCode: status,
|
||||
IsBusinessLimited: isBusinessLimited,
|
||||
IsCountTokens: isCountTokensRequest(c),
|
||||
@@ -660,7 +700,7 @@ func OpsErrorLoggerMiddleware(ops *service.OpsService) gin.HandlerFunc {
|
||||
ErrorSource: errorSource,
|
||||
ErrorOwner: errorOwner,
|
||||
|
||||
IsRetryable: classifyOpsIsRetryable(parsed.ErrorType, status),
|
||||
IsRetryable: classifyOpsIsRetryable(normalizedType, status),
|
||||
RetryCount: 0,
|
||||
CreatedAt: time.Now(),
|
||||
}
|
||||
@@ -901,8 +941,29 @@ func guessPlatformFromPath(path string) string {
|
||||
}
|
||||
}
|
||||
|
||||
// isKnownOpsErrorType returns true if t is a recognized error type used by the
|
||||
// ops classification pipeline. Upstream proxies sometimes return garbage values
|
||||
// (e.g. the Go-serialized literal "<nil>") which would pollute phase/severity
|
||||
// classification if accepted blindly.
|
||||
func isKnownOpsErrorType(t string) bool {
|
||||
switch t {
|
||||
case "invalid_request_error",
|
||||
"authentication_error",
|
||||
"rate_limit_error",
|
||||
"billing_error",
|
||||
"subscription_error",
|
||||
"upstream_error",
|
||||
"overloaded_error",
|
||||
"api_error",
|
||||
"not_found_error",
|
||||
"forbidden_error":
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func normalizeOpsErrorType(errType string, code string) string {
|
||||
if errType != "" {
|
||||
if errType != "" && isKnownOpsErrorType(errType) {
|
||||
return errType
|
||||
}
|
||||
switch strings.TrimSpace(code) {
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/stretchr/testify/require"
|
||||
@@ -173,3 +174,103 @@ func TestEnqueueOpsErrorLog_EarlyReturnBranches(t *testing.T) {
|
||||
enqueueOpsErrorLog(ops, entry)
|
||||
require.Equal(t, int64(0), OpsErrorLogEnqueuedTotal())
|
||||
}
|
||||
|
||||
func TestOpsCaptureWriterPool_ResetOnRelease(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(rec)
|
||||
c.Request = httptest.NewRequest(http.MethodGet, "/test", nil)
|
||||
|
||||
writer := acquireOpsCaptureWriter(c.Writer)
|
||||
require.NotNil(t, writer)
|
||||
_, err := writer.buf.WriteString("temp-error-body")
|
||||
require.NoError(t, err)
|
||||
|
||||
releaseOpsCaptureWriter(writer)
|
||||
|
||||
reused := acquireOpsCaptureWriter(c.Writer)
|
||||
defer releaseOpsCaptureWriter(reused)
|
||||
|
||||
require.Zero(t, reused.buf.Len(), "writer should be reset before reuse")
|
||||
}
|
||||
|
||||
func TestOpsErrorLoggerMiddleware_DoesNotBreakOuterMiddlewares(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
r := gin.New()
|
||||
r.Use(middleware2.Recovery())
|
||||
r.Use(middleware2.RequestLogger())
|
||||
r.Use(middleware2.Logger())
|
||||
r.GET("/v1/messages", OpsErrorLoggerMiddleware(nil), func(c *gin.Context) {
|
||||
c.Status(http.StatusNoContent)
|
||||
})
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
req := httptest.NewRequest(http.MethodGet, "/v1/messages", nil)
|
||||
|
||||
require.NotPanics(t, func() {
|
||||
r.ServeHTTP(rec, req)
|
||||
})
|
||||
require.Equal(t, http.StatusNoContent, rec.Code)
|
||||
}
|
||||
|
||||
func TestIsKnownOpsErrorType(t *testing.T) {
|
||||
known := []string{
|
||||
"invalid_request_error",
|
||||
"authentication_error",
|
||||
"rate_limit_error",
|
||||
"billing_error",
|
||||
"subscription_error",
|
||||
"upstream_error",
|
||||
"overloaded_error",
|
||||
"api_error",
|
||||
"not_found_error",
|
||||
"forbidden_error",
|
||||
}
|
||||
for _, k := range known {
|
||||
require.True(t, isKnownOpsErrorType(k), "expected known: %s", k)
|
||||
}
|
||||
|
||||
unknown := []string{"<nil>", "null", "", "random_error", "some_new_type", "<nil>\u003e"}
|
||||
for _, u := range unknown {
|
||||
require.False(t, isKnownOpsErrorType(u), "expected unknown: %q", u)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNormalizeOpsErrorType(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
errType string
|
||||
code string
|
||||
want string
|
||||
}{
|
||||
// Known types pass through.
|
||||
{"known invalid_request_error", "invalid_request_error", "", "invalid_request_error"},
|
||||
{"known rate_limit_error", "rate_limit_error", "", "rate_limit_error"},
|
||||
{"known upstream_error", "upstream_error", "", "upstream_error"},
|
||||
|
||||
// Unknown/garbage types are rejected and fall through to code-based or default.
|
||||
{"nil literal from upstream", "<nil>", "", "api_error"},
|
||||
{"null string", "null", "", "api_error"},
|
||||
{"random string", "something_weird", "", "api_error"},
|
||||
|
||||
// Unknown type but known code still maps correctly.
|
||||
{"nil with INSUFFICIENT_BALANCE code", "<nil>", "INSUFFICIENT_BALANCE", "billing_error"},
|
||||
{"nil with USAGE_LIMIT_EXCEEDED code", "<nil>", "USAGE_LIMIT_EXCEEDED", "subscription_error"},
|
||||
|
||||
// Empty type falls through to code-based mapping.
|
||||
{"empty type with balance code", "", "INSUFFICIENT_BALANCE", "billing_error"},
|
||||
{"empty type with subscription code", "", "SUBSCRIPTION_NOT_FOUND", "subscription_error"},
|
||||
{"empty type no code", "", "", "api_error"},
|
||||
|
||||
// Known type overrides conflicting code-based mapping.
|
||||
{"known type overrides conflicting code", "rate_limit_error", "INSUFFICIENT_BALANCE", "rate_limit_error"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := normalizeOpsErrorType(tt.errType, tt.code)
|
||||
require.Equal(t, tt.want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -32,25 +32,28 @@ func (h *SettingHandler) GetPublicSettings(c *gin.Context) {
|
||||
}
|
||||
|
||||
response.Success(c, dto.PublicSettings{
|
||||
RegistrationEnabled: settings.RegistrationEnabled,
|
||||
EmailVerifyEnabled: settings.EmailVerifyEnabled,
|
||||
PromoCodeEnabled: settings.PromoCodeEnabled,
|
||||
PasswordResetEnabled: settings.PasswordResetEnabled,
|
||||
InvitationCodeEnabled: settings.InvitationCodeEnabled,
|
||||
TotpEnabled: settings.TotpEnabled,
|
||||
TurnstileEnabled: settings.TurnstileEnabled,
|
||||
TurnstileSiteKey: settings.TurnstileSiteKey,
|
||||
SiteName: settings.SiteName,
|
||||
SiteLogo: settings.SiteLogo,
|
||||
SiteSubtitle: settings.SiteSubtitle,
|
||||
APIBaseURL: settings.APIBaseURL,
|
||||
ContactInfo: settings.ContactInfo,
|
||||
DocURL: settings.DocURL,
|
||||
HomeContent: settings.HomeContent,
|
||||
HideCcsImportButton: settings.HideCcsImportButton,
|
||||
PurchaseSubscriptionEnabled: settings.PurchaseSubscriptionEnabled,
|
||||
PurchaseSubscriptionURL: settings.PurchaseSubscriptionURL,
|
||||
LinuxDoOAuthEnabled: settings.LinuxDoOAuthEnabled,
|
||||
Version: h.version,
|
||||
RegistrationEnabled: settings.RegistrationEnabled,
|
||||
EmailVerifyEnabled: settings.EmailVerifyEnabled,
|
||||
RegistrationEmailSuffixWhitelist: settings.RegistrationEmailSuffixWhitelist,
|
||||
PromoCodeEnabled: settings.PromoCodeEnabled,
|
||||
PasswordResetEnabled: settings.PasswordResetEnabled,
|
||||
InvitationCodeEnabled: settings.InvitationCodeEnabled,
|
||||
TotpEnabled: settings.TotpEnabled,
|
||||
TurnstileEnabled: settings.TurnstileEnabled,
|
||||
TurnstileSiteKey: settings.TurnstileSiteKey,
|
||||
SiteName: settings.SiteName,
|
||||
SiteLogo: settings.SiteLogo,
|
||||
SiteSubtitle: settings.SiteSubtitle,
|
||||
APIBaseURL: settings.APIBaseURL,
|
||||
ContactInfo: settings.ContactInfo,
|
||||
DocURL: settings.DocURL,
|
||||
HomeContent: settings.HomeContent,
|
||||
HideCcsImportButton: settings.HideCcsImportButton,
|
||||
PurchaseSubscriptionEnabled: settings.PurchaseSubscriptionEnabled,
|
||||
PurchaseSubscriptionURL: settings.PurchaseSubscriptionURL,
|
||||
CustomMenuItems: dto.ParseUserVisibleMenuItems(settings.CustomMenuItems),
|
||||
LinuxDoOAuthEnabled: settings.LinuxDoOAuthEnabled,
|
||||
SoraClientEnabled: settings.SoraClientEnabled,
|
||||
Version: h.version,
|
||||
})
|
||||
}
|
||||
|
||||
979
backend/internal/handler/sora_client_handler.go
Normal file
979
backend/internal/handler/sora_client_handler.go
Normal file
@@ -0,0 +1,979 @@
|
||||
package handler
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/logger"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/response"
|
||||
middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
const (
|
||||
// 上游模型缓存 TTL
|
||||
modelCacheTTL = 1 * time.Hour // 上游获取成功
|
||||
modelCacheFailedTTL = 2 * time.Minute // 上游获取失败(降级到本地)
|
||||
)
|
||||
|
||||
// SoraClientHandler 处理 Sora 客户端 API 请求。
|
||||
type SoraClientHandler struct {
|
||||
genService *service.SoraGenerationService
|
||||
quotaService *service.SoraQuotaService
|
||||
s3Storage *service.SoraS3Storage
|
||||
soraGatewayService *service.SoraGatewayService
|
||||
gatewayService *service.GatewayService
|
||||
mediaStorage *service.SoraMediaStorage
|
||||
apiKeyService *service.APIKeyService
|
||||
|
||||
// 上游模型缓存
|
||||
modelCacheMu sync.RWMutex
|
||||
cachedFamilies []service.SoraModelFamily
|
||||
modelCacheTime time.Time
|
||||
modelCacheUpstream bool // 是否来自上游(决定 TTL)
|
||||
}
|
||||
|
||||
// NewSoraClientHandler 创建 Sora 客户端 Handler。
|
||||
func NewSoraClientHandler(
|
||||
genService *service.SoraGenerationService,
|
||||
quotaService *service.SoraQuotaService,
|
||||
s3Storage *service.SoraS3Storage,
|
||||
soraGatewayService *service.SoraGatewayService,
|
||||
gatewayService *service.GatewayService,
|
||||
mediaStorage *service.SoraMediaStorage,
|
||||
apiKeyService *service.APIKeyService,
|
||||
) *SoraClientHandler {
|
||||
return &SoraClientHandler{
|
||||
genService: genService,
|
||||
quotaService: quotaService,
|
||||
s3Storage: s3Storage,
|
||||
soraGatewayService: soraGatewayService,
|
||||
gatewayService: gatewayService,
|
||||
mediaStorage: mediaStorage,
|
||||
apiKeyService: apiKeyService,
|
||||
}
|
||||
}
|
||||
|
||||
// GenerateRequest 生成请求。
|
||||
type GenerateRequest struct {
|
||||
Model string `json:"model" binding:"required"`
|
||||
Prompt string `json:"prompt" binding:"required"`
|
||||
MediaType string `json:"media_type"` // video / image,默认 video
|
||||
VideoCount int `json:"video_count,omitempty"` // 视频数量(1-3)
|
||||
ImageInput string `json:"image_input,omitempty"` // 参考图(base64 或 URL)
|
||||
APIKeyID *int64 `json:"api_key_id,omitempty"` // 前端传递的 API Key ID
|
||||
}
|
||||
|
||||
// Generate 异步生成 — 创建 pending 记录后立即返回。
|
||||
// POST /api/v1/sora/generate
|
||||
func (h *SoraClientHandler) Generate(c *gin.Context) {
|
||||
userID := getUserIDFromContext(c)
|
||||
if userID == 0 {
|
||||
response.Error(c, http.StatusUnauthorized, "未登录")
|
||||
return
|
||||
}
|
||||
|
||||
var req GenerateRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
response.Error(c, http.StatusBadRequest, "参数错误: "+err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if req.MediaType == "" {
|
||||
req.MediaType = "video"
|
||||
}
|
||||
req.VideoCount = normalizeVideoCount(req.MediaType, req.VideoCount)
|
||||
|
||||
// 并发数检查(最多 3 个)
|
||||
activeCount, err := h.genService.CountActiveByUser(c.Request.Context(), userID)
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
if activeCount >= 3 {
|
||||
response.Error(c, http.StatusTooManyRequests, "同时进行中的任务不能超过 3 个")
|
||||
return
|
||||
}
|
||||
|
||||
// 配额检查(粗略检查,实际文件大小在上传后才知道)
|
||||
if h.quotaService != nil {
|
||||
if err := h.quotaService.CheckQuota(c.Request.Context(), userID, 0); err != nil {
|
||||
var quotaErr *service.QuotaExceededError
|
||||
if errors.As(err, "aErr) {
|
||||
response.Error(c, http.StatusTooManyRequests, "存储配额已满,请删除不需要的作品释放空间")
|
||||
return
|
||||
}
|
||||
response.Error(c, http.StatusForbidden, err.Error())
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// 获取 API Key ID 和 Group ID
|
||||
var apiKeyID *int64
|
||||
var groupID *int64
|
||||
|
||||
if req.APIKeyID != nil && h.apiKeyService != nil {
|
||||
// 前端传递了 api_key_id,需要校验
|
||||
apiKey, err := h.apiKeyService.GetByID(c.Request.Context(), *req.APIKeyID)
|
||||
if err != nil {
|
||||
response.Error(c, http.StatusBadRequest, "API Key 不存在")
|
||||
return
|
||||
}
|
||||
if apiKey.UserID != userID {
|
||||
response.Error(c, http.StatusForbidden, "API Key 不属于当前用户")
|
||||
return
|
||||
}
|
||||
if apiKey.Status != service.StatusAPIKeyActive {
|
||||
response.Error(c, http.StatusForbidden, "API Key 不可用")
|
||||
return
|
||||
}
|
||||
apiKeyID = &apiKey.ID
|
||||
groupID = apiKey.GroupID
|
||||
} else if id, ok := c.Get("api_key_id"); ok {
|
||||
// 兼容 API Key 认证路径(/sora/v1/ 网关路由)
|
||||
if v, ok := id.(int64); ok {
|
||||
apiKeyID = &v
|
||||
}
|
||||
}
|
||||
|
||||
gen, err := h.genService.CreatePending(c.Request.Context(), userID, apiKeyID, req.Model, req.Prompt, req.MediaType)
|
||||
if err != nil {
|
||||
if errors.Is(err, service.ErrSoraGenerationConcurrencyLimit) {
|
||||
response.Error(c, http.StatusTooManyRequests, "同时进行中的任务不能超过 3 个")
|
||||
return
|
||||
}
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
|
||||
// 启动后台异步生成 goroutine
|
||||
go h.processGeneration(gen.ID, userID, groupID, req.Model, req.Prompt, req.MediaType, req.ImageInput, req.VideoCount)
|
||||
|
||||
response.Success(c, gin.H{
|
||||
"generation_id": gen.ID,
|
||||
"status": gen.Status,
|
||||
})
|
||||
}
|
||||
|
||||
// processGeneration 后台异步执行 Sora 生成任务。
|
||||
// 流程:选择账号 → Forward → 提取媒体 URL → 三层降级存储(S3 → 本地 → 上游)→ 更新记录。
|
||||
func (h *SoraClientHandler) processGeneration(genID int64, userID int64, groupID *int64, model, prompt, mediaType, imageInput string, videoCount int) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
// 标记为生成中
|
||||
if err := h.genService.MarkGenerating(ctx, genID, ""); err != nil {
|
||||
if errors.Is(err, service.ErrSoraGenerationStateConflict) {
|
||||
logger.LegacyPrintf("handler.sora_client", "[SoraClient] 任务状态已变化,跳过生成 id=%d", genID)
|
||||
return
|
||||
}
|
||||
logger.LegacyPrintf("handler.sora_client", "[SoraClient] 标记生成中失败 id=%d err=%v", genID, err)
|
||||
return
|
||||
}
|
||||
|
||||
logger.LegacyPrintf(
|
||||
"handler.sora_client",
|
||||
"[SoraClient] 开始生成 id=%d user=%d group=%d model=%s media_type=%s video_count=%d has_image=%v prompt_len=%d",
|
||||
genID,
|
||||
userID,
|
||||
groupIDForLog(groupID),
|
||||
model,
|
||||
mediaType,
|
||||
videoCount,
|
||||
strings.TrimSpace(imageInput) != "",
|
||||
len(strings.TrimSpace(prompt)),
|
||||
)
|
||||
|
||||
// 有 groupID 时由分组决定平台,无 groupID 时用 ForcePlatform 兜底
|
||||
if groupID == nil {
|
||||
ctx = context.WithValue(ctx, ctxkey.ForcePlatform, service.PlatformSora)
|
||||
}
|
||||
|
||||
if h.gatewayService == nil {
|
||||
_ = h.genService.MarkFailed(ctx, genID, "内部错误: gatewayService 未初始化")
|
||||
return
|
||||
}
|
||||
|
||||
// 选择 Sora 账号
|
||||
account, err := h.gatewayService.SelectAccountForModel(ctx, groupID, "", model)
|
||||
if err != nil {
|
||||
logger.LegacyPrintf(
|
||||
"handler.sora_client",
|
||||
"[SoraClient] 选择账号失败 id=%d user=%d group=%d model=%s err=%v",
|
||||
genID,
|
||||
userID,
|
||||
groupIDForLog(groupID),
|
||||
model,
|
||||
err,
|
||||
)
|
||||
_ = h.genService.MarkFailed(ctx, genID, "选择账号失败: "+err.Error())
|
||||
return
|
||||
}
|
||||
logger.LegacyPrintf(
|
||||
"handler.sora_client",
|
||||
"[SoraClient] 选中账号 id=%d user=%d group=%d model=%s account_id=%d account_name=%s platform=%s type=%s",
|
||||
genID,
|
||||
userID,
|
||||
groupIDForLog(groupID),
|
||||
model,
|
||||
account.ID,
|
||||
account.Name,
|
||||
account.Platform,
|
||||
account.Type,
|
||||
)
|
||||
|
||||
// 构建 chat completions 请求体(非流式)
|
||||
body := buildAsyncRequestBody(model, prompt, imageInput, normalizeVideoCount(mediaType, videoCount))
|
||||
|
||||
if h.soraGatewayService == nil {
|
||||
_ = h.genService.MarkFailed(ctx, genID, "内部错误: soraGatewayService 未初始化")
|
||||
return
|
||||
}
|
||||
|
||||
// 创建 mock gin 上下文用于 Forward(捕获响应以提取媒体 URL)
|
||||
recorder := httptest.NewRecorder()
|
||||
mockGinCtx, _ := gin.CreateTestContext(recorder)
|
||||
mockGinCtx.Request, _ = http.NewRequest("POST", "/", nil)
|
||||
|
||||
// 调用 Forward(非流式)
|
||||
result, err := h.soraGatewayService.Forward(ctx, mockGinCtx, account, body, false)
|
||||
if err != nil {
|
||||
logger.LegacyPrintf(
|
||||
"handler.sora_client",
|
||||
"[SoraClient] Forward失败 id=%d account_id=%d model=%s status=%d body=%s err=%v",
|
||||
genID,
|
||||
account.ID,
|
||||
model,
|
||||
recorder.Code,
|
||||
trimForLog(recorder.Body.String(), 400),
|
||||
err,
|
||||
)
|
||||
// 检查是否已取消
|
||||
gen, _ := h.genService.GetByID(ctx, genID, userID)
|
||||
if gen != nil && gen.Status == service.SoraGenStatusCancelled {
|
||||
return
|
||||
}
|
||||
_ = h.genService.MarkFailed(ctx, genID, "生成失败: "+err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
// 提取媒体 URL(优先从 ForwardResult,其次从响应体解析)
|
||||
mediaURL, mediaURLs := extractMediaURLsFromResult(result, recorder)
|
||||
if mediaURL == "" {
|
||||
logger.LegacyPrintf(
|
||||
"handler.sora_client",
|
||||
"[SoraClient] 未提取到媒体URL id=%d account_id=%d model=%s status=%d body=%s",
|
||||
genID,
|
||||
account.ID,
|
||||
model,
|
||||
recorder.Code,
|
||||
trimForLog(recorder.Body.String(), 400),
|
||||
)
|
||||
_ = h.genService.MarkFailed(ctx, genID, "未获取到媒体 URL")
|
||||
return
|
||||
}
|
||||
|
||||
// 检查任务是否已被取消
|
||||
gen, _ := h.genService.GetByID(ctx, genID, userID)
|
||||
if gen != nil && gen.Status == service.SoraGenStatusCancelled {
|
||||
logger.LegacyPrintf("handler.sora_client", "[SoraClient] 任务已取消,跳过存储 id=%d", genID)
|
||||
return
|
||||
}
|
||||
|
||||
// 三层降级存储:S3 → 本地 → 上游临时 URL
|
||||
storedURL, storedURLs, storageType, s3Keys, fileSize := h.storeMediaWithDegradation(ctx, userID, mediaType, mediaURL, mediaURLs)
|
||||
|
||||
usageAdded := false
|
||||
if (storageType == service.SoraStorageTypeS3 || storageType == service.SoraStorageTypeLocal) && fileSize > 0 && h.quotaService != nil {
|
||||
if err := h.quotaService.AddUsage(ctx, userID, fileSize); err != nil {
|
||||
h.cleanupStoredMedia(ctx, storageType, s3Keys, storedURLs)
|
||||
var quotaErr *service.QuotaExceededError
|
||||
if errors.As(err, "aErr) {
|
||||
_ = h.genService.MarkFailed(ctx, genID, "存储配额已满,请删除不需要的作品释放空间")
|
||||
return
|
||||
}
|
||||
_ = h.genService.MarkFailed(ctx, genID, "存储配额更新失败: "+err.Error())
|
||||
return
|
||||
}
|
||||
usageAdded = true
|
||||
}
|
||||
|
||||
// 存储完成后再做一次取消检查,防止取消被 completed 覆盖。
|
||||
gen, _ = h.genService.GetByID(ctx, genID, userID)
|
||||
if gen != nil && gen.Status == service.SoraGenStatusCancelled {
|
||||
logger.LegacyPrintf("handler.sora_client", "[SoraClient] 存储后检测到任务已取消,回滚存储 id=%d", genID)
|
||||
h.cleanupStoredMedia(ctx, storageType, s3Keys, storedURLs)
|
||||
if usageAdded && h.quotaService != nil {
|
||||
_ = h.quotaService.ReleaseUsage(ctx, userID, fileSize)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// 标记完成
|
||||
if err := h.genService.MarkCompleted(ctx, genID, storedURL, storedURLs, storageType, s3Keys, fileSize); err != nil {
|
||||
if errors.Is(err, service.ErrSoraGenerationStateConflict) {
|
||||
h.cleanupStoredMedia(ctx, storageType, s3Keys, storedURLs)
|
||||
if usageAdded && h.quotaService != nil {
|
||||
_ = h.quotaService.ReleaseUsage(ctx, userID, fileSize)
|
||||
}
|
||||
return
|
||||
}
|
||||
logger.LegacyPrintf("handler.sora_client", "[SoraClient] 标记完成失败 id=%d err=%v", genID, err)
|
||||
return
|
||||
}
|
||||
|
||||
logger.LegacyPrintf("handler.sora_client", "[SoraClient] 生成完成 id=%d storage=%s size=%d", genID, storageType, fileSize)
|
||||
}
|
||||
|
||||
// storeMediaWithDegradation persists generated media using a three-tier
// degradation chain: S3 → local disk → upstream temporary URL.
//
// It returns the primary stored URL, the full list of stored URLs/paths, the
// storage-type constant, the S3 object keys (S3 tier only, nil otherwise),
// and the total stored byte size (0 for the upstream tier, where nothing is
// copied). All-or-nothing per tier: a partial S3 upload is rolled back before
// falling through to the next tier.
func (h *SoraClientHandler) storeMediaWithDegradation(
	ctx context.Context, userID int64, mediaType string,
	mediaURL string, mediaURLs []string,
) (storedURL string, storedURLs []string, storageType string, s3Keys []string, fileSize int64) {
	urls := mediaURLs
	if len(urls) == 0 {
		urls = []string{mediaURL}
	}

	// Tier 1: S3 object storage.
	if h.s3Storage != nil && h.s3Storage.Enabled(ctx) {
		keys := make([]string, 0, len(urls))
		var totalSize int64
		allOK := true
		for _, u := range urls {
			key, size, err := h.s3Storage.UploadFromURL(ctx, userID, u)
			if err != nil {
				logger.LegacyPrintf("handler.sora_client", "[SoraClient] S3 上传失败 err=%v", err)
				allOK = false
				// Roll back objects uploaded before the failure.
				if len(keys) > 0 {
					_ = h.s3Storage.DeleteObjects(ctx, keys)
				}
				break
			}
			keys = append(keys, key)
			totalSize += size
		}
		if allOK && len(keys) > 0 {
			accessURLs := make([]string, 0, len(keys))
			for _, key := range keys {
				accessURL, err := h.s3Storage.GetAccessURL(ctx, key)
				if err != nil {
					logger.LegacyPrintf("handler.sora_client", "[SoraClient] 生成 S3 访问 URL 失败 err=%v", err)
					// Any URL failure invalidates the whole batch; delete all keys.
					_ = h.s3Storage.DeleteObjects(ctx, keys)
					allOK = false
					break
				}
				accessURLs = append(accessURLs, accessURL)
			}
			if allOK && len(accessURLs) > 0 {
				return accessURLs[0], accessURLs, service.SoraStorageTypeS3, keys, totalSize
			}
		}
	}

	// Tier 2: local media storage.
	if h.mediaStorage != nil && h.mediaStorage.Enabled() {
		storedPaths, err := h.mediaStorage.StoreFromURLs(ctx, mediaType, urls)
		if err == nil && len(storedPaths) > 0 {
			firstPath := storedPaths[0]
			// Size accounting is best effort; a failure leaves totalSize at 0.
			totalSize, sizeErr := h.mediaStorage.TotalSizeByRelativePaths(storedPaths)
			if sizeErr != nil {
				logger.LegacyPrintf("handler.sora_client", "[SoraClient] 统计本地文件大小失败 err=%v", sizeErr)
			}
			return firstPath, storedPaths, service.SoraStorageTypeLocal, nil, totalSize
		}
		logger.LegacyPrintf("handler.sora_client", "[SoraClient] 本地存储失败 err=%v", err)
	}

	// Tier 3: keep the upstream temporary URLs as-is (nothing stored, size 0).
	return urls[0], urls, service.SoraStorageTypeUpstream, nil, 0
}
|
||||
|
||||
// buildAsyncRequestBody builds the non-streaming chat-completions request
// body for an asynchronous Sora generation. Optional fields (image_input,
// video_count) are only included when meaningful: a non-empty image input,
// and a video count greater than one.
func buildAsyncRequestBody(model, prompt, imageInput string, videoCount int) []byte {
	payload := map[string]any{
		"model":  model,
		"stream": false,
		"messages": []map[string]string{
			{"role": "user", "content": prompt},
		},
	}
	if len(imageInput) > 0 {
		payload["image_input"] = imageInput
	}
	if videoCount >= 2 {
		payload["video_count"] = videoCount
	}
	// Marshal of a map of plain values cannot fail; the error is discarded.
	encoded, _ := json.Marshal(payload)
	return encoded
}
|
||||
|
||||
// normalizeVideoCount clamps the requested video count into the supported
// range [1, 3]. Non-video media types always produce a single output.
func normalizeVideoCount(mediaType string, videoCount int) int {
	if mediaType != "video" {
		return 1
	}
	switch {
	case videoCount <= 0:
		return 1
	case videoCount > 3:
		return 3
	default:
		return videoCount
	}
}
|
||||
|
||||
// extractMediaURLsFromResult extracts the media URL(s) from a Forward result
// and the captured response body.
// OAuth path: ForwardResult.MediaURL is already populated.
// API-key path: the media_url / media_urls fields must be parsed from the
// response body instead.
// Returns the primary URL plus the full list; ("", nil) when nothing usable
// was found.
func extractMediaURLsFromResult(result *service.ForwardResult, recorder *httptest.ResponseRecorder) (string, []string) {
	// Prefer ForwardResult (OAuth path).
	if result != nil && result.MediaURL != "" {
		// The body may carry the complete URL list; use it when present.
		if urls := parseMediaURLsFromBody(recorder.Body.Bytes()); len(urls) > 0 {
			return urls[0], urls
		}
		return result.MediaURL, []string{result.MediaURL}
	}

	// Parse from the response body (API-key path).
	if urls := parseMediaURLsFromBody(recorder.Body.Bytes()); len(urls) > 0 {
		return urls[0], urls
	}

	return "", nil
}
|
||||
|
||||
// parseMediaURLsFromBody parses the media_url / media_urls fields out of a
// JSON response body. The plural media_urls array takes precedence; empty
// strings and non-string entries are skipped. Returns nil when the body is
// empty, not valid JSON, or contains no usable URL.
func parseMediaURLsFromBody(body []byte) []string {
	if len(body) == 0 {
		return nil
	}
	var payload map[string]any
	if json.Unmarshal(body, &payload) != nil {
		return nil
	}

	// Prefer the plural "media_urls" field (multi-output responses).
	if arr, ok := payload["media_urls"].([]any); ok && len(arr) > 0 {
		collected := make([]string, 0, len(arr))
		for _, entry := range arr {
			if s, ok := entry.(string); ok && s != "" {
				collected = append(collected, s)
			}
		}
		if len(collected) > 0 {
			return collected
		}
	}

	// Fall back to the singular "media_url" field.
	if single, ok := payload["media_url"].(string); ok && single != "" {
		return []string{single}
	}

	return nil
}
|
||||
|
||||
// ListGenerations lists the caller's generation records with optional
// status / storage-type / media-type filters and pagination.
// GET /api/v1/sora/generations
func (h *SoraClientHandler) ListGenerations(c *gin.Context) {
	userID := getUserIDFromContext(c)
	if userID == 0 {
		response.Error(c, http.StatusUnauthorized, "未登录")
		return
	}

	// Parse errors are ignored, leaving page/pageSize at 0 — presumably
	// normalized by the service layer; verify against List's implementation.
	page, _ := strconv.Atoi(c.DefaultQuery("page", "1"))
	pageSize, _ := strconv.Atoi(c.DefaultQuery("page_size", "20"))

	params := service.SoraGenerationListParams{
		UserID:      userID,
		Status:      c.Query("status"),
		StorageType: c.Query("storage_type"),
		MediaType:   c.Query("media_type"),
		Page:        page,
		PageSize:    pageSize,
	}

	gens, total, err := h.genService.List(c.Request.Context(), params)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}

	// Resolve presigned URLs for S3-backed records; best effort per record.
	for _, gen := range gens {
		_ = h.genService.ResolveMediaURLs(c.Request.Context(), gen)
	}

	response.Success(c, gin.H{
		"data":  gens,
		"total": total,
		"page":  page,
	})
}
|
||||
|
||||
// GetGeneration returns a single generation record owned by the caller.
// GET /api/v1/sora/generations/:id
func (h *SoraClientHandler) GetGeneration(c *gin.Context) {
	userID := getUserIDFromContext(c)
	if userID == 0 {
		response.Error(c, http.StatusUnauthorized, "未登录")
		return
	}

	id, err := strconv.ParseInt(c.Param("id"), 10, 64)
	if err != nil {
		response.Error(c, http.StatusBadRequest, "无效的 ID")
		return
	}

	// GetByID is scoped to userID, so it doubles as the ownership check.
	gen, err := h.genService.GetByID(c.Request.Context(), id, userID)
	if err != nil {
		response.Error(c, http.StatusNotFound, err.Error())
		return
	}

	// Resolve presigned URLs for S3-backed records; best effort.
	_ = h.genService.ResolveMediaURLs(c.Request.Context(), gen)
	response.Success(c, gen)
}
|
||||
|
||||
// DeleteGeneration deletes a generation record owned by the caller.
// Locally stored media files are cleaned up first; cleanup failures are
// logged but do not block deletion of the record itself.
// DELETE /api/v1/sora/generations/:id
func (h *SoraClientHandler) DeleteGeneration(c *gin.Context) {
	userID := getUserIDFromContext(c)
	if userID == 0 {
		response.Error(c, http.StatusUnauthorized, "未登录")
		return
	}

	id, err := strconv.ParseInt(c.Param("id"), 10, 64)
	if err != nil {
		response.Error(c, http.StatusBadRequest, "无效的 ID")
		return
	}

	// Ownership check: GetByID is scoped to this user.
	gen, err := h.genService.GetByID(c.Request.Context(), id, userID)
	if err != nil {
		response.Error(c, http.StatusNotFound, err.Error())
		return
	}

	// Best-effort local file cleanup before removing the record.
	if gen.StorageType == service.SoraStorageTypeLocal && h.mediaStorage != nil {
		paths := gen.MediaURLs
		if len(paths) == 0 && gen.MediaURL != "" {
			paths = []string{gen.MediaURL}
		}
		if err := h.mediaStorage.DeleteByRelativePaths(paths); err != nil {
			logger.LegacyPrintf("handler.sora_client", "[SoraClient] 删除本地文件失败 id=%d err=%v", id, err)
		}
	}

	if err := h.genService.Delete(c.Request.Context(), id, userID); err != nil {
		response.Error(c, http.StatusNotFound, err.Error())
		return
	}

	response.Success(c, gin.H{"message": "已删除"})
}
|
||||
|
||||
// GetQuota returns the caller's storage quota. When no quota service is
// configured, an "unlimited" placeholder is returned instead of an error.
// GET /api/v1/sora/quota
func (h *SoraClientHandler) GetQuota(c *gin.Context) {
	userID := getUserIDFromContext(c)
	if userID == 0 {
		response.Error(c, http.StatusUnauthorized, "未登录")
		return
	}

	if h.quotaService == nil {
		response.Success(c, service.QuotaInfo{QuotaSource: "unlimited", Source: "unlimited"})
		return
	}

	quota, err := h.quotaService.GetQuota(c.Request.Context(), userID)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Success(c, quota)
}
|
||||
|
||||
// CancelGeneration cancels an in-flight generation task.
// POST /api/v1/sora/generations/:id/cancel
func (h *SoraClientHandler) CancelGeneration(c *gin.Context) {
	userID := getUserIDFromContext(c)
	if userID == 0 {
		response.Error(c, http.StatusUnauthorized, "未登录")
		return
	}

	id, err := strconv.ParseInt(c.Param("id"), 10, 64)
	if err != nil {
		response.Error(c, http.StatusBadRequest, "无效的 ID")
		return
	}

	// Ownership check only: GetByID is scoped to this user; the returned
	// record itself is not needed afterwards.
	gen, err := h.genService.GetByID(c.Request.Context(), id, userID)
	if err != nil {
		response.Error(c, http.StatusNotFound, err.Error())
		return
	}
	_ = gen

	if err := h.genService.MarkCancelled(c.Request.Context(), id); err != nil {
		// Tasks already in a terminal state cannot be cancelled.
		if errors.Is(err, service.ErrSoraGenerationNotActive) {
			response.Error(c, http.StatusConflict, "任务已结束,无法取消")
			return
		}
		response.Error(c, http.StatusBadRequest, err.Error())
		return
	}

	response.Success(c, gin.H{"message": "已取消"})
}
|
||||
|
||||
// SaveToStorage 手动保存 upstream 记录到 S3。
|
||||
// POST /api/v1/sora/generations/:id/save
|
||||
func (h *SoraClientHandler) SaveToStorage(c *gin.Context) {
|
||||
userID := getUserIDFromContext(c)
|
||||
if userID == 0 {
|
||||
response.Error(c, http.StatusUnauthorized, "未登录")
|
||||
return
|
||||
}
|
||||
|
||||
id, err := strconv.ParseInt(c.Param("id"), 10, 64)
|
||||
if err != nil {
|
||||
response.Error(c, http.StatusBadRequest, "无效的 ID")
|
||||
return
|
||||
}
|
||||
|
||||
gen, err := h.genService.GetByID(c.Request.Context(), id, userID)
|
||||
if err != nil {
|
||||
response.Error(c, http.StatusNotFound, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if gen.StorageType != service.SoraStorageTypeUpstream {
|
||||
response.Error(c, http.StatusBadRequest, "仅 upstream 类型的记录可手动保存")
|
||||
return
|
||||
}
|
||||
if gen.MediaURL == "" {
|
||||
response.Error(c, http.StatusBadRequest, "媒体 URL 为空,可能已过期")
|
||||
return
|
||||
}
|
||||
|
||||
if h.s3Storage == nil || !h.s3Storage.Enabled(c.Request.Context()) {
|
||||
response.Error(c, http.StatusServiceUnavailable, "云存储未配置,请联系管理员")
|
||||
return
|
||||
}
|
||||
|
||||
sourceURLs := gen.MediaURLs
|
||||
if len(sourceURLs) == 0 && gen.MediaURL != "" {
|
||||
sourceURLs = []string{gen.MediaURL}
|
||||
}
|
||||
if len(sourceURLs) == 0 {
|
||||
response.Error(c, http.StatusBadRequest, "媒体 URL 为空,可能已过期")
|
||||
return
|
||||
}
|
||||
|
||||
uploadedKeys := make([]string, 0, len(sourceURLs))
|
||||
accessURLs := make([]string, 0, len(sourceURLs))
|
||||
var totalSize int64
|
||||
|
||||
for _, sourceURL := range sourceURLs {
|
||||
objectKey, fileSize, uploadErr := h.s3Storage.UploadFromURL(c.Request.Context(), userID, sourceURL)
|
||||
if uploadErr != nil {
|
||||
if len(uploadedKeys) > 0 {
|
||||
_ = h.s3Storage.DeleteObjects(c.Request.Context(), uploadedKeys)
|
||||
}
|
||||
var upstreamErr *service.UpstreamDownloadError
|
||||
if errors.As(uploadErr, &upstreamErr) && (upstreamErr.StatusCode == http.StatusForbidden || upstreamErr.StatusCode == http.StatusNotFound) {
|
||||
response.Error(c, http.StatusGone, "媒体链接已过期,无法保存")
|
||||
return
|
||||
}
|
||||
response.Error(c, http.StatusInternalServerError, "上传到 S3 失败: "+uploadErr.Error())
|
||||
return
|
||||
}
|
||||
accessURL, err := h.s3Storage.GetAccessURL(c.Request.Context(), objectKey)
|
||||
if err != nil {
|
||||
uploadedKeys = append(uploadedKeys, objectKey)
|
||||
_ = h.s3Storage.DeleteObjects(c.Request.Context(), uploadedKeys)
|
||||
response.Error(c, http.StatusInternalServerError, "生成 S3 访问链接失败: "+err.Error())
|
||||
return
|
||||
}
|
||||
uploadedKeys = append(uploadedKeys, objectKey)
|
||||
accessURLs = append(accessURLs, accessURL)
|
||||
totalSize += fileSize
|
||||
}
|
||||
|
||||
usageAdded := false
|
||||
if totalSize > 0 && h.quotaService != nil {
|
||||
if err := h.quotaService.AddUsage(c.Request.Context(), userID, totalSize); err != nil {
|
||||
_ = h.s3Storage.DeleteObjects(c.Request.Context(), uploadedKeys)
|
||||
var quotaErr *service.QuotaExceededError
|
||||
if errors.As(err, "aErr) {
|
||||
response.Error(c, http.StatusTooManyRequests, "存储配额已满,请删除不需要的作品释放空间")
|
||||
return
|
||||
}
|
||||
response.Error(c, http.StatusInternalServerError, "配额更新失败: "+err.Error())
|
||||
return
|
||||
}
|
||||
usageAdded = true
|
||||
}
|
||||
|
||||
if err := h.genService.UpdateStorageForCompleted(
|
||||
c.Request.Context(),
|
||||
id,
|
||||
accessURLs[0],
|
||||
accessURLs,
|
||||
service.SoraStorageTypeS3,
|
||||
uploadedKeys,
|
||||
totalSize,
|
||||
); err != nil {
|
||||
_ = h.s3Storage.DeleteObjects(c.Request.Context(), uploadedKeys)
|
||||
if usageAdded && h.quotaService != nil {
|
||||
_ = h.quotaService.ReleaseUsage(c.Request.Context(), userID, totalSize)
|
||||
}
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
|
||||
response.Success(c, gin.H{
|
||||
"message": "已保存到 S3",
|
||||
"object_key": uploadedKeys[0],
|
||||
"object_keys": uploadedKeys,
|
||||
})
|
||||
}
|
||||
|
||||
// GetStorageStatus reports which storage backends are enabled and, for S3,
// whether it currently passes its health check.
// GET /api/v1/sora/storage-status
func (h *SoraClientHandler) GetStorageStatus(c *gin.Context) {
	s3Enabled := h.s3Storage != nil && h.s3Storage.Enabled(c.Request.Context())
	// Health is only probed when S3 is enabled; otherwise it reports false.
	s3Healthy := false
	if s3Enabled {
		s3Healthy = h.s3Storage.IsHealthy(c.Request.Context())
	}
	localEnabled := h.mediaStorage != nil && h.mediaStorage.Enabled()
	response.Success(c, gin.H{
		"s3_enabled":    s3Enabled,
		"s3_healthy":    s3Healthy,
		"local_enabled": localEnabled,
	})
}
|
||||
|
||||
// cleanupStoredMedia best-effort deletes media that was persisted to S3 or
// local storage (used when a generation is cancelled or a state transition
// fails after storage). Failures are logged, never propagated. The upstream
// storage type owns no stored bytes, so the switch has no case for it.
func (h *SoraClientHandler) cleanupStoredMedia(ctx context.Context, storageType string, s3Keys []string, localPaths []string) {
	switch storageType {
	case service.SoraStorageTypeS3:
		if h.s3Storage != nil && len(s3Keys) > 0 {
			if err := h.s3Storage.DeleteObjects(ctx, s3Keys); err != nil {
				logger.LegacyPrintf("handler.sora_client", "[SoraClient] 清理 S3 文件失败 keys=%v err=%v", s3Keys, err)
			}
		}
	case service.SoraStorageTypeLocal:
		if h.mediaStorage != nil && len(localPaths) > 0 {
			if err := h.mediaStorage.DeleteByRelativePaths(localPaths); err != nil {
				logger.LegacyPrintf("handler.sora_client", "[SoraClient] 清理本地文件失败 paths=%v err=%v", localPaths, err)
			}
		}
	}
}
|
||||
|
||||
// getUserIDFromContext extracts the authenticated user ID from the gin
// context, trying, in order: the auth-subject middleware, the "user_id"
// context key (int64 / float64 / numeric string), and finally the "userID"
// key populated from JWT claims. Returns 0 when no usable ID is found.
func getUserIDFromContext(c *gin.Context) int64 {
	if subject, ok := middleware2.GetAuthSubjectFromContext(c); ok && subject.UserID > 0 {
		return subject.UserID
	}

	if id, ok := c.Get("user_id"); ok {
		switch v := id.(type) {
		case int64:
			return v
		case float64:
			// JSON-decoded numbers arrive as float64.
			return int64(v)
		case string:
			// Parse failures yield 0, the "not authenticated" sentinel.
			n, _ := strconv.ParseInt(v, 10, 64)
			return n
		}
	}
	// Fall back to the key set from JWT claims.
	if id, ok := c.Get("userID"); ok {
		if v, ok := id.(int64); ok {
			return v
		}
	}
	return 0
}
|
||||
|
||||
// groupIDForLog converts an optional group-ID pointer into a loggable value,
// using 0 to represent "no group".
func groupIDForLog(groupID *int64) int64 {
	if groupID != nil {
		return *groupID
	}
	return 0
}
|
||||
|
||||
// trimForLog trims surrounding whitespace and truncates the result to at most
// maxLen bytes for log output, appending a "...(truncated)" marker when the
// string was cut. A non-positive maxLen disables truncation entirely.
func trimForLog(raw string, maxLen int) string {
	s := strings.TrimSpace(raw)
	if maxLen > 0 && len(s) > maxLen {
		return s[:maxLen] + "...(truncated)"
	}
	return s
}
|
||||
|
||||
// GetModels returns the available Sora model families.
// The list is synced from the upstream Sora API when possible, degrading to
// the local static configuration on failure (see getModelFamilies).
// GET /api/v1/sora/models
func (h *SoraClientHandler) GetModels(c *gin.Context) {
	families := h.getModelFamilies(c.Request.Context())
	response.Success(c, families)
}
|
||||
|
||||
// getModelFamilies returns the model-family list, backed by a cache with two
// TTLs: modelCacheTTL after a successful upstream sync, and the (presumably
// shorter — confirm the constants) modelCacheFailedTTL after falling back to
// local config, so failed syncs are retried sooner.
func (h *SoraClientHandler) getModelFamilies(ctx context.Context) []service.SoraModelFamily {
	// Fast path: check the cache under the read lock.
	h.modelCacheMu.RLock()
	ttl := modelCacheTTL
	if !h.modelCacheUpstream {
		ttl = modelCacheFailedTTL
	}
	if h.cachedFamilies != nil && time.Since(h.modelCacheTime) < ttl {
		families := h.cachedFamilies
		h.modelCacheMu.RUnlock()
		return families
	}
	h.modelCacheMu.RUnlock()

	// Slow path: refresh the cache under the write lock.
	h.modelCacheMu.Lock()
	defer h.modelCacheMu.Unlock()

	// Double-check: another goroutine may have refreshed between the locks.
	ttl = modelCacheTTL
	if !h.modelCacheUpstream {
		ttl = modelCacheFailedTTL
	}
	if h.cachedFamilies != nil && time.Since(h.modelCacheTime) < ttl {
		return h.cachedFamilies
	}

	// Try the upstream API first; fall back to the local static config.
	families, err := h.fetchUpstreamModels(ctx)
	if err != nil {
		logger.LegacyPrintf("handler.sora_client", "[SoraClient] 上游模型获取失败,使用本地配置: %v", err)
		families = service.BuildSoraModelFamilies()
		h.cachedFamilies = families
		h.modelCacheTime = time.Now()
		h.modelCacheUpstream = false
		return families
	}

	logger.LegacyPrintf("handler.sora_client", "[SoraClient] 从上游同步到 %d 个模型家族", len(families))
	h.cachedFamilies = families
	h.modelCacheTime = time.Now()
	h.modelCacheUpstream = true
	return families
}
|
||||
|
||||
// fetchUpstreamModels fetches the model list from the upstream Sora API and
// converts it into model families. Only API-key accounts are supported; the
// request is capped at 10 seconds and the response body at 1 MiB.
func (h *SoraClientHandler) fetchUpstreamModels(ctx context.Context) ([]service.SoraModelFamily, error) {
	if h.gatewayService == nil {
		return nil, fmt.Errorf("gatewayService 未初始化")
	}

	// Force the Sora platform for account selection.
	ctx = context.WithValue(ctx, ctxkey.ForcePlatform, service.PlatformSora)

	// Pick any Sora-capable account; a representative model ID drives selection.
	account, err := h.gatewayService.SelectAccountForModel(ctx, nil, "", "sora2-landscape-10s")
	if err != nil {
		return nil, fmt.Errorf("选择 Sora 账号失败: %w", err)
	}

	// Only API-key accounts expose a models endpoint we can call directly.
	if account.Type != service.AccountTypeAPIKey {
		return nil, fmt.Errorf("当前账号类型 %s 不支持模型同步", account.Type)
	}

	apiKey := account.GetCredential("api_key")
	if apiKey == "" {
		return nil, fmt.Errorf("账号缺少 api_key")
	}

	baseURL := account.GetBaseURL()
	if baseURL == "" {
		return nil, fmt.Errorf("账号缺少 base_url")
	}

	// Build the upstream model-list request URL.
	modelsURL := strings.TrimRight(baseURL, "/") + "/sora/v1/models"

	reqCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	req, err := http.NewRequestWithContext(reqCtx, http.MethodGet, modelsURL, nil)
	if err != nil {
		return nil, fmt.Errorf("创建请求失败: %w", err)
	}
	req.Header.Set("Authorization", "Bearer "+apiKey)

	// Client-level timeout in addition to the context deadline above.
	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("请求上游失败: %w", err)
	}
	defer func() {
		_ = resp.Body.Close()
	}()

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("上游返回状态码 %d", resp.StatusCode)
	}

	// Cap the body read at 1 MiB to guard against oversized responses.
	body, err := io.ReadAll(io.LimitReader(resp.Body, 1*1024*1024))
	if err != nil {
		return nil, fmt.Errorf("读取响应失败: %w", err)
	}

	// Parse the OpenAI-style model list ({"data":[{"id":...},...]}).
	var modelsResp struct {
		Data []struct {
			ID string `json:"id"`
		} `json:"data"`
	}
	if err := json.Unmarshal(body, &modelsResp); err != nil {
		return nil, fmt.Errorf("解析响应失败: %w", err)
	}

	if len(modelsResp.Data) == 0 {
		return nil, fmt.Errorf("上游返回空模型列表")
	}

	// Collect the raw model IDs.
	modelIDs := make([]string, 0, len(modelsResp.Data))
	for _, m := range modelsResp.Data {
		modelIDs = append(modelIDs, m.ID)
	}

	// Group the IDs into model families.
	families := service.BuildSoraModelFamiliesFromIDs(modelIDs)
	if len(families) == 0 {
		return nil, fmt.Errorf("未能从上游模型列表中识别出有效的模型家族")
	}

	return families, nil
}
|
||||
3153
backend/internal/handler/sora_client_handler_test.go
Normal file
3153
backend/internal/handler/sora_client_handler_test.go
Normal file
File diff suppressed because it is too large
Load Diff
@@ -7,7 +7,6 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
@@ -17,6 +16,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/config"
|
||||
pkghttputil "github.com/Wei-Shaw/sub2api/internal/pkg/httputil"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/ip"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/logger"
|
||||
middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware"
|
||||
@@ -107,7 +107,7 @@ func (h *SoraGatewayHandler) ChatCompletions(c *gin.Context) {
|
||||
zap.Any("group_id", apiKey.GroupID),
|
||||
)
|
||||
|
||||
body, err := io.ReadAll(c.Request.Body)
|
||||
body, err := pkghttputil.ReadRequestBodyWithPrealloc(c.Request)
|
||||
if err != nil {
|
||||
if maxErr, ok := extractMaxBytesError(err); ok {
|
||||
h.errorResponse(c, http.StatusRequestEntityTooLarge, "invalid_request_error", buildBodyTooLargeMessage(maxErr.Limit))
|
||||
@@ -461,6 +461,14 @@ func (h *SoraGatewayHandler) submitUsageRecordTask(task service.UsageRecordTask)
|
||||
// 回退路径:worker 池未注入时同步执行,避免退回到无界 goroutine 模式。
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
defer func() {
|
||||
if recovered := recover(); recovered != nil {
|
||||
logger.L().With(
|
||||
zap.String("component", "handler.sora_gateway.chat_completions"),
|
||||
zap.Any("panic", recovered),
|
||||
).Error("sora.usage_record_task_panic_recovered")
|
||||
}
|
||||
}()
|
||||
task(ctx)
|
||||
}
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user