From 3bddbb6afe4f08024415c2f0f8ef276b3d5451dc Mon Sep 17 00:00:00 2001 From: shaw Date: Mon, 9 Feb 2026 09:42:29 +0800 Subject: [PATCH 001/175] chore: update version --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index bc88be6e..5087e794 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.74.7 +0.1.76 \ No newline at end of file From 084e0adb349ab00908049c6c250fe3301e324ca2 Mon Sep 17 00:00:00 2001 From: erio Date: Mon, 9 Feb 2026 12:32:35 +0800 Subject: [PATCH 002/175] feat: squash merge all changes from develop-0.1.75 Squash of 124 commits from the legacy develop branch (develop-0.1.75) onto a clean v0.1.75 upstream base, to simplify future upstream merges. Key changes included: - Refactor scope-level rate limiting to model-level rate limiting - Antigravity gateway service improvements (smart retry, error policy) - Digest session store (flat cache replacing Trie-based store) - Client disconnect detection during streaming - Gemini messages compatibility service enhancements - Scheduler shuffle for thundering herd prevention - Session hash generation improvements - Frontend customizations (WeChat service, HomeView, etc.) 
- Ops monitoring scope cleanup --- .github/workflows/backend-ci.yml | 2 + .gitignore | 1 + AGENTS.md | 723 ++++++++++ CLAUDE.md | 723 ++++++++++ backend/cmd/server/VERSION | 2 +- backend/cmd/server/wire_gen.go | 3 +- backend/go.mod | 1 + backend/go.sum | 2 + backend/internal/handler/dto/types.go | 8 - backend/internal/handler/gateway_handler.go | 50 +- .../internal/handler/gemini_v1beta_handler.go | 36 +- backend/internal/repository/account_repo.go | 47 - backend/internal/repository/gateway_cache.go | 225 --- .../gateway_cache_integration_test.go | 151 -- ...teway_cache_model_load_integration_test.go | 234 ---- backend/internal/server/api_contract_test.go | 4 - backend/internal/service/account_service.go | 1 - .../service/account_service_delete_test.go | 4 - backend/internal/service/anthropic_session.go | 10 - .../service/anthropic_session_test.go | 37 - .../service/antigravity_gateway_service.go | 782 ++++++++--- .../antigravity_gateway_service_test.go | 464 ++++++- .../service/antigravity_quota_scope.go | 149 +- .../service/antigravity_rate_limit_test.go | 79 +- .../service/antigravity_smart_retry_test.go | 40 +- .../internal/service/digest_session_store.go | 69 + .../service/digest_session_store_test.go | 312 +++++ .../service/error_policy_integration_test.go | 366 +++++ backend/internal/service/error_policy_test.go | 289 ++++ .../service/gateway_multiplatform_test.go | 26 - backend/internal/service/gateway_request.go | 67 +- .../internal/service/gateway_request_test.go | 111 +- backend/internal/service/gateway_service.go | 395 ++---- .../service/gateway_service_benchmark_test.go | 2 +- .../service/gemini_error_policy_test.go | 384 ++++++ .../service/gemini_messages_compat_service.go | 130 +- .../service/gemini_multiplatform_test.go | 26 - backend/internal/service/gemini_session.go | 53 - .../gemini_session_integration_test.go | 95 +- .../internal/service/gemini_session_test.go | 92 -- .../service/generate_session_hash_test.go | 1213 +++++++++++++++++ 
.../internal/service/model_rate_limit_test.go | 152 +-- .../service/openai_gateway_service.go | 5 +- .../service/openai_gateway_service_test.go | 24 - .../service/ops_account_availability.go | 20 - .../internal/service/ops_realtime_models.go | 39 +- backend/internal/service/ops_retry.go | 5 +- backend/internal/service/ratelimit_service.go | 26 + .../service/scheduler_shuffle_test.go | 318 +++++ backend/internal/service/wire.go | 1 + deploy/docker-compose.yml | 41 +- frontend/public/wechat-qr.jpg | Bin 0 -> 151392 bytes frontend/src/api/admin/ops.ts | 3 - .../account/AccountStatusIndicator.vue | 30 - .../components/account/CreateAccountModal.vue | 5 +- .../components/common/WechatServiceButton.vue | 104 ++ frontend/src/components/layout/AppHeader.vue | 17 - frontend/src/i18n/locales/en.ts | 2 - frontend/src/i18n/locales/zh.ts | 2 - frontend/src/types/index.ts | 3 - frontend/src/views/HomeView.vue | 173 ++- .../ops/components/OpsConcurrencyCard.vue | 24 - stress_test_gemini_session.sh | 127 ++ 63 files changed, 6396 insertions(+), 2133 deletions(-) create mode 100644 AGENTS.md create mode 100644 CLAUDE.md delete mode 100644 backend/internal/repository/gateway_cache_model_load_integration_test.go create mode 100644 backend/internal/service/digest_session_store.go create mode 100644 backend/internal/service/digest_session_store_test.go create mode 100644 backend/internal/service/error_policy_integration_test.go create mode 100644 backend/internal/service/error_policy_test.go create mode 100644 backend/internal/service/gemini_error_policy_test.go create mode 100644 backend/internal/service/generate_session_hash_test.go create mode 100644 backend/internal/service/scheduler_shuffle_test.go create mode 100644 frontend/public/wechat-qr.jpg create mode 100644 frontend/src/components/common/WechatServiceButton.vue create mode 100644 stress_test_gemini_session.sh diff --git a/.github/workflows/backend-ci.yml b/.github/workflows/backend-ci.yml index 2596a18c..84575a96 100644 
--- a/.github/workflows/backend-ci.yml +++ b/.github/workflows/backend-ci.yml @@ -17,6 +17,7 @@ jobs: go-version-file: backend/go.mod check-latest: false cache: true + cache-dependency-path: backend/go.sum - name: Verify Go version run: | go version | grep -q 'go1.25.7' @@ -36,6 +37,7 @@ jobs: go-version-file: backend/go.mod check-latest: false cache: true + cache-dependency-path: backend/go.sum - name: Verify Go version run: | go version | grep -q 'go1.25.7' diff --git a/.gitignore b/.gitignore index 48172982..2f2bfbdf 100644 --- a/.gitignore +++ b/.gitignore @@ -78,6 +78,7 @@ Desktop.ini # =================== tmp/ temp/ +logs/ *.tmp *.temp *.log diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 00000000..a7a3e34a --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,723 @@ +# Sub2API 开发说明 + +## 版本管理策略 + +### 版本号规则 + +我们在官方版本号后面添加自己的小版本号: + +- 官方版本:`v0.1.68` +- 我们的版本:`v0.1.68.1`、`v0.1.68.2`(递增) + +### 分支策略 + +| 分支 | 说明 | +|------|------| +| `main` | 我们的主分支,包含所有定制功能 | +| `release/custom-X.Y.Z` | 基于官方 `vX.Y.Z` 的发布分支 | +| `upstream/main` | 上游官方仓库 | + +--- + +## 发布流程(基于新官方版本) + +当官方发布新版本(如 `v0.1.69`)时: + +### 1. 同步上游并创建发布分支 + +```bash +# 获取上游最新代码 +git fetch upstream --tags + +# 基于官方标签创建新的发布分支 +git checkout v0.1.69 -b release/custom-0.1.69 + +# 合并我们的 main 分支(包含所有定制功能) +git merge main --no-edit + +# 解决可能的冲突后继续 +``` + +### 2. 更新版本号并打标签 + +```bash +# 更新版本号文件 +echo "0.1.69.1" > backend/cmd/server/VERSION +git add backend/cmd/server/VERSION +git commit -m "chore: bump version to 0.1.69.1" + +# 打上我们自己的标签 +git tag v0.1.69.1 + +# 推送分支和标签 +git push origin release/custom-0.1.69 +git push origin v0.1.69.1 +``` + +### 3. 更新 main 分支 + +```bash +# 将发布分支合并回 main,保持 main 包含最新定制功能 +git checkout main +git merge release/custom-0.1.69 +git push origin main +``` + +--- + +## 热修复发布(在现有版本上修复) + +当需要在当前版本上发布修复时: + +```bash +# 在当前发布分支上修复 +git checkout release/custom-0.1.68 +# ... 进行修复 ... 
+git commit -m "fix: 修复描述" + +# 递增小版本号 +echo "0.1.68.2" > backend/cmd/server/VERSION +git add backend/cmd/server/VERSION +git commit -m "chore: bump version to 0.1.68.2" + +# 打标签并推送 +git tag v0.1.68.2 +git push origin release/custom-0.1.68 +git push origin v0.1.68.2 + +# 同步修复到 main +git checkout main +git cherry-pick <commit-hash> +git push origin main +``` + +--- + +## 服务器部署流程 + +### 前置条件 + +- 本地已配置 SSH 别名 `clicodeplus` 连接到服务器 +- 服务器部署目录:`/root/sub2api`(正式)、`/root/sub2api-beta`(测试) +- 服务器使用 Docker Compose 部署 + +### 部署环境说明 + +| 环境 | 目录 | 端口 | 数据库 | 容器名 | +|------|------|------|--------|--------| +| 正式 | `/root/sub2api` | 8080 | `sub2api` | `sub2api` | +| Beta | `/root/sub2api-beta` | 8084 | `beta` | `sub2api-beta` | + +### 外部数据库 + +正式和 Beta 环境**共用外部 PostgreSQL 数据库**(非容器内数据库),配置在 `.env` 文件中: +- `DATABASE_HOST`:外部数据库地址 +- `DATABASE_SSLMODE`:SSL 模式(通常为 `require`) +- `POSTGRES_USER` / `POSTGRES_DB`:用户名和数据库名 + +#### 数据库操作命令 + +通过 SSH 在服务器上执行数据库操作: + +```bash +# 正式环境 - 查询迁移记录 +ssh clicodeplus "source /root/sub2api/deploy/.env && PGPASSWORD=\"\$POSTGRES_PASSWORD\" psql -h \$DATABASE_HOST -U \$POSTGRES_USER -d \$POSTGRES_DB -c 'SELECT * FROM schema_migrations ORDER BY applied_at DESC LIMIT 5;'" + +# Beta 环境 - 查询迁移记录 +ssh clicodeplus "source /root/sub2api-beta/deploy/.env && PGPASSWORD=\"\$POSTGRES_PASSWORD\" psql -h \$DATABASE_HOST -U \$POSTGRES_USER -d \$POSTGRES_DB -c 'SELECT * FROM schema_migrations ORDER BY applied_at DESC LIMIT 5;'" + +# Beta 环境 - 清除指定迁移记录(重新执行迁移) +ssh clicodeplus "source /root/sub2api-beta/deploy/.env && PGPASSWORD=\"\$POSTGRES_PASSWORD\" psql -h \$DATABASE_HOST -U \$POSTGRES_USER -d \$POSTGRES_DB -c \"DELETE FROM schema_migrations WHERE filename LIKE '%049%';\"" + +# Beta 环境 - 更新账号数据 +ssh clicodeplus "source /root/sub2api-beta/deploy/.env && PGPASSWORD=\"\$POSTGRES_PASSWORD\" psql -h \$DATABASE_HOST -U \$POSTGRES_USER -d \$POSTGRES_DB -c \"UPDATE accounts SET credentials = credentials - 'model_mapping' WHERE platform = 'antigravity';\"" +``` + +> **注意**:使用 
`source .env` 加载环境变量,避免在命令行中暴露密码。 + +### 部署步骤 + +**重要:每次部署都必须递增版本号!** + +#### 0. 递增版本号(本地操作) + +每次部署前,先在本地递增小版本号: + +```bash +# 查看当前版本号 +cat backend/cmd/server/VERSION +# 假设当前是 0.1.69.1 + +# 递增版本号 +echo "0.1.69.2" > backend/cmd/server/VERSION +git add backend/cmd/server/VERSION +git commit -m "chore: bump version to 0.1.69.2" +git push origin release/custom-0.1.69 +``` + +#### 1. 服务器拉取代码 + +```bash +ssh clicodeplus "cd /root/sub2api && git fetch fork && git checkout -B release/custom-0.1.69 fork/release/custom-0.1.69" +``` + +#### 2. 服务器构建镜像 + +```bash +ssh clicodeplus "cd /root/sub2api && docker build --no-cache -t sub2api:latest -f Dockerfile ." +``` + +#### 3. 更新镜像标签并重启服务 + +```bash +ssh clicodeplus "docker tag sub2api:latest weishaw/sub2api:latest" +ssh clicodeplus "cd /root/sub2api/deploy && docker compose up -d --force-recreate sub2api" +``` + +#### 4. 验证部署 + +```bash +# 查看启动日志 +ssh clicodeplus "docker logs sub2api --tail 20" + +# 确认版本号(必须与步骤 0 中设置的版本号一致) +ssh clicodeplus "cat /root/sub2api/backend/cmd/server/VERSION" + +# 检查容器状态 +ssh clicodeplus "docker ps | grep sub2api" +``` + +--- + +## Beta 并行部署(不影响现网) + +目标:在同一台服务器上并行启动一个 beta 实例(例如端口 `8084`),**严禁改动/重启**现网实例(默认目录 `/root/sub2api`)。 + +### 设计原则 + +- **新目录**:beta 使用独立目录,例如 `/root/sub2api-beta`。 +- **敏感信息只放 `.env`**:beta 的数据库密码、JWT_SECRET 等只写入 `/root/sub2api-beta/deploy/.env`,不要提交到 git。 +- **独立 Compose Project**:通过 `docker compose -p sub2api-beta ...` 启动,确保 network/volume 隔离。 +- **独立端口**:通过 `.env` 的 `SERVER_PORT` 映射宿主机端口(例如 `8084:8080`)。 + +### 前置检查 + +```bash +# 1) 确保 8084 未被占用 +ssh clicodeplus "ss -ltnp | grep :8084 || echo '8084 is free'" + +# 2) 确认现网容器还在(只读检查) +ssh clicodeplus "docker ps --format 'table {{.Names}}\t{{.Image}}\t{{.Ports}}' | sed -n '1,200p'" +``` + +### 首次部署步骤 + +```bash +# 0) 进入服务器 +ssh clicodeplus + +# 1) 克隆代码到新目录(示例使用你的 fork) +cd /root +git clone https://github.com/touwaeriol/sub2api.git sub2api-beta +cd /root/sub2api-beta +git checkout release/custom-0.1.71 + +# 2) 准备 beta 的 
.env(敏感信息只写这里) +cd /root/sub2api-beta/deploy + +# 推荐:从现网 .env 复制,保证除 DB 名/用户/端口外完全一致 +cp -f /root/sub2api/deploy/.env ./.env + +# 仅修改以下三项(其他保持不变) +perl -pi -e 's/^SERVER_PORT=.*/SERVER_PORT=8084/' ./.env +perl -pi -e 's/^POSTGRES_USER=.*/POSTGRES_USER=beta/' ./.env +perl -pi -e 's/^POSTGRES_DB=.*/POSTGRES_DB=beta/' ./.env + +# 3) 写 compose override(避免与现网容器名冲突,镜像使用本地构建的 sub2api:beta) +cat > docker-compose.override.yml <<'YAML' +services: + sub2api: + image: sub2api:beta + container_name: sub2api-beta + redis: + container_name: sub2api-beta-redis +YAML + +# 4) 构建 beta 镜像(基于当前代码) +cd /root/sub2api-beta +docker build -t sub2api:beta -f Dockerfile . + +# 5) 启动 beta(独立 project,确保不影响现网) +cd /root/sub2api-beta/deploy +docker compose -p sub2api-beta --env-file .env -f docker-compose.yml -f docker-compose.override.yml up -d + +# 6) 验证 beta +curl -fsS http://127.0.0.1:8084/health +docker logs sub2api-beta --tail 50 +``` + +### 数据库配置约定(beta) + +- 数据库地址/SSL/密码:与现网一致(从现网 `.env` 复制即可)。 +- 仅修改: + - `POSTGRES_USER=beta` + - `POSTGRES_DB=beta` + +注意:需要数据库侧已存在 `beta` 用户与 `beta` 数据库,并授予权限;否则容器会启动失败并不断重启。 + +### 更新 beta(拉代码 + 仅重建 beta 容器) + +```bash +ssh clicodeplus "set -e; cd /root/sub2api-beta && git fetch --all --tags && git checkout -f release/custom-0.1.71 && git reset --hard origin/release/custom-0.1.71" +ssh clicodeplus "cd /root/sub2api-beta && docker build -t sub2api:beta -f Dockerfile ." +ssh clicodeplus "cd /root/sub2api-beta/deploy && docker compose -p sub2api-beta --env-file .env -f docker-compose.yml -f docker-compose.override.yml up -d --no-deps --force-recreate sub2api" +ssh clicodeplus "curl -fsS http://127.0.0.1:8084/health" +``` + +### 停止/回滚 beta(只影响 beta) + +```bash +ssh clicodeplus "cd /root/sub2api-beta/deploy && docker compose -p sub2api-beta -f docker-compose.yml -f docker-compose.override.yml down" +``` + +--- + +## 服务器首次部署 + +### 1. 
克隆代码并配置远程仓库 + +```bash +ssh clicodeplus +cd /root +git clone https://github.com/Wei-Shaw/sub2api.git +cd sub2api + +# 添加 fork 仓库 +git remote add fork https://github.com/touwaeriol/sub2api.git +``` + +### 2. 切换到定制分支并配置环境 + +```bash +git fetch fork +git checkout -B release/custom-0.1.69 fork/release/custom-0.1.69 + +cd deploy +cp .env.example .env +vim .env # 配置 DATABASE_URL, REDIS_URL, JWT_SECRET 等 +``` + +### 3. 构建并启动 + +```bash +cd /root/sub2api +docker build -t sub2api:latest -f Dockerfile . +docker tag sub2api:latest weishaw/sub2api:latest +cd deploy && docker compose up -d +``` + +### 6. 启动服务 + +```bash +# 进入 deploy 目录 +cd deploy + +# 启动所有服务(PostgreSQL、Redis、sub2api) +docker compose up -d + +# 查看服务状态 +docker compose ps +``` + +### 7. 验证部署 + +```bash +# 查看应用日志 +docker logs sub2api --tail 50 + +# 检查健康状态 +curl http://localhost:8080/health + +# 确认版本号 +cat /root/sub2api/backend/cmd/server/VERSION +``` + +### 8. 常用运维命令 + +```bash +# 查看实时日志 +docker logs -f sub2api + +# 重启服务 +docker compose restart sub2api + +# 停止所有服务 +docker compose down + +# 停止并删除数据卷(慎用!会删除数据库数据) +docker compose down -v + +# 查看资源使用情况 +docker stats sub2api +``` + +--- + +## 定制功能说明 + +当前定制分支包含以下功能(相对于官方版本): + +### UI/UX 定制 + +| 功能 | 说明 | +|------|------| +| 首页优化 | 面向用户的价值主张设计 | +| 移除 GitHub 链接 | 用户菜单中不显示 GitHub 导航 | +| 微信客服按钮 | 首页悬浮微信客服入口 | +| 限流时间精确显示 | 账号限流时间显示精确到秒 | + +### Antigravity 平台增强 + +| 功能 | 说明 | +|------|------| +| Scope 级别限流 | 按配额域(claude/gemini_text/gemini_image)独立限流,避免整个账号被锁定 | +| 模型级别限流 | 按具体模型(如 claude-opus-4-5)独立限流,更精细的限流控制 | +| 限流预检查 | 调度时预检查账号/模型限流状态,避免选中已限流账号 | +| 秒级冷却时间 | 支持 429 响应的秒级精确冷却时间 | +| 身份注入优化 | 模型身份信息注入 + 静默边界防止身份泄露 | +| thoughtSignature 修复 | Gemini 3 函数调用 400 错误修复 | +| max_tokens 自动修正 | 自动修正 max_tokens <= budget_tokens 导致的 400 错误 | + +### 调度算法优化 + +| 功能 | 说明 | +|------|------| +| 分层过滤选择 | 调度算法从全排序改为分层过滤,提升性能 | +| LRU 随机选择 | 相同 LRU 时间时随机选择,避免账号集中 | +| 限流等待阈值配置化 | 可配置的限流等待阈值 | + +### 运维增强 + +| 功能 | 说明 | +|------|------| +| Scope 限流统计 | 运维界面展示 Antigravity 账号 scope 级别限流统计 | 
+| 账号限流状态显示 | 账号列表显示 scope 和模型级别限流状态 | +| 清除限流按钮增强 | 有 scope/模型限流时也显示清除限流按钮 | + +### 其他修复 + +| 功能 | 说明 | +|------|------| +| .gitattributes | 确保迁移文件使用 LF 换行符(解决 Windows 下 SQL 摘要不一致) | +| 部署配置优化 | DATABASE_HOST 和 DATABASE_SSLMODE 可通过 .env 配置 | + +--- + +## 注意事项 + +1. **前端必须打包进镜像**:使用 `docker build` 在服务器上构建,Dockerfile 会自动编译前端并 embed 到后端二进制中 + +2. **镜像标签**:docker-compose.yml 使用 `weishaw/sub2api:latest`,本地构建后需要 `docker tag` 覆盖 + +3. **Windows 换行符问题**:已通过 `.gitattributes` 解决,确保 `*.sql` 文件始终使用 LF + +4. **版本号管理**:每次发布必须更新 `backend/cmd/server/VERSION` 并打标签 + +5. **合并冲突**:合并上游新版本时,重点关注以下文件可能的冲突: + - `backend/internal/service/antigravity_gateway_service.go` + - `backend/internal/service/gateway_service.go` + - `backend/internal/pkg/antigravity/request_transformer.go` + +--- + +## Go 代码规范 + +### 1. 函数设计 + +#### 单一职责原则 +- **函数行数**:单个函数常规不应超过 **30 行**,超过时应拆分为子函数。若某段逻辑确实不可拆分(如复杂的状态机、协议解析等),可以例外,但需添加注释说明原因 +- **嵌套层级**:避免超过 3 层嵌套,使用 early return 减少嵌套 + +```go +// ❌ 不推荐:深层嵌套 +func process(data []Item) { + for _, item := range data { + if item.Valid { + if item.Type == "A" { + if item.Status == "active" { + // 业务逻辑... + } + } + } + } +} + +// ✅ 推荐:early return +func process(data []Item) { + for _, item := range data { + if !item.Valid { + continue + } + if item.Type != "A" { + continue + } + if item.Status != "active" { + continue + } + // 业务逻辑... + } +} +``` + +#### 复杂逻辑提取 +将复杂的条件判断或处理逻辑提取为独立函数: + +```go +// ❌ 不推荐:内联复杂逻辑 +if resp.StatusCode == 429 || resp.StatusCode == 503 { + // 80+ 行处理逻辑... +} + +// ✅ 推荐:提取为独立函数 +result := handleRateLimitResponse(resp, params) +switch result.action { +case actionRetry: + continue +case actionBreak: + return result.resp, nil +} +``` + +### 2. 
重复代码消除 + +#### 配置获取模式 +将重复的配置获取逻辑提取为方法: + +```go +// ❌ 不推荐:重复代码 +logBody := s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBody +maxBytes := 2048 +if s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes > 0 { + maxBytes = s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes +} + +// ✅ 推荐:提取为方法 +func (s *Service) getLogConfig() (logBody bool, maxBytes int) { + maxBytes = 2048 + if s.settingService == nil || s.settingService.cfg == nil { + return false, maxBytes + } + cfg := s.settingService.cfg.Gateway + if cfg.LogUpstreamErrorBodyMaxBytes > 0 { + maxBytes = cfg.LogUpstreamErrorBodyMaxBytes + } + return cfg.LogUpstreamErrorBody, maxBytes +} +``` + +### 3. 常量管理 + +#### 避免魔法数字 +所有硬编码的数值都应定义为常量: + +```go +// ❌ 不推荐 +if retryDelay >= 10*time.Second { + resetAt := time.Now().Add(30 * time.Second) +} + +// ✅ 推荐 +const ( + rateLimitThreshold = 10 * time.Second + defaultRateLimitDuration = 30 * time.Second +) + +if retryDelay >= rateLimitThreshold { + resetAt := time.Now().Add(defaultRateLimitDuration) +} +``` + +#### 注释引用常量名 +在注释中引用常量名而非硬编码值: + +```go +// ❌ 不推荐 +// < 10s: 等待后重试 + +// ✅ 推荐 +// < rateLimitThreshold: 等待后重试 +``` + +### 4. 错误处理 + +#### 使用结构化日志 +优先使用 `slog` 进行结构化日志记录: + +```go +// ❌ 不推荐 +log.Printf("%s status=%d model_rate_limit_failed model=%s error=%v", prefix, statusCode, modelName, err) + +// ✅ 推荐 +slog.Error("failed to set model rate limit", + "prefix", prefix, + "status_code", statusCode, + "model", modelName, + "error", err, +) +``` + +### 5. 测试规范 + +#### Mock 函数签名同步 +修改函数签名时,必须同步更新所有测试中的 mock 函数: + +```go +// 如果修改了 handleError 签名 +handleError func(..., groupID int64, sessionHash string) *Result + +// 必须同步更新测试中的 mock +handleError: func(..., groupID int64, sessionHash string) *Result { + return nil +}, +``` + +#### 测试构建标签 +统一使用测试构建标签: + +```go +//go:build unit + +package service +``` + +### 6. 
时间格式解析 + +#### 使用标准库 +优先使用 `time.ParseDuration`,支持所有 Go duration 格式: + +```go +// ❌ 不推荐:手动限制格式 +if !strings.HasSuffix(delay, "s") || strings.Contains(delay, "m") { + continue +} + +// ✅ 推荐:使用标准库 +dur, err := time.ParseDuration(delay) // 支持 "0.5s", "4m50s", "1h30m" 等 +``` + +### 7. 接口设计 + +#### 接口隔离原则 +定义最小化接口,只包含必需的方法: + +```go +// ❌ 不推荐:使用过于宽泛的接口 +type AccountRepository interface { + // 20+ 个方法... +} + +// ✅ 推荐:定义最小化接口 +type ModelRateLimiter interface { + SetModelRateLimit(ctx context.Context, id int64, modelKey string, resetAt time.Time) error +} +``` + +### 8. 并发安全 + +#### 共享数据保护 +访问可能被并发修改的数据时,确保线程安全: + +```go +// 如果 Account.Extra 可能被并发修改 +// 需要使用互斥锁或原子操作保护读取 +func (a *Account) GetRateLimitRemainingTime(model string) time.Duration { + a.mu.RLock() + defer a.mu.RUnlock() + // 读取 Extra 字段... +} +``` + +### 9. 命名规范 + +#### 一致的命名风格 +- 常量使用 camelCase:`rateLimitThreshold` +- 类型使用 PascalCase:`AntigravityQuotaScope` +- 同一概念使用统一命名:`Threshold` 或 `Limit`,不要混用 + +```go +// ❌ 不推荐:命名不一致 +antigravitySmartRetryMinWait // 使用 Min +antigravityRateLimitThreshold // 使用 Threshold + +// ✅ 推荐:统一风格 +antigravityMinRetryWait +antigravityRateLimitThreshold +``` + +### 10. 代码审查清单 + +在提交代码前,检查以下项目: + +- [ ] 函数是否超过 30 行?(不可拆分的逻辑除外,需注释说明) +- [ ] 嵌套是否超过 3 层? +- [ ] 是否有重复代码可以提取? +- [ ] 是否使用了魔法数字? +- [ ] Mock 函数签名是否与实际函数一致? +- [ ] 测试是否覆盖了新增逻辑? +- [ ] 日志是否包含足够的上下文信息? +- [ ] 是否考虑了并发安全? + +--- + +## CI 检查与发布门禁 + +### GitHub Actions 检查项 + +本项目有 4 个 CI 任务,**任何代码推送或发布前都必须全部通过**: + +| Workflow | Job | 说明 | 本地验证命令 | +|----------|-----|------|-------------| +| CI | `test` | 单元测试 + 集成测试 | `cd backend && make test-unit && make test-integration` | +| CI | `golangci-lint` | Go 代码静态检查(golangci-lint v2.7) | `cd backend && golangci-lint run --timeout=5m` | +| Security Scan | `backend-security` | govulncheck + gosec 安全扫描 | `cd backend && govulncheck ./... 
&& gosec -severity high -confidence high ./...` | +| Security Scan | `frontend-security` | pnpm audit 前端依赖安全检查 | `cd frontend && pnpm audit --prod --audit-level=high` | + +### 向上游提交 PR + +PR 目标是上游官方仓库,**只包含通用功能改动**(bug fix、新功能、性能优化等)。 + +**以下文件禁止出现在 PR 中**(属于我们 fork 的定制化内容): +- `CLAUDE.md`、`AGENTS.md` — 我们的开发文档 +- `backend/cmd/server/VERSION` — 我们的版本号文件 +- UI 定制改动(GitHub 链接移除、微信客服按钮、首页定制等) +- 部署配置(`deploy/` 目录下的定制修改) + +**PR 流程**: +1. 从 `develop` 创建功能分支,只包含要提交给上游的改动 +2. 推送分支后,**等待 4 个 CI job 全部通过** +3. 确认通过后再创建 PR +4. 使用 `gh run list --repo touwaeriol/sub2api --branch ` 检查状态 + +### 自有分支推送(develop / main) + +推送到我们自己的 `develop` 或 `main` 分支时,包含所有改动(定制化 + 通用功能)。 + +**推送流程**: +1. 本地运行 `cd backend && make test-unit` 确保单元测试通过 +2. 本地运行 `cd backend && gofmt -l ./...` 确保格式正确 +3. 推送后确认 CI 和 Security Scan 两个 workflow 的 4 个 job 全部绿色 ✅ +4. 任何 job 失败必须立即修复,**禁止在 CI 未通过的状态下继续后续操作** + +### 发布版本 + +1. 确保 `main` 分支最新提交的 4 个 CI job 全部通过 +2. 递增 `backend/cmd/server/VERSION`,提交并推送 +3. 打 tag 推送后,确认 tag 触发的 3 个 workflow(CI、Security Scan、Release)全部通过 +4. **Release workflow 失败时禁止部署** — 必须先修复问题,删除旧 tag,重新打 tag +5. 使用 `gh run list --repo touwaeriol/sub2api --limit 10` 确认状态 + +### 常见 CI 失败原因及修复 +- **gofmt**:struct 字段对齐不一致 → 运行 `gofmt -w ` 修复 +- **golangci-lint**:未使用的变量/导入 → 删除或使用 `_` 忽略 +- **test 失败**:mock 函数签名不一致 → 同步更新 mock +- **gosec**:安全漏洞 → 根据提示修复或添加例外 diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 00000000..a7a3e34a --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,723 @@ +# Sub2API 开发说明 + +## 版本管理策略 + +### 版本号规则 + +我们在官方版本号后面添加自己的小版本号: + +- 官方版本:`v0.1.68` +- 我们的版本:`v0.1.68.1`、`v0.1.68.2`(递增) + +### 分支策略 + +| 分支 | 说明 | +|------|------| +| `main` | 我们的主分支,包含所有定制功能 | +| `release/custom-X.Y.Z` | 基于官方 `vX.Y.Z` 的发布分支 | +| `upstream/main` | 上游官方仓库 | + +--- + +## 发布流程(基于新官方版本) + +当官方发布新版本(如 `v0.1.69`)时: + +### 1. 
同步上游并创建发布分支 + +```bash +# 获取上游最新代码 +git fetch upstream --tags + +# 基于官方标签创建新的发布分支 +git checkout v0.1.69 -b release/custom-0.1.69 + +# 合并我们的 main 分支(包含所有定制功能) +git merge main --no-edit + +# 解决可能的冲突后继续 +``` + +### 2. 更新版本号并打标签 + +```bash +# 更新版本号文件 +echo "0.1.69.1" > backend/cmd/server/VERSION +git add backend/cmd/server/VERSION +git commit -m "chore: bump version to 0.1.69.1" + +# 打上我们自己的标签 +git tag v0.1.69.1 + +# 推送分支和标签 +git push origin release/custom-0.1.69 +git push origin v0.1.69.1 +``` + +### 3. 更新 main 分支 + +```bash +# 将发布分支合并回 main,保持 main 包含最新定制功能 +git checkout main +git merge release/custom-0.1.69 +git push origin main +``` + +--- + +## 热修复发布(在现有版本上修复) + +当需要在当前版本上发布修复时: + +```bash +# 在当前发布分支上修复 +git checkout release/custom-0.1.68 +# ... 进行修复 ... +git commit -m "fix: 修复描述" + +# 递增小版本号 +echo "0.1.68.2" > backend/cmd/server/VERSION +git add backend/cmd/server/VERSION +git commit -m "chore: bump version to 0.1.68.2" + +# 打标签并推送 +git tag v0.1.68.2 +git push origin release/custom-0.1.68 +git push origin v0.1.68.2 + +# 同步修复到 main +git checkout main +git cherry-pick +git push origin main +``` + +--- + +## 服务器部署流程 + +### 前置条件 + +- 本地已配置 SSH 别名 `clicodeplus` 连接到服务器 +- 服务器部署目录:`/root/sub2api`(正式)、`/root/sub2api-beta`(测试) +- 服务器使用 Docker Compose 部署 + +### 部署环境说明 + +| 环境 | 目录 | 端口 | 数据库 | 容器名 | +|------|------|------|--------|--------| +| 正式 | `/root/sub2api` | 8080 | `sub2api` | `sub2api` | +| Beta | `/root/sub2api-beta` | 8084 | `beta` | `sub2api-beta` | + +### 外部数据库 + +正式和 Beta 环境**共用外部 PostgreSQL 数据库**(非容器内数据库),配置在 `.env` 文件中: +- `DATABASE_HOST`:外部数据库地址 +- `DATABASE_SSLMODE`:SSL 模式(通常为 `require`) +- `POSTGRES_USER` / `POSTGRES_DB`:用户名和数据库名 + +#### 数据库操作命令 + +通过 SSH 在服务器上执行数据库操作: + +```bash +# 正式环境 - 查询迁移记录 +ssh clicodeplus "source /root/sub2api/deploy/.env && PGPASSWORD=\"\$POSTGRES_PASSWORD\" psql -h \$DATABASE_HOST -U \$POSTGRES_USER -d \$POSTGRES_DB -c 'SELECT * FROM schema_migrations ORDER BY applied_at DESC LIMIT 5;'" + +# Beta 环境 - 查询迁移记录 +ssh clicodeplus 
"source /root/sub2api-beta/deploy/.env && PGPASSWORD=\"\$POSTGRES_PASSWORD\" psql -h \$DATABASE_HOST -U \$POSTGRES_USER -d \$POSTGRES_DB -c 'SELECT * FROM schema_migrations ORDER BY applied_at DESC LIMIT 5;'" + +# Beta 环境 - 清除指定迁移记录(重新执行迁移) +ssh clicodeplus "source /root/sub2api-beta/deploy/.env && PGPASSWORD=\"\$POSTGRES_PASSWORD\" psql -h \$DATABASE_HOST -U \$POSTGRES_USER -d \$POSTGRES_DB -c \"DELETE FROM schema_migrations WHERE filename LIKE '%049%';\"" + +# Beta 环境 - 更新账号数据 +ssh clicodeplus "source /root/sub2api-beta/deploy/.env && PGPASSWORD=\"\$POSTGRES_PASSWORD\" psql -h \$DATABASE_HOST -U \$POSTGRES_USER -d \$POSTGRES_DB -c \"UPDATE accounts SET credentials = credentials - 'model_mapping' WHERE platform = 'antigravity';\"" +``` + +> **注意**:使用 `source .env` 加载环境变量,避免在命令行中暴露密码。 + +### 部署步骤 + +**重要:每次部署都必须递增版本号!** + +#### 0. 递增版本号(本地操作) + +每次部署前,先在本地递增小版本号: + +```bash +# 查看当前版本号 +cat backend/cmd/server/VERSION +# 假设当前是 0.1.69.1 + +# 递增版本号 +echo "0.1.69.2" > backend/cmd/server/VERSION +git add backend/cmd/server/VERSION +git commit -m "chore: bump version to 0.1.69.2" +git push origin release/custom-0.1.69 +``` + +#### 1. 服务器拉取代码 + +```bash +ssh clicodeplus "cd /root/sub2api && git fetch fork && git checkout -B release/custom-0.1.69 fork/release/custom-0.1.69" +``` + +#### 2. 服务器构建镜像 + +```bash +ssh clicodeplus "cd /root/sub2api && docker build --no-cache -t sub2api:latest -f Dockerfile ." +``` + +#### 3. 更新镜像标签并重启服务 + +```bash +ssh clicodeplus "docker tag sub2api:latest weishaw/sub2api:latest" +ssh clicodeplus "cd /root/sub2api/deploy && docker compose up -d --force-recreate sub2api" +``` + +#### 4. 
验证部署 + +```bash +# 查看启动日志 +ssh clicodeplus "docker logs sub2api --tail 20" + +# 确认版本号(必须与步骤 0 中设置的版本号一致) +ssh clicodeplus "cat /root/sub2api/backend/cmd/server/VERSION" + +# 检查容器状态 +ssh clicodeplus "docker ps | grep sub2api" +``` + +--- + +## Beta 并行部署(不影响现网) + +目标:在同一台服务器上并行启动一个 beta 实例(例如端口 `8084`),**严禁改动/重启**现网实例(默认目录 `/root/sub2api`)。 + +### 设计原则 + +- **新目录**:beta 使用独立目录,例如 `/root/sub2api-beta`。 +- **敏感信息只放 `.env`**:beta 的数据库密码、JWT_SECRET 等只写入 `/root/sub2api-beta/deploy/.env`,不要提交到 git。 +- **独立 Compose Project**:通过 `docker compose -p sub2api-beta ...` 启动,确保 network/volume 隔离。 +- **独立端口**:通过 `.env` 的 `SERVER_PORT` 映射宿主机端口(例如 `8084:8080`)。 + +### 前置检查 + +```bash +# 1) 确保 8084 未被占用 +ssh clicodeplus "ss -ltnp | grep :8084 || echo '8084 is free'" + +# 2) 确认现网容器还在(只读检查) +ssh clicodeplus "docker ps --format 'table {{.Names}}\t{{.Image}}\t{{.Ports}}' | sed -n '1,200p'" +``` + +### 首次部署步骤 + +```bash +# 0) 进入服务器 +ssh clicodeplus + +# 1) 克隆代码到新目录(示例使用你的 fork) +cd /root +git clone https://github.com/touwaeriol/sub2api.git sub2api-beta +cd /root/sub2api-beta +git checkout release/custom-0.1.71 + +# 2) 准备 beta 的 .env(敏感信息只写这里) +cd /root/sub2api-beta/deploy + +# 推荐:从现网 .env 复制,保证除 DB 名/用户/端口外完全一致 +cp -f /root/sub2api/deploy/.env ./.env + +# 仅修改以下三项(其他保持不变) +perl -pi -e 's/^SERVER_PORT=.*/SERVER_PORT=8084/' ./.env +perl -pi -e 's/^POSTGRES_USER=.*/POSTGRES_USER=beta/' ./.env +perl -pi -e 's/^POSTGRES_DB=.*/POSTGRES_DB=beta/' ./.env + +# 3) 写 compose override(避免与现网容器名冲突,镜像使用本地构建的 sub2api:beta) +cat > docker-compose.override.yml <<'YAML' +services: + sub2api: + image: sub2api:beta + container_name: sub2api-beta + redis: + container_name: sub2api-beta-redis +YAML + +# 4) 构建 beta 镜像(基于当前代码) +cd /root/sub2api-beta +docker build -t sub2api:beta -f Dockerfile . 
+ +# 5) 启动 beta(独立 project,确保不影响现网) +cd /root/sub2api-beta/deploy +docker compose -p sub2api-beta --env-file .env -f docker-compose.yml -f docker-compose.override.yml up -d + +# 6) 验证 beta +curl -fsS http://127.0.0.1:8084/health +docker logs sub2api-beta --tail 50 +``` + +### 数据库配置约定(beta) + +- 数据库地址/SSL/密码:与现网一致(从现网 `.env` 复制即可)。 +- 仅修改: + - `POSTGRES_USER=beta` + - `POSTGRES_DB=beta` + +注意:需要数据库侧已存在 `beta` 用户与 `beta` 数据库,并授予权限;否则容器会启动失败并不断重启。 + +### 更新 beta(拉代码 + 仅重建 beta 容器) + +```bash +ssh clicodeplus "set -e; cd /root/sub2api-beta && git fetch --all --tags && git checkout -f release/custom-0.1.71 && git reset --hard origin/release/custom-0.1.71" +ssh clicodeplus "cd /root/sub2api-beta && docker build -t sub2api:beta -f Dockerfile ." +ssh clicodeplus "cd /root/sub2api-beta/deploy && docker compose -p sub2api-beta --env-file .env -f docker-compose.yml -f docker-compose.override.yml up -d --no-deps --force-recreate sub2api" +ssh clicodeplus "curl -fsS http://127.0.0.1:8084/health" +``` + +### 停止/回滚 beta(只影响 beta) + +```bash +ssh clicodeplus "cd /root/sub2api-beta/deploy && docker compose -p sub2api-beta -f docker-compose.yml -f docker-compose.override.yml down" +``` + +--- + +## 服务器首次部署 + +### 1. 克隆代码并配置远程仓库 + +```bash +ssh clicodeplus +cd /root +git clone https://github.com/Wei-Shaw/sub2api.git +cd sub2api + +# 添加 fork 仓库 +git remote add fork https://github.com/touwaeriol/sub2api.git +``` + +### 2. 切换到定制分支并配置环境 + +```bash +git fetch fork +git checkout -B release/custom-0.1.69 fork/release/custom-0.1.69 + +cd deploy +cp .env.example .env +vim .env # 配置 DATABASE_URL, REDIS_URL, JWT_SECRET 等 +``` + +### 3. 构建并启动 + +```bash +cd /root/sub2api +docker build -t sub2api:latest -f Dockerfile . +docker tag sub2api:latest weishaw/sub2api:latest +cd deploy && docker compose up -d +``` + +### 6. 启动服务 + +```bash +# 进入 deploy 目录 +cd deploy + +# 启动所有服务(PostgreSQL、Redis、sub2api) +docker compose up -d + +# 查看服务状态 +docker compose ps +``` + +### 7. 
验证部署 + +```bash +# 查看应用日志 +docker logs sub2api --tail 50 + +# 检查健康状态 +curl http://localhost:8080/health + +# 确认版本号 +cat /root/sub2api/backend/cmd/server/VERSION +``` + +### 8. 常用运维命令 + +```bash +# 查看实时日志 +docker logs -f sub2api + +# 重启服务 +docker compose restart sub2api + +# 停止所有服务 +docker compose down + +# 停止并删除数据卷(慎用!会删除数据库数据) +docker compose down -v + +# 查看资源使用情况 +docker stats sub2api +``` + +--- + +## 定制功能说明 + +当前定制分支包含以下功能(相对于官方版本): + +### UI/UX 定制 + +| 功能 | 说明 | +|------|------| +| 首页优化 | 面向用户的价值主张设计 | +| 移除 GitHub 链接 | 用户菜单中不显示 GitHub 导航 | +| 微信客服按钮 | 首页悬浮微信客服入口 | +| 限流时间精确显示 | 账号限流时间显示精确到秒 | + +### Antigravity 平台增强 + +| 功能 | 说明 | +|------|------| +| Scope 级别限流 | 按配额域(claude/gemini_text/gemini_image)独立限流,避免整个账号被锁定 | +| 模型级别限流 | 按具体模型(如 claude-opus-4-5)独立限流,更精细的限流控制 | +| 限流预检查 | 调度时预检查账号/模型限流状态,避免选中已限流账号 | +| 秒级冷却时间 | 支持 429 响应的秒级精确冷却时间 | +| 身份注入优化 | 模型身份信息注入 + 静默边界防止身份泄露 | +| thoughtSignature 修复 | Gemini 3 函数调用 400 错误修复 | +| max_tokens 自动修正 | 自动修正 max_tokens <= budget_tokens 导致的 400 错误 | + +### 调度算法优化 + +| 功能 | 说明 | +|------|------| +| 分层过滤选择 | 调度算法从全排序改为分层过滤,提升性能 | +| LRU 随机选择 | 相同 LRU 时间时随机选择,避免账号集中 | +| 限流等待阈值配置化 | 可配置的限流等待阈值 | + +### 运维增强 + +| 功能 | 说明 | +|------|------| +| Scope 限流统计 | 运维界面展示 Antigravity 账号 scope 级别限流统计 | +| 账号限流状态显示 | 账号列表显示 scope 和模型级别限流状态 | +| 清除限流按钮增强 | 有 scope/模型限流时也显示清除限流按钮 | + +### 其他修复 + +| 功能 | 说明 | +|------|------| +| .gitattributes | 确保迁移文件使用 LF 换行符(解决 Windows 下 SQL 摘要不一致) | +| 部署配置优化 | DATABASE_HOST 和 DATABASE_SSLMODE 可通过 .env 配置 | + +--- + +## 注意事项 + +1. **前端必须打包进镜像**:使用 `docker build` 在服务器上构建,Dockerfile 会自动编译前端并 embed 到后端二进制中 + +2. **镜像标签**:docker-compose.yml 使用 `weishaw/sub2api:latest`,本地构建后需要 `docker tag` 覆盖 + +3. **Windows 换行符问题**:已通过 `.gitattributes` 解决,确保 `*.sql` 文件始终使用 LF + +4. **版本号管理**:每次发布必须更新 `backend/cmd/server/VERSION` 并打标签 + +5. 
**合并冲突**:合并上游新版本时,重点关注以下文件可能的冲突: + - `backend/internal/service/antigravity_gateway_service.go` + - `backend/internal/service/gateway_service.go` + - `backend/internal/pkg/antigravity/request_transformer.go` + +--- + +## Go 代码规范 + +### 1. 函数设计 + +#### 单一职责原则 +- **函数行数**:单个函数常规不应超过 **30 行**,超过时应拆分为子函数。若某段逻辑确实不可拆分(如复杂的状态机、协议解析等),可以例外,但需添加注释说明原因 +- **嵌套层级**:避免超过 3 层嵌套,使用 early return 减少嵌套 + +```go +// ❌ 不推荐:深层嵌套 +func process(data []Item) { + for _, item := range data { + if item.Valid { + if item.Type == "A" { + if item.Status == "active" { + // 业务逻辑... + } + } + } + } +} + +// ✅ 推荐:early return +func process(data []Item) { + for _, item := range data { + if !item.Valid { + continue + } + if item.Type != "A" { + continue + } + if item.Status != "active" { + continue + } + // 业务逻辑... + } +} +``` + +#### 复杂逻辑提取 +将复杂的条件判断或处理逻辑提取为独立函数: + +```go +// ❌ 不推荐:内联复杂逻辑 +if resp.StatusCode == 429 || resp.StatusCode == 503 { + // 80+ 行处理逻辑... +} + +// ✅ 推荐:提取为独立函数 +result := handleRateLimitResponse(resp, params) +switch result.action { +case actionRetry: + continue +case actionBreak: + return result.resp, nil +} +``` + +### 2. 重复代码消除 + +#### 配置获取模式 +将重复的配置获取逻辑提取为方法: + +```go +// ❌ 不推荐:重复代码 +logBody := s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBody +maxBytes := 2048 +if s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes > 0 { + maxBytes = s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes +} + +// ✅ 推荐:提取为方法 +func (s *Service) getLogConfig() (logBody bool, maxBytes int) { + maxBytes = 2048 + if s.settingService == nil || s.settingService.cfg == nil { + return false, maxBytes + } + cfg := s.settingService.cfg.Gateway + if cfg.LogUpstreamErrorBodyMaxBytes > 0 { + maxBytes = cfg.LogUpstreamErrorBodyMaxBytes + } + return cfg.LogUpstreamErrorBody, maxBytes +} +``` + +### 3. 
常量管理 + +#### 避免魔法数字 +所有硬编码的数值都应定义为常量: + +```go +// ❌ 不推荐 +if retryDelay >= 10*time.Second { + resetAt := time.Now().Add(30 * time.Second) +} + +// ✅ 推荐 +const ( + rateLimitThreshold = 10 * time.Second + defaultRateLimitDuration = 30 * time.Second +) + +if retryDelay >= rateLimitThreshold { + resetAt := time.Now().Add(defaultRateLimitDuration) +} +``` + +#### 注释引用常量名 +在注释中引用常量名而非硬编码值: + +```go +// ❌ 不推荐 +// < 10s: 等待后重试 + +// ✅ 推荐 +// < rateLimitThreshold: 等待后重试 +``` + +### 4. 错误处理 + +#### 使用结构化日志 +优先使用 `slog` 进行结构化日志记录: + +```go +// ❌ 不推荐 +log.Printf("%s status=%d model_rate_limit_failed model=%s error=%v", prefix, statusCode, modelName, err) + +// ✅ 推荐 +slog.Error("failed to set model rate limit", + "prefix", prefix, + "status_code", statusCode, + "model", modelName, + "error", err, +) +``` + +### 5. 测试规范 + +#### Mock 函数签名同步 +修改函数签名时,必须同步更新所有测试中的 mock 函数: + +```go +// 如果修改了 handleError 签名 +handleError func(..., groupID int64, sessionHash string) *Result + +// 必须同步更新测试中的 mock +handleError: func(..., groupID int64, sessionHash string) *Result { + return nil +}, +``` + +#### 测试构建标签 +统一使用测试构建标签: + +```go +//go:build unit + +package service +``` + +### 6. 时间格式解析 + +#### 使用标准库 +优先使用 `time.ParseDuration`,支持所有 Go duration 格式: + +```go +// ❌ 不推荐:手动限制格式 +if !strings.HasSuffix(delay, "s") || strings.Contains(delay, "m") { + continue +} + +// ✅ 推荐:使用标准库 +dur, err := time.ParseDuration(delay) // 支持 "0.5s", "4m50s", "1h30m" 等 +``` + +### 7. 接口设计 + +#### 接口隔离原则 +定义最小化接口,只包含必需的方法: + +```go +// ❌ 不推荐:使用过于宽泛的接口 +type AccountRepository interface { + // 20+ 个方法... +} + +// ✅ 推荐:定义最小化接口 +type ModelRateLimiter interface { + SetModelRateLimit(ctx context.Context, id int64, modelKey string, resetAt time.Time) error +} +``` + +### 8. 并发安全 + +#### 共享数据保护 +访问可能被并发修改的数据时,确保线程安全: + +```go +// 如果 Account.Extra 可能被并发修改 +// 需要使用互斥锁或原子操作保护读取 +func (a *Account) GetRateLimitRemainingTime(model string) time.Duration { + a.mu.RLock() + defer a.mu.RUnlock() + // 读取 Extra 字段... +} +``` + +### 9. 
命名规范 + +#### 一致的命名风格 +- 常量使用 camelCase:`rateLimitThreshold` +- 类型使用 PascalCase:`AntigravityQuotaScope` +- 同一概念使用统一命名:`Threshold` 或 `Limit`,不要混用 + +```go +// ❌ 不推荐:命名不一致 +antigravitySmartRetryMinWait // 使用 Min +antigravityRateLimitThreshold // 使用 Threshold + +// ✅ 推荐:统一风格 +antigravityMinRetryWait +antigravityRateLimitThreshold +``` + +### 10. 代码审查清单 + +在提交代码前,检查以下项目: + +- [ ] 函数是否超过 30 行?(不可拆分的逻辑除外,需注释说明) +- [ ] 嵌套是否超过 3 层? +- [ ] 是否有重复代码可以提取? +- [ ] 是否使用了魔法数字? +- [ ] Mock 函数签名是否与实际函数一致? +- [ ] 测试是否覆盖了新增逻辑? +- [ ] 日志是否包含足够的上下文信息? +- [ ] 是否考虑了并发安全? + +--- + +## CI 检查与发布门禁 + +### GitHub Actions 检查项 + +本项目有 4 个 CI 任务,**任何代码推送或发布前都必须全部通过**: + +| Workflow | Job | 说明 | 本地验证命令 | +|----------|-----|------|-------------| +| CI | `test` | 单元测试 + 集成测试 | `cd backend && make test-unit && make test-integration` | +| CI | `golangci-lint` | Go 代码静态检查(golangci-lint v2.7) | `cd backend && golangci-lint run --timeout=5m` | +| Security Scan | `backend-security` | govulncheck + gosec 安全扫描 | `cd backend && govulncheck ./... && gosec -severity high -confidence high ./...` | +| Security Scan | `frontend-security` | pnpm audit 前端依赖安全检查 | `cd frontend && pnpm audit --prod --audit-level=high` | + +### 向上游提交 PR + +PR 目标是上游官方仓库,**只包含通用功能改动**(bug fix、新功能、性能优化等)。 + +**以下文件禁止出现在 PR 中**(属于我们 fork 的定制化内容): +- `CLAUDE.md`、`AGENTS.md` — 我们的开发文档 +- `backend/cmd/server/VERSION` — 我们的版本号文件 +- UI 定制改动(GitHub 链接移除、微信客服按钮、首页定制等) +- 部署配置(`deploy/` 目录下的定制修改) + +**PR 流程**: +1. 从 `develop` 创建功能分支,只包含要提交给上游的改动 +2. 推送分支后,**等待 4 个 CI job 全部通过** +3. 确认通过后再创建 PR +4. 使用 `gh run list --repo touwaeriol/sub2api --branch ` 检查状态 + +### 自有分支推送(develop / main) + +推送到我们自己的 `develop` 或 `main` 分支时,包含所有改动(定制化 + 通用功能)。 + +**推送流程**: +1. 本地运行 `cd backend && make test-unit` 确保单元测试通过 +2. 本地运行 `cd backend && gofmt -l ./...` 确保格式正确 +3. 推送后确认 CI 和 Security Scan 两个 workflow 的 4 个 job 全部绿色 ✅ +4. 任何 job 失败必须立即修复,**禁止在 CI 未通过的状态下继续后续操作** + +### 发布版本 + +1. 确保 `main` 分支最新提交的 4 个 CI job 全部通过 +2. 递增 `backend/cmd/server/VERSION`,提交并推送 +3. 
打 tag 推送后,确认 tag 触发的 3 个 workflow(CI、Security Scan、Release)全部通过 +4. **Release workflow 失败时禁止部署** — 必须先修复问题,删除旧 tag,重新打 tag +5. 使用 `gh run list --repo touwaeriol/sub2api --limit 10` 确认状态 + +### 常见 CI 失败原因及修复 +- **gofmt**:struct 字段对齐不一致 → 运行 `gofmt -w ` 修复 +- **golangci-lint**:未使用的变量/导入 → 删除或使用 `_` 忽略 +- **test 失败**:mock 函数签名不一致 → 同步更新 mock +- **gosec**:安全漏洞 → 根据提示修复或添加例外 diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index bc88be6e..b4fec824 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.74.7 +0.1.75.7 \ No newline at end of file diff --git a/backend/cmd/server/wire_gen.go b/backend/cmd/server/wire_gen.go index ef205dc8..5ccd797e 100644 --- a/backend/cmd/server/wire_gen.go +++ b/backend/cmd/server/wire_gen.go @@ -154,7 +154,8 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { identityService := service.NewIdentityService(identityCache) deferredService := service.ProvideDeferredService(accountRepository, timingWheelService) claudeTokenProvider := service.NewClaudeTokenProvider(accountRepository, geminiTokenCache, oAuthService) - gatewayService := service.NewGatewayService(accountRepository, groupRepository, usageLogRepository, userRepository, userSubscriptionRepository, userGroupRateRepository, gatewayCache, configConfig, schedulerSnapshotService, concurrencyService, billingService, rateLimitService, billingCacheService, identityService, httpUpstream, deferredService, claudeTokenProvider, sessionLimitCache) + digestSessionStore := service.NewDigestSessionStore() + gatewayService := service.NewGatewayService(accountRepository, groupRepository, usageLogRepository, userRepository, userSubscriptionRepository, userGroupRateRepository, gatewayCache, configConfig, schedulerSnapshotService, concurrencyService, billingService, rateLimitService, billingCacheService, identityService, httpUpstream, deferredService, claudeTokenProvider, sessionLimitCache, digestSessionStore) 
openAITokenProvider := service.NewOpenAITokenProvider(accountRepository, geminiTokenCache, openAIOAuthService) openAIGatewayService := service.NewOpenAIGatewayService(accountRepository, usageLogRepository, userRepository, userSubscriptionRepository, gatewayCache, configConfig, schedulerSnapshotService, concurrencyService, billingService, rateLimitService, billingCacheService, httpUpstream, deferredService, openAITokenProvider) geminiMessagesCompatService := service.NewGeminiMessagesCompatService(accountRepository, groupRepository, gatewayCache, schedulerSnapshotService, geminiTokenProvider, rateLimitService, httpUpstream, antigravityGatewayService, configConfig) diff --git a/backend/go.mod b/backend/go.mod index 6916057f..08d54b91 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -103,6 +103,7 @@ require ( github.com/ncruces/go-strftime v1.0.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.1 // indirect + github.com/patrickmn/go-cache v2.1.0+incompatible // indirect github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect diff --git a/backend/go.sum b/backend/go.sum index 171995c7..e9525a10 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -207,6 +207,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/pelletier/go-toml/v2 v2.2.2 
h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= diff --git a/backend/internal/handler/dto/types.go b/backend/internal/handler/dto/types.go index 71bb1ed4..c1b70878 100644 --- a/backend/internal/handler/dto/types.go +++ b/backend/internal/handler/dto/types.go @@ -2,11 +2,6 @@ package dto import "time" -type ScopeRateLimitInfo struct { - ResetAt time.Time `json:"reset_at"` - RemainingSec int64 `json:"remaining_sec"` -} - type User struct { ID int64 `json:"id"` Email string `json:"email"` @@ -126,9 +121,6 @@ type Account struct { RateLimitResetAt *time.Time `json:"rate_limit_reset_at"` OverloadUntil *time.Time `json:"overload_until"` - // Antigravity scope 级限流状态(从 extra 提取) - ScopeRateLimits map[string]ScopeRateLimitInfo `json:"scope_rate_limits,omitempty"` - TempUnschedulableUntil *time.Time `json:"temp_unschedulable_until"` TempUnschedulableReason string `json:"temp_unschedulable_reason"` diff --git a/backend/internal/handler/gateway_handler.go b/backend/internal/handler/gateway_handler.go index 255d3fab..6900fa55 100644 --- a/backend/internal/handler/gateway_handler.go +++ b/backend/internal/handler/gateway_handler.go @@ -13,6 +13,7 @@ import ( "time" "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/domain" "github.com/Wei-Shaw/sub2api/internal/pkg/antigravity" "github.com/Wei-Shaw/sub2api/internal/pkg/claude" "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" @@ -114,7 +115,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { setOpsRequestContext(c, "", false, body) - parsedReq, err := service.ParseGatewayRequest(body) + parsedReq, err := service.ParseGatewayRequest(body, domain.PlatformAnthropic) if err != nil { h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "Failed to parse request body") return @@ -203,6 +204,11 @@ func (h 
*GatewayHandler) Messages(c *gin.Context) { } // 计算粘性会话hash + parsedReq.SessionContext = &service.SessionContext{ + ClientIP: ip.GetClientIP(c), + UserAgent: c.GetHeader("User-Agent"), + APIKeyID: apiKey.ID, + } sessionHash := h.gatewayService.GenerateSessionHash(parsedReq) // 获取平台:优先使用强制平台(/antigravity 路由,中间件已设置 request.Context),否则使用分组平台 @@ -335,7 +341,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { if errors.As(err, &failoverErr) { failedAccountIDs[account.ID] = struct{}{} lastFailoverErr = failoverErr - if failoverErr.ForceCacheBilling { + if needForceCacheBilling(hasBoundSession, failoverErr) { forceCacheBilling = true } if switchCount >= maxAccountSwitches { @@ -344,6 +350,11 @@ func (h *GatewayHandler) Messages(c *gin.Context) { } switchCount++ log.Printf("Account %d: upstream error %d, switching account %d/%d", account.ID, failoverErr.StatusCode, switchCount, maxAccountSwitches) + if account.Platform == service.PlatformAntigravity { + if !sleepFailoverDelay(c.Request.Context(), switchCount) { + return + } + } continue } // 错误响应已在Forward中处理,这里只记录日志 @@ -530,7 +541,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { if errors.As(err, &failoverErr) { failedAccountIDs[account.ID] = struct{}{} lastFailoverErr = failoverErr - if failoverErr.ForceCacheBilling { + if needForceCacheBilling(hasBoundSession, failoverErr) { forceCacheBilling = true } if switchCount >= maxAccountSwitches { @@ -539,6 +550,11 @@ func (h *GatewayHandler) Messages(c *gin.Context) { } switchCount++ log.Printf("Account %d: upstream error %d, switching account %d/%d", account.ID, failoverErr.StatusCode, switchCount, maxAccountSwitches) + if account.Platform == service.PlatformAntigravity { + if !sleepFailoverDelay(c.Request.Context(), switchCount) { + return + } + } continue } // 错误响应已在Forward中处理,这里只记录日志 @@ -801,6 +817,27 @@ func (h *GatewayHandler) handleConcurrencyError(c *gin.Context, err error, slotT fmt.Sprintf("Concurrency limit exceeded for %s, please retry later", 
slotType), streamStarted) } +// needForceCacheBilling 判断 failover 时是否需要强制缓存计费 +// 粘性会话切换账号、或上游明确标记时,将 input_tokens 转为 cache_read 计费 +func needForceCacheBilling(hasBoundSession bool, failoverErr *service.UpstreamFailoverError) bool { + return hasBoundSession || (failoverErr != nil && failoverErr.ForceCacheBilling) +} + +// sleepFailoverDelay 账号切换线性递增延时:第1次0s、第2次1s、第3次2s… +// 返回 false 表示 context 已取消。 +func sleepFailoverDelay(ctx context.Context, switchCount int) bool { + delay := time.Duration(switchCount-1) * time.Second + if delay <= 0 { + return true + } + select { + case <-ctx.Done(): + return false + case <-time.After(delay): + return true + } +} + func (h *GatewayHandler) handleFailoverExhausted(c *gin.Context, failoverErr *service.UpstreamFailoverError, platform string, streamStarted bool) { statusCode := failoverErr.StatusCode responseBody := failoverErr.ResponseBody @@ -934,7 +971,7 @@ func (h *GatewayHandler) CountTokens(c *gin.Context) { setOpsRequestContext(c, "", false, body) - parsedReq, err := service.ParseGatewayRequest(body) + parsedReq, err := service.ParseGatewayRequest(body, domain.PlatformAnthropic) if err != nil { h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "Failed to parse request body") return @@ -962,6 +999,11 @@ func (h *GatewayHandler) CountTokens(c *gin.Context) { } // 计算粘性会话 hash + parsedReq.SessionContext = &service.SessionContext{ + ClientIP: ip.GetClientIP(c), + UserAgent: c.GetHeader("User-Agent"), + APIKeyID: apiKey.ID, + } sessionHash := h.gatewayService.GenerateSessionHash(parsedReq) // 选择支持该模型的账号 diff --git a/backend/internal/handler/gemini_v1beta_handler.go b/backend/internal/handler/gemini_v1beta_handler.go index 2b69be2e..d5149f22 100644 --- a/backend/internal/handler/gemini_v1beta_handler.go +++ b/backend/internal/handler/gemini_v1beta_handler.go @@ -14,6 +14,7 @@ import ( "strings" "time" + "github.com/Wei-Shaw/sub2api/internal/domain" "github.com/Wei-Shaw/sub2api/internal/pkg/antigravity" 
"github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" "github.com/Wei-Shaw/sub2api/internal/pkg/gemini" @@ -30,13 +31,6 @@ import ( // 匹配格式: /Users/xxx/.gemini/tmp/[64位十六进制哈希] var geminiCLITmpDirRegex = regexp.MustCompile(`/\.gemini/tmp/([A-Fa-f0-9]{64})`) -func isGeminiCLIRequest(c *gin.Context, body []byte) bool { - if strings.TrimSpace(c.GetHeader("x-gemini-api-privileged-user-id")) != "" { - return true - } - return geminiCLITmpDirRegex.Match(body) -} - // GeminiV1BetaListModels proxies: // GET /v1beta/models func (h *GatewayHandler) GeminiV1BetaListModels(c *gin.Context) { @@ -239,7 +233,14 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { sessionHash := extractGeminiCLISessionHash(c, body) if sessionHash == "" { // Fallback: 使用通用的会话哈希生成逻辑(适用于其他客户端) - parsedReq, _ := service.ParseGatewayRequest(body) + parsedReq, _ := service.ParseGatewayRequest(body, domain.PlatformGemini) + if parsedReq != nil { + parsedReq.SessionContext = &service.SessionContext{ + ClientIP: ip.GetClientIP(c), + UserAgent: c.GetHeader("User-Agent"), + APIKeyID: apiKey.ID, + } + } sessionHash = h.gatewayService.GenerateSessionHash(parsedReq) } sessionKey := sessionHash @@ -258,6 +259,7 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { var geminiDigestChain string var geminiPrefixHash string var geminiSessionUUID string + var matchedDigestChain string useDigestFallback := sessionBoundAccountID == 0 if useDigestFallback { @@ -284,13 +286,14 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { ) // 查找会话 - foundUUID, foundAccountID, found := h.gatewayService.FindGeminiSession( + foundUUID, foundAccountID, foundMatchedChain, found := h.gatewayService.FindGeminiSession( c.Request.Context(), derefGroupID(apiKey.GroupID), geminiPrefixHash, geminiDigestChain, ) if found { + matchedDigestChain = foundMatchedChain sessionBoundAccountID = foundAccountID geminiSessionUUID = foundUUID log.Printf("[Gemini] Digest fallback matched: uuid=%s, accountID=%d, chain=%s", @@ 
-316,7 +319,6 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { // 判断是否真的绑定了粘性会话:有 sessionKey 且已经绑定到某个账号 hasBoundSession := sessionKey != "" && sessionBoundAccountID > 0 - isCLI := isGeminiCLIRequest(c, body) cleanedForUnknownBinding := false maxAccountSwitches := h.maxAccountSwitchesGemini @@ -344,10 +346,10 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { log.Printf("[Gemini] Sticky session account switched: %d -> %d, cleaning thoughtSignature", sessionBoundAccountID, account.ID) body = service.CleanGeminiNativeThoughtSignatures(body) sessionBoundAccountID = account.ID - } else if sessionKey != "" && sessionBoundAccountID == 0 && isCLI && !cleanedForUnknownBinding && bytes.Contains(body, []byte(`"thoughtSignature"`)) { - // 无缓存绑定但请求里已有 thoughtSignature:常见于缓存丢失/TTL 过期后,CLI 继续携带旧签名。 + } else if sessionKey != "" && sessionBoundAccountID == 0 && !cleanedForUnknownBinding && bytes.Contains(body, []byte(`"thoughtSignature"`)) { + // 无缓存绑定但请求里已有 thoughtSignature:常见于缓存丢失/TTL 过期后,客户端继续携带旧签名。 // 为避免第一次转发就 400,这里做一次确定性清理,让新账号重新生成签名链路。 - log.Printf("[Gemini] Sticky session binding missing for CLI request, cleaning thoughtSignature proactively") + log.Printf("[Gemini] Sticky session binding missing, cleaning thoughtSignature proactively") body = service.CleanGeminiNativeThoughtSignatures(body) cleanedForUnknownBinding = true sessionBoundAccountID = account.ID @@ -422,7 +424,7 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { var failoverErr *service.UpstreamFailoverError if errors.As(err, &failoverErr) { failedAccountIDs[account.ID] = struct{}{} - if failoverErr.ForceCacheBilling { + if needForceCacheBilling(hasBoundSession, failoverErr) { forceCacheBilling = true } if switchCount >= maxAccountSwitches { @@ -433,6 +435,11 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { lastFailoverErr = failoverErr switchCount++ log.Printf("Gemini account %d: upstream error %d, switching account %d/%d", account.ID, 
failoverErr.StatusCode, switchCount, maxAccountSwitches) + if account.Platform == service.PlatformAntigravity { + if !sleepFailoverDelay(c.Request.Context(), switchCount) { + return + } + } continue } // ForwardNative already wrote the response @@ -453,6 +460,7 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { geminiDigestChain, geminiSessionUUID, account.ID, + matchedDigestChain, ); err != nil { log.Printf("[Gemini] Failed to save digest session: %v", err) } diff --git a/backend/internal/repository/account_repo.go b/backend/internal/repository/account_repo.go index 11c206d8..7fb7d4ed 100644 --- a/backend/internal/repository/account_repo.go +++ b/backend/internal/repository/account_repo.go @@ -798,53 +798,6 @@ func (r *accountRepository) SetRateLimited(ctx context.Context, id int64, resetA return nil } -func (r *accountRepository) SetAntigravityQuotaScopeLimit(ctx context.Context, id int64, scope service.AntigravityQuotaScope, resetAt time.Time) error { - now := time.Now().UTC() - payload := map[string]string{ - "rate_limited_at": now.Format(time.RFC3339), - "rate_limit_reset_at": resetAt.UTC().Format(time.RFC3339), - } - raw, err := json.Marshal(payload) - if err != nil { - return err - } - - scopeKey := string(scope) - client := clientFromContext(ctx, r.client) - result, err := client.ExecContext( - ctx, - `UPDATE accounts SET - extra = jsonb_set( - jsonb_set(COALESCE(extra, '{}'::jsonb), '{antigravity_quota_scopes}'::text[], COALESCE(extra->'antigravity_quota_scopes', '{}'::jsonb), true), - ARRAY['antigravity_quota_scopes', $1]::text[], - $2::jsonb, - true - ), - updated_at = NOW(), - last_used_at = NOW() - WHERE id = $3 AND deleted_at IS NULL`, - scopeKey, - raw, - id, - ) - if err != nil { - return err - } - - affected, err := result.RowsAffected() - if err != nil { - return err - } - if affected == 0 { - return service.ErrAccountNotFound - } - - if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, 
nil); err != nil { - log.Printf("[SchedulerOutbox] enqueue quota scope failed: account=%d err=%v", id, err) - } - return nil -} - func (r *accountRepository) SetModelRateLimit(ctx context.Context, id int64, scope string, resetAt time.Time) error { if scope == "" { return nil diff --git a/backend/internal/repository/gateway_cache.go b/backend/internal/repository/gateway_cache.go index 46ae0c16..58291b66 100644 --- a/backend/internal/repository/gateway_cache.go +++ b/backend/internal/repository/gateway_cache.go @@ -11,63 +11,6 @@ import ( const stickySessionPrefix = "sticky_session:" -// Gemini Trie Lua 脚本 -const ( - // geminiTrieFindScript 查找最长前缀匹配的 Lua 脚本 - // KEYS[1] = trie key - // ARGV[1] = digestChain (如 "u:a-m:b-u:c-m:d") - // ARGV[2] = TTL seconds (用于刷新) - // 返回: 最长匹配的 value (uuid:accountID) 或 nil - // 查找成功时自动刷新 TTL,防止活跃会话意外过期 - geminiTrieFindScript = ` -local chain = ARGV[1] -local ttl = tonumber(ARGV[2]) -local lastMatch = nil -local path = "" - -for part in string.gmatch(chain, "[^-]+") do - path = path == "" and part or path .. "-" .. part - local val = redis.call('HGET', KEYS[1], path) - if val and val ~= "" then - lastMatch = val - end -end - -if lastMatch then - redis.call('EXPIRE', KEYS[1], ttl) -end - -return lastMatch -` - - // geminiTrieSaveScript 保存会话到 Trie 的 Lua 脚本 - // KEYS[1] = trie key - // ARGV[1] = digestChain - // ARGV[2] = value (uuid:accountID) - // ARGV[3] = TTL seconds - geminiTrieSaveScript = ` -local chain = ARGV[1] -local value = ARGV[2] -local ttl = tonumber(ARGV[3]) -local path = "" - -for part in string.gmatch(chain, "[^-]+") do - path = path == "" and part or path .. "-" .. 
part -end -redis.call('HSET', KEYS[1], path, value) -redis.call('EXPIRE', KEYS[1], ttl) -return "OK" -` -) - -// 模型负载统计相关常量 -const ( - modelLoadKeyPrefix = "ag:model_load:" // 模型调用次数 key 前缀 - modelLastUsedKeyPrefix = "ag:model_last_used:" // 模型最后调度时间 key 前缀 - modelLoadTTL = 24 * time.Hour // 调用次数 TTL(24 小时无调用后清零) - modelLastUsedTTL = 24 * time.Hour // 最后调度时间 TTL -) - type gatewayCache struct { rdb *redis.Client } @@ -108,171 +51,3 @@ func (c *gatewayCache) DeleteSessionAccountID(ctx context.Context, groupID int64 key := buildSessionKey(groupID, sessionHash) return c.rdb.Del(ctx, key).Err() } - -// ============ Antigravity 模型负载统计方法 ============ - -// modelLoadKey 构建模型调用次数 key -// 格式: ag:model_load:{accountID}:{model} -func modelLoadKey(accountID int64, model string) string { - return fmt.Sprintf("%s%d:%s", modelLoadKeyPrefix, accountID, model) -} - -// modelLastUsedKey 构建模型最后调度时间 key -// 格式: ag:model_last_used:{accountID}:{model} -func modelLastUsedKey(accountID int64, model string) string { - return fmt.Sprintf("%s%d:%s", modelLastUsedKeyPrefix, accountID, model) -} - -// IncrModelCallCount 增加模型调用次数并更新最后调度时间 -// 返回更新后的调用次数 -func (c *gatewayCache) IncrModelCallCount(ctx context.Context, accountID int64, model string) (int64, error) { - loadKey := modelLoadKey(accountID, model) - lastUsedKey := modelLastUsedKey(accountID, model) - - pipe := c.rdb.Pipeline() - incrCmd := pipe.Incr(ctx, loadKey) - pipe.Expire(ctx, loadKey, modelLoadTTL) // 每次调用刷新 TTL - pipe.Set(ctx, lastUsedKey, time.Now().Unix(), modelLastUsedTTL) - if _, err := pipe.Exec(ctx); err != nil { - return 0, err - } - return incrCmd.Val(), nil -} - -// GetModelLoadBatch 批量获取账号的模型负载信息 -func (c *gatewayCache) GetModelLoadBatch(ctx context.Context, accountIDs []int64, model string) (map[int64]*service.ModelLoadInfo, error) { - if len(accountIDs) == 0 { - return make(map[int64]*service.ModelLoadInfo), nil - } - - loadCmds, lastUsedCmds := c.pipelineModelLoadGet(ctx, accountIDs, model) - return 
c.parseModelLoadResults(accountIDs, loadCmds, lastUsedCmds), nil -} - -// pipelineModelLoadGet 批量获取模型负载的 Pipeline 操作 -func (c *gatewayCache) pipelineModelLoadGet( - ctx context.Context, - accountIDs []int64, - model string, -) (map[int64]*redis.StringCmd, map[int64]*redis.StringCmd) { - pipe := c.rdb.Pipeline() - loadCmds := make(map[int64]*redis.StringCmd, len(accountIDs)) - lastUsedCmds := make(map[int64]*redis.StringCmd, len(accountIDs)) - - for _, id := range accountIDs { - loadCmds[id] = pipe.Get(ctx, modelLoadKey(id, model)) - lastUsedCmds[id] = pipe.Get(ctx, modelLastUsedKey(id, model)) - } - _, _ = pipe.Exec(ctx) // 忽略错误,key 不存在是正常的 - return loadCmds, lastUsedCmds -} - -// parseModelLoadResults 解析 Pipeline 结果 -func (c *gatewayCache) parseModelLoadResults( - accountIDs []int64, - loadCmds map[int64]*redis.StringCmd, - lastUsedCmds map[int64]*redis.StringCmd, -) map[int64]*service.ModelLoadInfo { - result := make(map[int64]*service.ModelLoadInfo, len(accountIDs)) - for _, id := range accountIDs { - result[id] = &service.ModelLoadInfo{ - CallCount: getInt64OrZero(loadCmds[id]), - LastUsedAt: getTimeOrZero(lastUsedCmds[id]), - } - } - return result -} - -// getInt64OrZero 从 StringCmd 获取 int64 值,失败返回 0 -func getInt64OrZero(cmd *redis.StringCmd) int64 { - val, _ := cmd.Int64() - return val -} - -// getTimeOrZero 从 StringCmd 获取 time.Time,失败返回零值 -func getTimeOrZero(cmd *redis.StringCmd) time.Time { - val, err := cmd.Int64() - if err != nil { - return time.Time{} - } - return time.Unix(val, 0) -} - -// ============ Gemini 会话 Fallback 方法 (Trie 实现) ============ - -// FindGeminiSession 查找 Gemini 会话(使用 Trie + Lua 脚本实现 O(L) 查询) -// 返回最长匹配的会话信息,匹配成功时自动刷新 TTL -func (c *gatewayCache) FindGeminiSession(ctx context.Context, groupID int64, prefixHash, digestChain string) (uuid string, accountID int64, found bool) { - if digestChain == "" { - return "", 0, false - } - - trieKey := service.BuildGeminiTrieKey(groupID, prefixHash) - ttlSeconds := 
int(service.GeminiSessionTTL().Seconds()) - - // 使用 Lua 脚本在 Redis 端执行 Trie 查找,O(L) 次 HGET,1 次网络往返 - // 查找成功时自动刷新 TTL,防止活跃会话意外过期 - result, err := c.rdb.Eval(ctx, geminiTrieFindScript, []string{trieKey}, digestChain, ttlSeconds).Result() - if err != nil || result == nil { - return "", 0, false - } - - value, ok := result.(string) - if !ok || value == "" { - return "", 0, false - } - - uuid, accountID, ok = service.ParseGeminiSessionValue(value) - return uuid, accountID, ok -} - -// SaveGeminiSession 保存 Gemini 会话(使用 Trie + Lua 脚本) -func (c *gatewayCache) SaveGeminiSession(ctx context.Context, groupID int64, prefixHash, digestChain, uuid string, accountID int64) error { - if digestChain == "" { - return nil - } - - trieKey := service.BuildGeminiTrieKey(groupID, prefixHash) - value := service.FormatGeminiSessionValue(uuid, accountID) - ttlSeconds := int(service.GeminiSessionTTL().Seconds()) - - return c.rdb.Eval(ctx, geminiTrieSaveScript, []string{trieKey}, digestChain, value, ttlSeconds).Err() -} - -// ============ Anthropic 会话 Fallback 方法 (复用 Trie 实现) ============ - -// FindAnthropicSession 查找 Anthropic 会话(复用 Gemini Trie Lua 脚本) -func (c *gatewayCache) FindAnthropicSession(ctx context.Context, groupID int64, prefixHash, digestChain string) (uuid string, accountID int64, found bool) { - if digestChain == "" { - return "", 0, false - } - - trieKey := service.BuildAnthropicTrieKey(groupID, prefixHash) - ttlSeconds := int(service.AnthropicSessionTTL().Seconds()) - - result, err := c.rdb.Eval(ctx, geminiTrieFindScript, []string{trieKey}, digestChain, ttlSeconds).Result() - if err != nil || result == nil { - return "", 0, false - } - - value, ok := result.(string) - if !ok || value == "" { - return "", 0, false - } - - uuid, accountID, ok = service.ParseGeminiSessionValue(value) - return uuid, accountID, ok -} - -// SaveAnthropicSession 保存 Anthropic 会话(复用 Gemini Trie Lua 脚本) -func (c *gatewayCache) SaveAnthropicSession(ctx context.Context, groupID int64, prefixHash, 
digestChain, uuid string, accountID int64) error { - if digestChain == "" { - return nil - } - - trieKey := service.BuildAnthropicTrieKey(groupID, prefixHash) - value := service.FormatGeminiSessionValue(uuid, accountID) - ttlSeconds := int(service.AnthropicSessionTTL().Seconds()) - - return c.rdb.Eval(ctx, geminiTrieSaveScript, []string{trieKey}, digestChain, value, ttlSeconds).Err() -} diff --git a/backend/internal/repository/gateway_cache_integration_test.go b/backend/internal/repository/gateway_cache_integration_test.go index fc8e7372..2fdaa3d1 100644 --- a/backend/internal/repository/gateway_cache_integration_test.go +++ b/backend/internal/repository/gateway_cache_integration_test.go @@ -104,157 +104,6 @@ func (s *GatewayCacheSuite) TestGetSessionAccountID_CorruptedValue() { require.False(s.T(), errors.Is(err, redis.Nil), "expected parsing error, not redis.Nil") } -// ============ Gemini Trie 会话测试 ============ - -func (s *GatewayCacheSuite) TestGeminiSessionTrie_SaveAndFind() { - groupID := int64(1) - prefixHash := "testprefix" - digestChain := "u:hash1-m:hash2-u:hash3" - uuid := "test-uuid-123" - accountID := int64(42) - - // 保存会话 - err := s.cache.SaveGeminiSession(s.ctx, groupID, prefixHash, digestChain, uuid, accountID) - require.NoError(s.T(), err, "SaveGeminiSession") - - // 精确匹配查找 - foundUUID, foundAccountID, found := s.cache.FindGeminiSession(s.ctx, groupID, prefixHash, digestChain) - require.True(s.T(), found, "should find exact match") - require.Equal(s.T(), uuid, foundUUID) - require.Equal(s.T(), accountID, foundAccountID) -} - -func (s *GatewayCacheSuite) TestGeminiSessionTrie_PrefixMatch() { - groupID := int64(1) - prefixHash := "prefixmatch" - shortChain := "u:a-m:b" - longChain := "u:a-m:b-u:c-m:d" - uuid := "uuid-prefix" - accountID := int64(100) - - // 保存短链 - err := s.cache.SaveGeminiSession(s.ctx, groupID, prefixHash, shortChain, uuid, accountID) - require.NoError(s.T(), err) - - // 用长链查找,应该匹配到短链(前缀匹配) - foundUUID, foundAccountID, found := 
s.cache.FindGeminiSession(s.ctx, groupID, prefixHash, longChain) - require.True(s.T(), found, "should find prefix match") - require.Equal(s.T(), uuid, foundUUID) - require.Equal(s.T(), accountID, foundAccountID) -} - -func (s *GatewayCacheSuite) TestGeminiSessionTrie_LongestPrefixMatch() { - groupID := int64(1) - prefixHash := "longestmatch" - - // 保存多个不同长度的链 - err := s.cache.SaveGeminiSession(s.ctx, groupID, prefixHash, "u:a", "uuid-short", 1) - require.NoError(s.T(), err) - err = s.cache.SaveGeminiSession(s.ctx, groupID, prefixHash, "u:a-m:b", "uuid-medium", 2) - require.NoError(s.T(), err) - err = s.cache.SaveGeminiSession(s.ctx, groupID, prefixHash, "u:a-m:b-u:c", "uuid-long", 3) - require.NoError(s.T(), err) - - // 查找更长的链,应该匹配到最长的前缀 - foundUUID, foundAccountID, found := s.cache.FindGeminiSession(s.ctx, groupID, prefixHash, "u:a-m:b-u:c-m:d-u:e") - require.True(s.T(), found, "should find longest prefix match") - require.Equal(s.T(), "uuid-long", foundUUID) - require.Equal(s.T(), int64(3), foundAccountID) - - // 查找中等长度的链 - foundUUID, foundAccountID, found = s.cache.FindGeminiSession(s.ctx, groupID, prefixHash, "u:a-m:b-u:x") - require.True(s.T(), found) - require.Equal(s.T(), "uuid-medium", foundUUID) - require.Equal(s.T(), int64(2), foundAccountID) -} - -func (s *GatewayCacheSuite) TestGeminiSessionTrie_NoMatch() { - groupID := int64(1) - prefixHash := "nomatch" - digestChain := "u:a-m:b" - - // 保存一个会话 - err := s.cache.SaveGeminiSession(s.ctx, groupID, prefixHash, digestChain, "uuid", 1) - require.NoError(s.T(), err) - - // 用不同的链查找,应该找不到 - _, _, found := s.cache.FindGeminiSession(s.ctx, groupID, prefixHash, "u:x-m:y") - require.False(s.T(), found, "should not find non-matching chain") -} - -func (s *GatewayCacheSuite) TestGeminiSessionTrie_DifferentPrefixHash() { - groupID := int64(1) - digestChain := "u:a-m:b" - - // 保存到 prefixHash1 - err := s.cache.SaveGeminiSession(s.ctx, groupID, "prefix1", digestChain, "uuid1", 1) - require.NoError(s.T(), err) - - // 用 
prefixHash2 查找,应该找不到(不同用户/客户端隔离) - _, _, found := s.cache.FindGeminiSession(s.ctx, groupID, "prefix2", digestChain) - require.False(s.T(), found, "different prefixHash should be isolated") -} - -func (s *GatewayCacheSuite) TestGeminiSessionTrie_DifferentGroupID() { - prefixHash := "sameprefix" - digestChain := "u:a-m:b" - - // 保存到 groupID 1 - err := s.cache.SaveGeminiSession(s.ctx, 1, prefixHash, digestChain, "uuid1", 1) - require.NoError(s.T(), err) - - // 用 groupID 2 查找,应该找不到(分组隔离) - _, _, found := s.cache.FindGeminiSession(s.ctx, 2, prefixHash, digestChain) - require.False(s.T(), found, "different groupID should be isolated") -} - -func (s *GatewayCacheSuite) TestGeminiSessionTrie_EmptyDigestChain() { - groupID := int64(1) - prefixHash := "emptytest" - - // 空链不应该保存 - err := s.cache.SaveGeminiSession(s.ctx, groupID, prefixHash, "", "uuid", 1) - require.NoError(s.T(), err, "empty chain should not error") - - // 空链查找应该返回 false - _, _, found := s.cache.FindGeminiSession(s.ctx, groupID, prefixHash, "") - require.False(s.T(), found, "empty chain should not match") -} - -func (s *GatewayCacheSuite) TestGeminiSessionTrie_MultipleSessions() { - groupID := int64(1) - prefixHash := "multisession" - - // 保存多个不同会话(模拟 1000 个并发会话的场景) - sessions := []struct { - chain string - uuid string - accountID int64 - }{ - {"u:session1", "uuid-1", 1}, - {"u:session2-m:reply2", "uuid-2", 2}, - {"u:session3-m:reply3-u:msg3", "uuid-3", 3}, - } - - for _, sess := range sessions { - err := s.cache.SaveGeminiSession(s.ctx, groupID, prefixHash, sess.chain, sess.uuid, sess.accountID) - require.NoError(s.T(), err) - } - - // 验证每个会话都能正确查找 - for _, sess := range sessions { - foundUUID, foundAccountID, found := s.cache.FindGeminiSession(s.ctx, groupID, prefixHash, sess.chain) - require.True(s.T(), found, "should find session: %s", sess.chain) - require.Equal(s.T(), sess.uuid, foundUUID) - require.Equal(s.T(), sess.accountID, foundAccountID) - } - - // 验证继续对话的场景 - foundUUID, foundAccountID, found := 
s.cache.FindGeminiSession(s.ctx, groupID, prefixHash, "u:session2-m:reply2-u:newmsg") - require.True(s.T(), found) - require.Equal(s.T(), "uuid-2", foundUUID) - require.Equal(s.T(), int64(2), foundAccountID) -} func TestGatewayCacheSuite(t *testing.T) { suite.Run(t, new(GatewayCacheSuite)) diff --git a/backend/internal/repository/gateway_cache_model_load_integration_test.go b/backend/internal/repository/gateway_cache_model_load_integration_test.go deleted file mode 100644 index de6fa5ae..00000000 --- a/backend/internal/repository/gateway_cache_model_load_integration_test.go +++ /dev/null @@ -1,234 +0,0 @@ -//go:build integration - -package repository - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" -) - -// ============ Gateway Cache 模型负载统计集成测试 ============ - -type GatewayCacheModelLoadSuite struct { - suite.Suite -} - -func TestGatewayCacheModelLoadSuite(t *testing.T) { - suite.Run(t, new(GatewayCacheModelLoadSuite)) -} - -func (s *GatewayCacheModelLoadSuite) TestIncrModelCallCount_Basic() { - t := s.T() - rdb := testRedis(t) - cache := &gatewayCache{rdb: rdb} - ctx := context.Background() - - accountID := int64(123) - model := "claude-sonnet-4-20250514" - - // 首次调用应返回 1 - count1, err := cache.IncrModelCallCount(ctx, accountID, model) - require.NoError(t, err) - require.Equal(t, int64(1), count1) - - // 第二次调用应返回 2 - count2, err := cache.IncrModelCallCount(ctx, accountID, model) - require.NoError(t, err) - require.Equal(t, int64(2), count2) - - // 第三次调用应返回 3 - count3, err := cache.IncrModelCallCount(ctx, accountID, model) - require.NoError(t, err) - require.Equal(t, int64(3), count3) -} - -func (s *GatewayCacheModelLoadSuite) TestIncrModelCallCount_DifferentModels() { - t := s.T() - rdb := testRedis(t) - cache := &gatewayCache{rdb: rdb} - ctx := context.Background() - - accountID := int64(456) - model1 := "claude-sonnet-4-20250514" - model2 := "claude-opus-4-5-20251101" - - // 不同模型应该独立计数 - 
count1, err := cache.IncrModelCallCount(ctx, accountID, model1) - require.NoError(t, err) - require.Equal(t, int64(1), count1) - - count2, err := cache.IncrModelCallCount(ctx, accountID, model2) - require.NoError(t, err) - require.Equal(t, int64(1), count2) - - count1Again, err := cache.IncrModelCallCount(ctx, accountID, model1) - require.NoError(t, err) - require.Equal(t, int64(2), count1Again) -} - -func (s *GatewayCacheModelLoadSuite) TestIncrModelCallCount_DifferentAccounts() { - t := s.T() - rdb := testRedis(t) - cache := &gatewayCache{rdb: rdb} - ctx := context.Background() - - account1 := int64(111) - account2 := int64(222) - model := "gemini-2.5-pro" - - // 不同账号应该独立计数 - count1, err := cache.IncrModelCallCount(ctx, account1, model) - require.NoError(t, err) - require.Equal(t, int64(1), count1) - - count2, err := cache.IncrModelCallCount(ctx, account2, model) - require.NoError(t, err) - require.Equal(t, int64(1), count2) -} - -func (s *GatewayCacheModelLoadSuite) TestGetModelLoadBatch_Empty() { - t := s.T() - rdb := testRedis(t) - cache := &gatewayCache{rdb: rdb} - ctx := context.Background() - - result, err := cache.GetModelLoadBatch(ctx, []int64{}, "any-model") - require.NoError(t, err) - require.NotNil(t, result) - require.Empty(t, result) -} - -func (s *GatewayCacheModelLoadSuite) TestGetModelLoadBatch_NonExistent() { - t := s.T() - rdb := testRedis(t) - cache := &gatewayCache{rdb: rdb} - ctx := context.Background() - - // 查询不存在的账号应返回零值 - result, err := cache.GetModelLoadBatch(ctx, []int64{9999, 9998}, "claude-sonnet-4-20250514") - require.NoError(t, err) - require.Len(t, result, 2) - - require.Equal(t, int64(0), result[9999].CallCount) - require.True(t, result[9999].LastUsedAt.IsZero()) - require.Equal(t, int64(0), result[9998].CallCount) - require.True(t, result[9998].LastUsedAt.IsZero()) -} - -func (s *GatewayCacheModelLoadSuite) TestGetModelLoadBatch_AfterIncrement() { - t := s.T() - rdb := testRedis(t) - cache := &gatewayCache{rdb: rdb} - ctx := 
context.Background() - - accountID := int64(789) - model := "claude-sonnet-4-20250514" - - // 先增加调用次数 - beforeIncr := time.Now() - _, err := cache.IncrModelCallCount(ctx, accountID, model) - require.NoError(t, err) - _, err = cache.IncrModelCallCount(ctx, accountID, model) - require.NoError(t, err) - _, err = cache.IncrModelCallCount(ctx, accountID, model) - require.NoError(t, err) - afterIncr := time.Now() - - // 获取负载信息 - result, err := cache.GetModelLoadBatch(ctx, []int64{accountID}, model) - require.NoError(t, err) - require.Len(t, result, 1) - - loadInfo := result[accountID] - require.NotNil(t, loadInfo) - require.Equal(t, int64(3), loadInfo.CallCount) - require.False(t, loadInfo.LastUsedAt.IsZero()) - // LastUsedAt 应该在 beforeIncr 和 afterIncr 之间 - require.True(t, loadInfo.LastUsedAt.After(beforeIncr.Add(-time.Second)) || loadInfo.LastUsedAt.Equal(beforeIncr)) - require.True(t, loadInfo.LastUsedAt.Before(afterIncr.Add(time.Second)) || loadInfo.LastUsedAt.Equal(afterIncr)) -} - -func (s *GatewayCacheModelLoadSuite) TestGetModelLoadBatch_MultipleAccounts() { - t := s.T() - rdb := testRedis(t) - cache := &gatewayCache{rdb: rdb} - ctx := context.Background() - - model := "claude-opus-4-5-20251101" - account1 := int64(1001) - account2 := int64(1002) - account3 := int64(1003) // 不调用 - - // account1 调用 2 次 - _, err := cache.IncrModelCallCount(ctx, account1, model) - require.NoError(t, err) - _, err = cache.IncrModelCallCount(ctx, account1, model) - require.NoError(t, err) - - // account2 调用 5 次 - for i := 0; i < 5; i++ { - _, err = cache.IncrModelCallCount(ctx, account2, model) - require.NoError(t, err) - } - - // 批量获取 - result, err := cache.GetModelLoadBatch(ctx, []int64{account1, account2, account3}, model) - require.NoError(t, err) - require.Len(t, result, 3) - - require.Equal(t, int64(2), result[account1].CallCount) - require.False(t, result[account1].LastUsedAt.IsZero()) - - require.Equal(t, int64(5), result[account2].CallCount) - require.False(t, 
result[account2].LastUsedAt.IsZero()) - - require.Equal(t, int64(0), result[account3].CallCount) - require.True(t, result[account3].LastUsedAt.IsZero()) -} - -func (s *GatewayCacheModelLoadSuite) TestGetModelLoadBatch_ModelIsolation() { - t := s.T() - rdb := testRedis(t) - cache := &gatewayCache{rdb: rdb} - ctx := context.Background() - - accountID := int64(2001) - model1 := "claude-sonnet-4-20250514" - model2 := "gemini-2.5-pro" - - // 对 model1 调用 3 次 - for i := 0; i < 3; i++ { - _, err := cache.IncrModelCallCount(ctx, accountID, model1) - require.NoError(t, err) - } - - // 获取 model1 的负载 - result1, err := cache.GetModelLoadBatch(ctx, []int64{accountID}, model1) - require.NoError(t, err) - require.Equal(t, int64(3), result1[accountID].CallCount) - - // 获取 model2 的负载(应该为 0) - result2, err := cache.GetModelLoadBatch(ctx, []int64{accountID}, model2) - require.NoError(t, err) - require.Equal(t, int64(0), result2[accountID].CallCount) -} - -// ============ 辅助函数测试 ============ - -func (s *GatewayCacheModelLoadSuite) TestModelLoadKey_Format() { - t := s.T() - - key := modelLoadKey(123, "claude-sonnet-4") - require.Equal(t, "ag:model_load:123:claude-sonnet-4", key) -} - -func (s *GatewayCacheModelLoadSuite) TestModelLastUsedKey_Format() { - t := s.T() - - key := modelLastUsedKey(456, "gemini-2.5-pro") - require.Equal(t, "ag:model_last_used:456:gemini-2.5-pro", key) -} diff --git a/backend/internal/server/api_contract_test.go b/backend/internal/server/api_contract_test.go index efef0452..9b571123 100644 --- a/backend/internal/server/api_contract_test.go +++ b/backend/internal/server/api_contract_test.go @@ -1004,10 +1004,6 @@ func (s *stubAccountRepo) SetRateLimited(ctx context.Context, id int64, resetAt return errors.New("not implemented") } -func (s *stubAccountRepo) SetAntigravityQuotaScopeLimit(ctx context.Context, id int64, scope service.AntigravityQuotaScope, resetAt time.Time) error { - return errors.New("not implemented") -} - func (s *stubAccountRepo) 
SetModelRateLimit(ctx context.Context, id int64, scope string, resetAt time.Time) error { return errors.New("not implemented") } diff --git a/backend/internal/service/account_service.go b/backend/internal/service/account_service.go index 90365d2f..9bf58988 100644 --- a/backend/internal/service/account_service.go +++ b/backend/internal/service/account_service.go @@ -50,7 +50,6 @@ type AccountRepository interface { ListSchedulableByGroupIDAndPlatforms(ctx context.Context, groupID int64, platforms []string) ([]Account, error) SetRateLimited(ctx context.Context, id int64, resetAt time.Time) error - SetAntigravityQuotaScopeLimit(ctx context.Context, id int64, scope AntigravityQuotaScope, resetAt time.Time) error SetModelRateLimit(ctx context.Context, id int64, scope string, resetAt time.Time) error SetOverloaded(ctx context.Context, id int64, until time.Time) error SetTempUnschedulable(ctx context.Context, id int64, until time.Time, reason string) error diff --git a/backend/internal/service/account_service_delete_test.go b/backend/internal/service/account_service_delete_test.go index e5eabfc6..af3a3784 100644 --- a/backend/internal/service/account_service_delete_test.go +++ b/backend/internal/service/account_service_delete_test.go @@ -143,10 +143,6 @@ func (s *accountRepoStub) SetRateLimited(ctx context.Context, id int64, resetAt panic("unexpected SetRateLimited call") } -func (s *accountRepoStub) SetAntigravityQuotaScopeLimit(ctx context.Context, id int64, scope AntigravityQuotaScope, resetAt time.Time) error { - panic("unexpected SetAntigravityQuotaScopeLimit call") -} - func (s *accountRepoStub) SetModelRateLimit(ctx context.Context, id int64, scope string, resetAt time.Time) error { panic("unexpected SetModelRateLimit call") } diff --git a/backend/internal/service/anthropic_session.go b/backend/internal/service/anthropic_session.go index 2d86ed35..26544c68 100644 --- a/backend/internal/service/anthropic_session.go +++ b/backend/internal/service/anthropic_session.go 
@@ -2,7 +2,6 @@ package service import ( "encoding/json" - "strconv" "strings" "time" ) @@ -12,9 +11,6 @@ const ( // anthropicSessionTTLSeconds Anthropic 会话缓存 TTL(5 分钟) anthropicSessionTTLSeconds = 300 - // anthropicTrieKeyPrefix Anthropic Trie 会话 key 前缀 - anthropicTrieKeyPrefix = "anthropic:trie:" - // anthropicDigestSessionKeyPrefix Anthropic 摘要 fallback 会话 key 前缀 anthropicDigestSessionKeyPrefix = "anthropic:digest:" ) @@ -68,12 +64,6 @@ func rolePrefix(role string) string { } } -// BuildAnthropicTrieKey 构建 Anthropic Trie Redis key -// 格式: anthropic:trie:{groupID}:{prefixHash} -func BuildAnthropicTrieKey(groupID int64, prefixHash string) string { - return anthropicTrieKeyPrefix + strconv.FormatInt(groupID, 10) + ":" + prefixHash -} - // GenerateAnthropicDigestSessionKey 生成 Anthropic 摘要 fallback 的 sessionKey // 组合 prefixHash 前 8 位 + uuid 前 8 位,确保不同会话产生不同的 sessionKey func GenerateAnthropicDigestSessionKey(prefixHash, uuid string) string { diff --git a/backend/internal/service/anthropic_session_test.go b/backend/internal/service/anthropic_session_test.go index e2f873e7..10406643 100644 --- a/backend/internal/service/anthropic_session_test.go +++ b/backend/internal/service/anthropic_session_test.go @@ -236,43 +236,6 @@ func TestBuildAnthropicDigestChain_Deterministic(t *testing.T) { } } -func TestBuildAnthropicTrieKey(t *testing.T) { - tests := []struct { - name string - groupID int64 - prefixHash string - want string - }{ - { - name: "normal", - groupID: 123, - prefixHash: "abcdef12", - want: "anthropic:trie:123:abcdef12", - }, - { - name: "zero group", - groupID: 0, - prefixHash: "xyz", - want: "anthropic:trie:0:xyz", - }, - { - name: "empty prefix", - groupID: 1, - prefixHash: "", - want: "anthropic:trie:1:", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := BuildAnthropicTrieKey(tt.groupID, tt.prefixHash) - if got != tt.want { - t.Errorf("BuildAnthropicTrieKey(%d, %q) = %q, want %q", tt.groupID, tt.prefixHash, got, tt.want) - 
} - }) - } -} - func TestGenerateAnthropicDigestSessionKey(t *testing.T) { tests := []struct { name string diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 3caf9a93..014b3c86 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -9,6 +9,7 @@ import ( "fmt" "io" "log" + "log/slog" mathrand "math/rand" "net" "net/http" @@ -100,12 +101,11 @@ type antigravityRetryLoopParams struct { accessToken string action string body []byte - quotaScope AntigravityQuotaScope c *gin.Context httpUpstream HTTPUpstream settingService *SettingService accountRepo AccountRepository // 用于智能重试的模型级别限流 - handleError func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult + handleError func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, requestedModel string, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult requestedModel string // 用于限流检查的原始请求模型 isStickySession bool // 是否为粘性会话(用于账号切换时的缓存计费判断) groupID int64 // 用于模型级限流时清除粘性会话 @@ -148,13 +148,17 @@ func (s *AntigravityGatewayService) handleSmartRetry(p antigravityRetryLoopParam // 情况1: retryDelay >= 阈值,限流模型并切换账号 if shouldRateLimitModel { - log.Printf("%s status=%d oauth_long_delay model=%s account=%d (model rate limit, switch account)", - p.prefix, resp.StatusCode, modelName, p.account.ID) + rateLimitDuration := waitDuration + if rateLimitDuration <= 0 { + rateLimitDuration = antigravityDefaultRateLimitDuration + } + log.Printf("%s status=%d oauth_long_delay model=%s account=%d upstream_retry_delay=%v body=%s (model rate limit, switch account)", + p.prefix, resp.StatusCode, modelName, p.account.ID, rateLimitDuration, 
truncateForLog(respBody, 200)) - resetAt := time.Now().Add(antigravityDefaultRateLimitDuration) + resetAt := time.Now().Add(rateLimitDuration) if !setModelRateLimitByModelName(p.ctx, p.accountRepo, p.account.ID, modelName, p.prefix, resp.StatusCode, resetAt, false) { - p.handleError(p.ctx, p.prefix, p.account, resp.StatusCode, resp.Header, respBody, p.quotaScope, p.groupID, p.sessionHash, p.isStickySession) - log.Printf("%s status=%d rate_limited account=%d (no scope mapping)", p.prefix, resp.StatusCode, p.account.ID) + p.handleError(p.ctx, p.prefix, p.account, resp.StatusCode, resp.Header, respBody, p.requestedModel, p.groupID, p.sessionHash, p.isStickySession) + log.Printf("%s status=%d rate_limited account=%d (no model mapping)", p.prefix, resp.StatusCode, p.account.ID) } else { s.updateAccountModelRateLimitInCache(p.ctx, p.account, modelName, resetAt) } @@ -190,7 +194,7 @@ func (s *AntigravityGatewayService) handleSmartRetry(p antigravityRetryLoopParam retryReq, err := antigravity.NewAPIRequestWithURL(p.ctx, baseURL, p.action, p.accessToken, p.body) if err != nil { log.Printf("%s status=smart_retry_request_build_failed error=%v", p.prefix, err) - p.handleError(p.ctx, p.prefix, p.account, resp.StatusCode, resp.Header, respBody, p.quotaScope, p.groupID, p.sessionHash, p.isStickySession) + p.handleError(p.ctx, p.prefix, p.account, resp.StatusCode, resp.Header, respBody, p.requestedModel, p.groupID, p.sessionHash, p.isStickySession) return &smartRetryResult{ action: smartRetryActionBreakWithResp, resp: &http.Response{ @@ -233,16 +237,24 @@ func (s *AntigravityGatewayService) handleSmartRetry(p antigravityRetryLoopParam } // 所有重试都失败,限流当前模型并切换账号 - log.Printf("%s status=%d smart_retry_exhausted attempts=%d model=%s account=%d (switch account)", - p.prefix, resp.StatusCode, antigravitySmartRetryMaxAttempts, modelName, p.account.ID) + rateLimitDuration := waitDuration + if rateLimitDuration <= 0 { + rateLimitDuration = antigravityDefaultRateLimitDuration + } + retryBody 
:= lastRetryBody + if retryBody == nil { + retryBody = respBody + } + log.Printf("%s status=%d smart_retry_exhausted attempts=%d model=%s account=%d upstream_retry_delay=%v body=%s (switch account)", + p.prefix, resp.StatusCode, antigravitySmartRetryMaxAttempts, modelName, p.account.ID, rateLimitDuration, truncateForLog(retryBody, 200)) - resetAt := time.Now().Add(antigravityDefaultRateLimitDuration) + resetAt := time.Now().Add(rateLimitDuration) if p.accountRepo != nil && modelName != "" { if err := p.accountRepo.SetModelRateLimit(p.ctx, p.account.ID, modelName, resetAt); err != nil { log.Printf("%s status=%d model_rate_limit_failed model=%s error=%v", p.prefix, resp.StatusCode, modelName, err) } else { log.Printf("%s status=%d model_rate_limited_after_smart_retry model=%s account=%d reset_in=%v", - p.prefix, resp.StatusCode, modelName, p.account.ID, antigravityDefaultRateLimitDuration) + p.prefix, resp.StatusCode, modelName, p.account.ID, rateLimitDuration) s.updateAccountModelRateLimitInCache(p.ctx, p.account, modelName, resetAt) } } @@ -353,87 +365,102 @@ urlFallbackLoop: return nil, fmt.Errorf("upstream request failed after retries: %w", err) } - // 429/503 限流处理:区分 URL 级别限流、智能重试和账户配额限流 - if resp.StatusCode == http.StatusTooManyRequests || resp.StatusCode == http.StatusServiceUnavailable { + // 统一处理错误响应 + if resp.StatusCode >= 400 { respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) _ = resp.Body.Close() - // 尝试智能重试处理(OAuth 账号专用) - smartResult := s.handleSmartRetry(p, resp, respBody, baseURL, urlIdx, availableURLs) - switch smartResult.action { - case smartRetryActionContinueURL: - continue urlFallbackLoop - case smartRetryActionBreakWithResp: - if smartResult.err != nil { - return nil, smartResult.err + // ★ 统一入口:自定义错误码 + 临时不可调度 + if handled, policyErr := s.applyErrorPolicy(p, resp.StatusCode, resp.Header, respBody); handled { + if policyErr != nil { + return nil, policyErr } - // 模型限流时返回切换账号信号 - if smartResult.switchError != nil { - return nil, 
smartResult.switchError + resp = &http.Response{ + StatusCode: resp.StatusCode, + Header: resp.Header.Clone(), + Body: io.NopCloser(bytes.NewReader(respBody)), } - resp = smartResult.resp break urlFallbackLoop } - // smartRetryActionContinue: 继续默认重试逻辑 - // 账户/模型配额限流,重试 3 次(指数退避)- 默认逻辑(非 OAuth 账号或解析失败) - if attempt < antigravityMaxRetries { - upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(respBody)) - upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) - appendOpsUpstreamError(p.c, OpsUpstreamErrorEvent{ - Platform: p.account.Platform, - AccountID: p.account.ID, - AccountName: p.account.Name, - UpstreamStatusCode: resp.StatusCode, - UpstreamRequestID: resp.Header.Get("x-request-id"), - Kind: "retry", - Message: upstreamMsg, - Detail: getUpstreamDetail(respBody), - }) - log.Printf("%s status=%d retry=%d/%d body=%s", p.prefix, resp.StatusCode, attempt, antigravityMaxRetries, truncateForLog(respBody, 200)) - if !sleepAntigravityBackoffWithContext(p.ctx, attempt) { - log.Printf("%s status=context_canceled_during_backoff", p.prefix) - return nil, p.ctx.Err() + // 429/503 限流处理:区分 URL 级别限流、智能重试和账户配额限流 + if resp.StatusCode == http.StatusTooManyRequests || resp.StatusCode == http.StatusServiceUnavailable { + // 尝试智能重试处理(OAuth 账号专用) + smartResult := s.handleSmartRetry(p, resp, respBody, baseURL, urlIdx, availableURLs) + switch smartResult.action { + case smartRetryActionContinueURL: + continue urlFallbackLoop + case smartRetryActionBreakWithResp: + if smartResult.err != nil { + return nil, smartResult.err + } + // 模型限流时返回切换账号信号 + if smartResult.switchError != nil { + return nil, smartResult.switchError + } + resp = smartResult.resp + break urlFallbackLoop } - continue + // smartRetryActionContinue: 继续默认重试逻辑 + + // 账户/模型配额限流,重试 3 次(指数退避)- 默认逻辑(非 OAuth 账号或解析失败) + if attempt < antigravityMaxRetries { + upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + 
appendOpsUpstreamError(p.c, OpsUpstreamErrorEvent{ + Platform: p.account.Platform, + AccountID: p.account.ID, + AccountName: p.account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: resp.Header.Get("x-request-id"), + Kind: "retry", + Message: upstreamMsg, + Detail: getUpstreamDetail(respBody), + }) + log.Printf("%s status=%d retry=%d/%d body=%s", p.prefix, resp.StatusCode, attempt, antigravityMaxRetries, truncateForLog(respBody, 200)) + if !sleepAntigravityBackoffWithContext(p.ctx, attempt) { + log.Printf("%s status=context_canceled_during_backoff", p.prefix) + return nil, p.ctx.Err() + } + continue + } + + // 重试用尽,标记账户限流 + p.handleError(p.ctx, p.prefix, p.account, resp.StatusCode, resp.Header, respBody, p.requestedModel, p.groupID, p.sessionHash, p.isStickySession) + log.Printf("%s status=%d rate_limited base_url=%s body=%s", p.prefix, resp.StatusCode, baseURL, truncateForLog(respBody, 200)) + resp = &http.Response{ + StatusCode: resp.StatusCode, + Header: resp.Header.Clone(), + Body: io.NopCloser(bytes.NewReader(respBody)), + } + break urlFallbackLoop } - // 重试用尽,标记账户限流 - p.handleError(p.ctx, p.prefix, p.account, resp.StatusCode, resp.Header, respBody, p.quotaScope, p.groupID, p.sessionHash, p.isStickySession) - log.Printf("%s status=%d rate_limited base_url=%s body=%s", p.prefix, resp.StatusCode, baseURL, truncateForLog(respBody, 200)) - resp = &http.Response{ - StatusCode: resp.StatusCode, - Header: resp.Header.Clone(), - Body: io.NopCloser(bytes.NewReader(respBody)), - } - break urlFallbackLoop - } - - // 其他可重试错误(不包括 429 和 503,因为上面已处理) - if resp.StatusCode >= 400 && shouldRetryAntigravityError(resp.StatusCode) { - respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) - _ = resp.Body.Close() - - if attempt < antigravityMaxRetries { - upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(respBody)) - upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) - appendOpsUpstreamError(p.c, OpsUpstreamErrorEvent{ - Platform: 
p.account.Platform, - AccountID: p.account.ID, - AccountName: p.account.Name, - UpstreamStatusCode: resp.StatusCode, - UpstreamRequestID: resp.Header.Get("x-request-id"), - Kind: "retry", - Message: upstreamMsg, - Detail: getUpstreamDetail(respBody), - }) - log.Printf("%s status=%d retry=%d/%d body=%s", p.prefix, resp.StatusCode, attempt, antigravityMaxRetries, truncateForLog(respBody, 500)) - if !sleepAntigravityBackoffWithContext(p.ctx, attempt) { - log.Printf("%s status=context_canceled_during_backoff", p.prefix) - return nil, p.ctx.Err() - } - continue - } + // 其他可重试错误(500/502/504/529,不包括 429 和 503) + if shouldRetryAntigravityError(resp.StatusCode) { + if attempt < antigravityMaxRetries { + upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + appendOpsUpstreamError(p.c, OpsUpstreamErrorEvent{ + Platform: p.account.Platform, + AccountID: p.account.ID, + AccountName: p.account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: resp.Header.Get("x-request-id"), + Kind: "retry", + Message: upstreamMsg, + Detail: getUpstreamDetail(respBody), + }) + log.Printf("%s status=%d retry=%d/%d body=%s", p.prefix, resp.StatusCode, attempt, antigravityMaxRetries, truncateForLog(respBody, 500)) + if !sleepAntigravityBackoffWithContext(p.ctx, attempt) { + log.Printf("%s status=context_canceled_during_backoff", p.prefix) + return nil, p.ctx.Err() + } + continue + } + } + + // 其他 4xx 错误或重试用尽,直接返回 resp = &http.Response{ StatusCode: resp.StatusCode, Header: resp.Header.Clone(), @@ -442,6 +469,7 @@ urlFallbackLoop: break urlFallbackLoop } + // 成功响应(< 400) break urlFallbackLoop } } @@ -574,6 +602,31 @@ func (s *AntigravityGatewayService) getUpstreamErrorDetail(body []byte) string { return truncateString(string(body), maxBytes) } +// checkErrorPolicy nil 安全的包装 +func (s *AntigravityGatewayService) checkErrorPolicy(ctx context.Context, account *Account, statusCode int, body []byte) 
ErrorPolicyResult { + if s.rateLimitService == nil { + return ErrorPolicyNone + } + return s.rateLimitService.CheckErrorPolicy(ctx, account, statusCode, body) +} + +// applyErrorPolicy 应用错误策略结果,返回是否应终止当前循环 +func (s *AntigravityGatewayService) applyErrorPolicy(p antigravityRetryLoopParams, statusCode int, headers http.Header, respBody []byte) (handled bool, retErr error) { + switch s.checkErrorPolicy(p.ctx, p.account, statusCode, respBody) { + case ErrorPolicySkipped: + return true, nil + case ErrorPolicyMatched: + _ = p.handleError(p.ctx, p.prefix, p.account, statusCode, headers, respBody, + p.requestedModel, p.groupID, p.sessionHash, p.isStickySession) + return true, nil + case ErrorPolicyTempUnscheduled: + slog.Info("temp_unschedulable_matched", + "prefix", p.prefix, "status_code", statusCode, "account_id", p.account.ID) + return true, &AntigravityAccountSwitchError{OriginalAccountID: p.account.ID, IsStickySession: p.isStickySession} + } + return false, nil +} + // mapAntigravityModel 获取映射后的模型名 // 完全依赖映射配置:账户映射(通配符)→ 默认映射兜底(DefaultAntigravityModelMapping) // 注意:返回空字符串表示模型不被支持,调度时会过滤掉该账号 @@ -969,6 +1022,11 @@ func isModelNotFoundError(statusCode int, body []byte) bool { // ├─ 成功 → 正常返回 // └─ 失败 → 设置模型限流 + 清除粘性绑定 → 切换账号 func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, account *Account, body []byte, isStickySession bool) (*ForwardResult, error) { + // 上游透传账号直接转发,不走 OAuth token 刷新 + if account.Type == AccountTypeUpstream { + return s.ForwardUpstream(ctx, c, account, body) + } + startTime := time.Now() sessionID := getSessionID(c) @@ -988,11 +1046,9 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, if mappedModel == "" { return nil, s.writeClaudeError(c, http.StatusForbidden, "permission_error", fmt.Sprintf("model %s not in whitelist", claudeReq.Model)) } - loadModel := mappedModel // 应用 thinking 模式自动后缀:如果 thinking 开启且目标是 claude-sonnet-4-5,自动改为 thinking 版本 thinkingEnabled := claudeReq.Thinking != nil && 
claudeReq.Thinking.Type == "enabled" mappedModel = applyThinkingModelSuffix(mappedModel, thinkingEnabled) - quotaScope, _ := resolveAntigravityQuotaScope(originalModel) // 获取 access_token if s.tokenProvider == nil { @@ -1027,11 +1083,6 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, // 如果客户端请求非流式,在响应处理阶段会收集完整流式响应后转换返回 action := "streamGenerateContent" - // 统计模型调用次数(包括粘性会话,用于负载均衡调度) - if s.cache != nil { - _, _ = s.cache.IncrModelCallCount(ctx, account.ID, loadModel) - } - // 执行带重试的请求 result, err := s.antigravityRetryLoop(antigravityRetryLoopParams{ ctx: ctx, @@ -1041,7 +1092,6 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, accessToken: accessToken, action: action, body: geminiBody, - quotaScope: quotaScope, c: c, httpUpstream: s.httpUpstream, settingService: s.settingService, @@ -1122,7 +1172,6 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, accessToken: accessToken, action: action, body: retryGeminiBody, - quotaScope: quotaScope, c: c, httpUpstream: s.httpUpstream, settingService: s.settingService, @@ -1233,7 +1282,7 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, } } - s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope, 0, "", isStickySession) + s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, originalModel, 0, "", isStickySession) if s.shouldFailoverUpstreamError(resp.StatusCode) { upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(respBody)) @@ -1263,6 +1312,7 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, var usage *ClaudeUsage var firstTokenMs *int + var clientDisconnect bool if claudeReq.Stream { // 客户端要求流式,直接透传转换 streamRes, err := s.handleClaudeStreamingResponse(c, resp, startTime, originalModel) @@ -1272,6 +1322,7 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c 
*gin.Context, } usage = streamRes.usage firstTokenMs = streamRes.firstTokenMs + clientDisconnect = streamRes.clientDisconnect } else { // 客户端要求非流式,收集流式响应后转换返回 streamRes, err := s.handleClaudeStreamToNonStreaming(c, resp, startTime, originalModel) @@ -1284,12 +1335,13 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, } return &ForwardResult{ - RequestID: requestID, - Usage: *usage, - Model: originalModel, // 使用原始模型用于计费和日志 - Stream: claudeReq.Stream, - Duration: time.Since(startTime), - FirstTokenMs: firstTokenMs, + RequestID: requestID, + Usage: *usage, + Model: originalModel, // 使用原始模型用于计费和日志 + Stream: claudeReq.Stream, + Duration: time.Since(startTime), + FirstTokenMs: firstTokenMs, + ClientDisconnect: clientDisconnect, }, nil } @@ -1613,7 +1665,6 @@ func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Co if len(body) == 0 { return nil, s.writeGoogleError(c, http.StatusBadRequest, "Request body is empty") } - quotaScope, _ := resolveAntigravityQuotaScope(originalModel) // 解析请求以获取 image_size(用于图片计费) imageSize := s.extractImageSize(body) @@ -1683,11 +1734,6 @@ func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Co // 如果客户端请求非流式,在响应处理阶段会收集完整流式响应后返回 upstreamAction := "streamGenerateContent" - // 统计模型调用次数(包括粘性会话,用于负载均衡调度) - if s.cache != nil { - _, _ = s.cache.IncrModelCallCount(ctx, account.ID, mappedModel) - } - // 执行带重试的请求 result, err := s.antigravityRetryLoop(antigravityRetryLoopParams{ ctx: ctx, @@ -1697,7 +1743,6 @@ func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Co accessToken: accessToken, action: upstreamAction, body: wrappedBody, - quotaScope: quotaScope, c: c, httpUpstream: s.httpUpstream, settingService: s.settingService, @@ -1771,7 +1816,7 @@ func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Co if unwrapErr != nil || len(unwrappedForOps) == 0 { unwrappedForOps = respBody } - s.handleUpstreamError(ctx, prefix, 
account, resp.StatusCode, resp.Header, respBody, quotaScope, 0, "", isStickySession) + s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, originalModel, 0, "", isStickySession) upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(unwrappedForOps)) upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) upstreamDetail := s.getUpstreamErrorDetail(unwrappedForOps) @@ -1818,6 +1863,7 @@ handleSuccess: var usage *ClaudeUsage var firstTokenMs *int + var clientDisconnect bool if stream { // 客户端要求流式,直接透传 @@ -1828,6 +1874,7 @@ handleSuccess: } usage = streamRes.usage firstTokenMs = streamRes.firstTokenMs + clientDisconnect = streamRes.clientDisconnect } else { // 客户端要求非流式,收集流式响应后返回 streamRes, err := s.handleGeminiStreamToNonStreaming(c, resp, startTime) @@ -1851,14 +1898,15 @@ handleSuccess: } return &ForwardResult{ - RequestID: requestID, - Usage: *usage, - Model: originalModel, - Stream: stream, - Duration: time.Since(startTime), - FirstTokenMs: firstTokenMs, - ImageCount: imageCount, - ImageSize: imageSize, + RequestID: requestID, + Usage: *usage, + Model: originalModel, + Stream: stream, + Duration: time.Since(startTime), + FirstTokenMs: firstTokenMs, + ClientDisconnect: clientDisconnect, + ImageCount: imageCount, + ImageSize: imageSize, }, nil } @@ -2067,9 +2115,9 @@ func shouldTriggerAntigravitySmartRetry(account *Account, respBody []byte) (shou } // retryDelay >= 阈值:直接限流模型,不重试 - // 注意:如果上游未提供 retryDelay,parseAntigravitySmartRetryInfo 已设置为默认 5 分钟 + // 注意:如果上游未提供 retryDelay,parseAntigravitySmartRetryInfo 已设置为默认 30s if info.RetryDelay >= antigravityRateLimitThreshold { - return false, true, 0, info.ModelName + return false, true, info.RetryDelay, info.ModelName } // retryDelay < 阈值:智能重试 @@ -2191,10 +2239,10 @@ func (s *AntigravityGatewayService) updateAccountModelRateLimitInCache(ctx conte func (s *AntigravityGatewayService) handleUpstreamError( ctx context.Context, prefix string, account *Account, statusCode int, headers 
http.Header, body []byte, - quotaScope AntigravityQuotaScope, + requestedModel string, groupID int64, sessionHash string, isStickySession bool, ) *handleModelRateLimitResult { - // ✨ 模型级限流处理(在原有逻辑之前) + // 模型级限流处理(优先) result := s.handleModelRateLimit(&handleModelRateLimitParams{ ctx: ctx, prefix: prefix, @@ -2216,52 +2264,35 @@ func (s *AntigravityGatewayService) handleUpstreamError( return nil } - // ========== 原有逻辑,保持不变 ========== - // 429 使用 Gemini 格式解析(从 body 解析重置时间) + // 429:尝试解析模型级限流,解析失败时兜底为账号级限流 if statusCode == 429 { - // 调试日志遵循统一日志开关与长度限制,避免无条件记录完整上游响应体。 if logBody, maxBytes := s.getLogConfig(); logBody { log.Printf("[Antigravity-Debug] 429 response body: %s", truncateString(string(body), maxBytes)) } - useScopeLimit := quotaScope != "" resetAt := ParseGeminiRateLimitResetTime(body) - if resetAt == nil { - // 解析失败:使用默认限流时间(与临时限流保持一致) - // 可通过配置或环境变量覆盖 - defaultDur := antigravityDefaultRateLimitDuration - if s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.AntigravityFallbackCooldownMinutes > 0 { - defaultDur = time.Duration(s.settingService.cfg.Gateway.AntigravityFallbackCooldownMinutes) * time.Minute - } - // 秒级环境变量优先级最高 - if override, ok := antigravityFallbackCooldownSeconds(); ok { - defaultDur = override - } - ra := time.Now().Add(defaultDur) - if useScopeLimit { - log.Printf("%s status=429 rate_limited scope=%s reset_in=%v (fallback)", prefix, quotaScope, defaultDur) - if err := s.accountRepo.SetAntigravityQuotaScopeLimit(ctx, account.ID, quotaScope, ra); err != nil { - log.Printf("%s status=429 rate_limit_set_failed scope=%s error=%v", prefix, quotaScope, err) - } + defaultDur := s.getDefaultRateLimitDuration() + + // 尝试解析模型 key 并设置模型级限流 + modelKey := resolveAntigravityModelKey(requestedModel) + if modelKey != "" { + ra := s.resolveResetTime(resetAt, defaultDur) + if err := s.accountRepo.SetModelRateLimit(ctx, account.ID, modelKey, ra); err != nil { + log.Printf("%s status=429 model_rate_limit_set_failed model=%s 
error=%v", prefix, modelKey, err) } else { - log.Printf("%s status=429 rate_limited account=%d reset_in=%v (fallback)", prefix, account.ID, defaultDur) - if err := s.accountRepo.SetRateLimited(ctx, account.ID, ra); err != nil { - log.Printf("%s status=429 rate_limit_set_failed account=%d error=%v", prefix, account.ID, err) - } + log.Printf("%s status=429 model_rate_limited model=%s account=%d reset_at=%v reset_in=%v", + prefix, modelKey, account.ID, ra.Format("15:04:05"), time.Until(ra).Truncate(time.Second)) + s.updateAccountModelRateLimitInCache(ctx, account, modelKey, ra) } return nil } - resetTime := time.Unix(*resetAt, 0) - if useScopeLimit { - log.Printf("%s status=429 rate_limited scope=%s reset_at=%v reset_in=%v", prefix, quotaScope, resetTime.Format("15:04:05"), time.Until(resetTime).Truncate(time.Second)) - if err := s.accountRepo.SetAntigravityQuotaScopeLimit(ctx, account.ID, quotaScope, resetTime); err != nil { - log.Printf("%s status=429 rate_limit_set_failed scope=%s error=%v", prefix, quotaScope, err) - } - } else { - log.Printf("%s status=429 rate_limited account=%d reset_at=%v reset_in=%v", prefix, account.ID, resetTime.Format("15:04:05"), time.Until(resetTime).Truncate(time.Second)) - if err := s.accountRepo.SetRateLimited(ctx, account.ID, resetTime); err != nil { - log.Printf("%s status=429 rate_limit_set_failed account=%d error=%v", prefix, account.ID, err) - } + + // 无法解析模型 key,兜底为账号级限流 + ra := s.resolveResetTime(resetAt, defaultDur) + log.Printf("%s status=429 rate_limited account=%d reset_at=%v reset_in=%v (fallback)", + prefix, account.ID, ra.Format("15:04:05"), time.Until(ra).Truncate(time.Second)) + if err := s.accountRepo.SetRateLimited(ctx, account.ID, ra); err != nil { + log.Printf("%s status=429 rate_limit_set_failed account=%d error=%v", prefix, account.ID, err) } return nil } @@ -2276,9 +2307,90 @@ func (s *AntigravityGatewayService) handleUpstreamError( return nil } +// getDefaultRateLimitDuration 获取默认限流时间 +func (s 
*AntigravityGatewayService) getDefaultRateLimitDuration() time.Duration { + defaultDur := antigravityDefaultRateLimitDuration + if s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.AntigravityFallbackCooldownMinutes > 0 { + defaultDur = time.Duration(s.settingService.cfg.Gateway.AntigravityFallbackCooldownMinutes) * time.Minute + } + if override, ok := antigravityFallbackCooldownSeconds(); ok { + defaultDur = override + } + return defaultDur +} + +// resolveResetTime 根据解析的重置时间或默认时长计算重置时间点 +func (s *AntigravityGatewayService) resolveResetTime(resetAt *int64, defaultDur time.Duration) time.Time { + if resetAt != nil { + return time.Unix(*resetAt, 0) + } + return time.Now().Add(defaultDur) +} + type antigravityStreamResult struct { - usage *ClaudeUsage - firstTokenMs *int + usage *ClaudeUsage + firstTokenMs *int + clientDisconnect bool // 客户端是否在流式传输过程中断开 +} + +// antigravityClientWriter 封装流式响应的客户端写入,自动检测断开并标记。 +// 断开后所有写入操作变为 no-op,调用方通过 Disconnected() 判断是否继续 drain 上游。 +type antigravityClientWriter struct { + w gin.ResponseWriter + flusher http.Flusher + disconnected bool + prefix string // 日志前缀,标识来源方法 +} + +func newAntigravityClientWriter(w gin.ResponseWriter, flusher http.Flusher, prefix string) *antigravityClientWriter { + return &antigravityClientWriter{w: w, flusher: flusher, prefix: prefix} +} + +// Write 写入数据到客户端,写入失败时标记断开并返回 false +func (cw *antigravityClientWriter) Write(p []byte) bool { + if cw.disconnected { + return false + } + if _, err := cw.w.Write(p); err != nil { + cw.markDisconnected() + return false + } + cw.flusher.Flush() + return true +} + +// Fprintf 格式化写入数据到客户端,写入失败时标记断开并返回 false +func (cw *antigravityClientWriter) Fprintf(format string, args ...any) bool { + if cw.disconnected { + return false + } + if _, err := fmt.Fprintf(cw.w, format, args...); err != nil { + cw.markDisconnected() + return false + } + cw.flusher.Flush() + return true +} + +func (cw *antigravityClientWriter) Disconnected() bool { return 
cw.disconnected } + +func (cw *antigravityClientWriter) markDisconnected() { + cw.disconnected = true + log.Printf("Client disconnected during streaming (%s), continuing to drain upstream for billing", cw.prefix) +} + +// handleStreamReadError 处理上游读取错误的通用逻辑。 +// 返回 (clientDisconnect, handled):handled=true 表示错误已处理,调用方应返回已收集的 usage。 +func handleStreamReadError(err error, clientDisconnected bool, prefix string) (disconnect bool, handled bool) { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + log.Printf("Context canceled during streaming (%s), returning collected usage", prefix) + return true, true + } + if clientDisconnected { + log.Printf("Upstream read error after client disconnect (%s): %v, returning collected usage", prefix, err) + return true, true + } + return false, false } func (s *AntigravityGatewayService) handleGeminiStreamingResponse(c *gin.Context, resp *http.Response, startTime time.Time) (*antigravityStreamResult, error) { @@ -2354,10 +2466,12 @@ func (s *AntigravityGatewayService) handleGeminiStreamingResponse(c *gin.Context intervalCh = intervalTicker.C } + cw := newAntigravityClientWriter(c.Writer, flusher, "antigravity gemini") + // 仅发送一次错误事件,避免多次写入导致协议混乱 errorEventSent := false sendErrorEvent := func(reason string) { - if errorEventSent { + if errorEventSent || cw.Disconnected() { return } errorEventSent = true @@ -2369,9 +2483,12 @@ func (s *AntigravityGatewayService) handleGeminiStreamingResponse(c *gin.Context select { case ev, ok := <-events: if !ok { - return &antigravityStreamResult{usage: usage, firstTokenMs: firstTokenMs}, nil + return &antigravityStreamResult{usage: usage, firstTokenMs: firstTokenMs, clientDisconnect: cw.Disconnected()}, nil } if ev.err != nil { + if disconnect, handled := handleStreamReadError(ev.err, cw.Disconnected(), "antigravity gemini"); handled { + return &antigravityStreamResult{usage: usage, firstTokenMs: firstTokenMs, clientDisconnect: disconnect}, nil + } if 
errors.Is(ev.err, bufio.ErrTooLong) { log.Printf("SSE line too long (antigravity): max_size=%d error=%v", maxLineSize, ev.err) sendErrorEvent("response_too_large") @@ -2386,11 +2503,7 @@ func (s *AntigravityGatewayService) handleGeminiStreamingResponse(c *gin.Context if strings.HasPrefix(trimmed, "data:") { payload := strings.TrimSpace(strings.TrimPrefix(trimmed, "data:")) if payload == "" || payload == "[DONE]" { - if _, err := fmt.Fprintf(c.Writer, "%s\n", line); err != nil { - sendErrorEvent("write_failed") - return &antigravityStreamResult{usage: usage, firstTokenMs: firstTokenMs}, err - } - flusher.Flush() + cw.Fprintf("%s\n", line) continue } @@ -2426,27 +2539,22 @@ func (s *AntigravityGatewayService) handleGeminiStreamingResponse(c *gin.Context firstTokenMs = &ms } - if _, err := fmt.Fprintf(c.Writer, "data: %s\n\n", payload); err != nil { - sendErrorEvent("write_failed") - return &antigravityStreamResult{usage: usage, firstTokenMs: firstTokenMs}, err - } - flusher.Flush() + cw.Fprintf("data: %s\n\n", payload) continue } - if _, err := fmt.Fprintf(c.Writer, "%s\n", line); err != nil { - sendErrorEvent("write_failed") - return &antigravityStreamResult{usage: usage, firstTokenMs: firstTokenMs}, err - } - flusher.Flush() + cw.Fprintf("%s\n", line) case <-intervalCh: lastRead := time.Unix(0, atomic.LoadInt64(&lastReadAt)) if time.Since(lastRead) < streamInterval { continue } + if cw.Disconnected() { + log.Printf("Upstream timeout after client disconnect (antigravity gemini), returning collected usage") + return &antigravityStreamResult{usage: usage, firstTokenMs: firstTokenMs, clientDisconnect: true}, nil + } log.Printf("Stream data interval timeout (antigravity)") - // 注意:此函数没有 account 上下文,无法调用 HandleStreamTimeout sendErrorEvent("stream_timeout") return &antigravityStreamResult{usage: usage, firstTokenMs: firstTokenMs}, fmt.Errorf("stream data interval timeout") } @@ -3144,10 +3252,12 @@ func (s *AntigravityGatewayService) handleClaudeStreamingResponse(c 
*gin.Context intervalCh = intervalTicker.C } + cw := newAntigravityClientWriter(c.Writer, flusher, "antigravity claude") + // 仅发送一次错误事件,避免多次写入导致协议混乱 errorEventSent := false sendErrorEvent := func(reason string) { - if errorEventSent { + if errorEventSent || cw.Disconnected() { return } errorEventSent = true @@ -3155,19 +3265,27 @@ func (s *AntigravityGatewayService) handleClaudeStreamingResponse(c *gin.Context flusher.Flush() } + // finishUsage 是获取 processor 最终 usage 的辅助函数 + finishUsage := func() *ClaudeUsage { + _, agUsage := processor.Finish() + return convertUsage(agUsage) + } + for { select { case ev, ok := <-events: if !ok { - // 发送结束事件 + // 上游完成,发送结束事件 finalEvents, agUsage := processor.Finish() if len(finalEvents) > 0 { - _, _ = c.Writer.Write(finalEvents) - flusher.Flush() + cw.Write(finalEvents) } - return &antigravityStreamResult{usage: convertUsage(agUsage), firstTokenMs: firstTokenMs}, nil + return &antigravityStreamResult{usage: convertUsage(agUsage), firstTokenMs: firstTokenMs, clientDisconnect: cw.Disconnected()}, nil } if ev.err != nil { + if disconnect, handled := handleStreamReadError(ev.err, cw.Disconnected(), "antigravity claude"); handled { + return &antigravityStreamResult{usage: finishUsage(), firstTokenMs: firstTokenMs, clientDisconnect: disconnect}, nil + } if errors.Is(ev.err, bufio.ErrTooLong) { log.Printf("SSE line too long (antigravity): max_size=%d error=%v", maxLineSize, ev.err) sendErrorEvent("response_too_large") @@ -3177,25 +3295,14 @@ func (s *AntigravityGatewayService) handleClaudeStreamingResponse(c *gin.Context return nil, fmt.Errorf("stream read error: %w", ev.err) } - line := ev.line // 处理 SSE 行,转换为 Claude 格式 - claudeEvents := processor.ProcessLine(strings.TrimRight(line, "\r\n")) - + claudeEvents := processor.ProcessLine(strings.TrimRight(ev.line, "\r\n")) if len(claudeEvents) > 0 { if firstTokenMs == nil { ms := int(time.Since(startTime).Milliseconds()) firstTokenMs = &ms } - - if _, writeErr := c.Writer.Write(claudeEvents); 
writeErr != nil { - finalEvents, agUsage := processor.Finish() - if len(finalEvents) > 0 { - _, _ = c.Writer.Write(finalEvents) - } - sendErrorEvent("write_failed") - return &antigravityStreamResult{usage: convertUsage(agUsage), firstTokenMs: firstTokenMs}, writeErr - } - flusher.Flush() + cw.Write(claudeEvents) } case <-intervalCh: @@ -3203,13 +3310,15 @@ func (s *AntigravityGatewayService) handleClaudeStreamingResponse(c *gin.Context if time.Since(lastRead) < streamInterval { continue } + if cw.Disconnected() { + log.Printf("Upstream timeout after client disconnect (antigravity claude), returning collected usage") + return &antigravityStreamResult{usage: finishUsage(), firstTokenMs: firstTokenMs, clientDisconnect: true}, nil + } log.Printf("Stream data interval timeout (antigravity)") - // 注意:此函数没有 account 上下文,无法调用 HandleStreamTimeout sendErrorEvent("stream_timeout") return &antigravityStreamResult{usage: convertUsage(nil), firstTokenMs: firstTokenMs}, fmt.Errorf("stream data interval timeout") } } - } // extractImageSize 从 Gemini 请求中提取 image_size 参数 @@ -3348,3 +3457,288 @@ func filterEmptyPartsFromGeminiRequest(body []byte) ([]byte, error) { payload["contents"] = filtered return json.Marshal(payload) } + +// ForwardUpstream 使用 base_url + /v1/messages + 双 header 认证透传上游 Claude 请求 +func (s *AntigravityGatewayService) ForwardUpstream(ctx context.Context, c *gin.Context, account *Account, body []byte) (*ForwardResult, error) { + startTime := time.Now() + sessionID := getSessionID(c) + prefix := logPrefix(sessionID, account.Name) + + // 获取上游配置 + baseURL := strings.TrimSpace(account.GetCredential("base_url")) + apiKey := strings.TrimSpace(account.GetCredential("api_key")) + if baseURL == "" || apiKey == "" { + return nil, fmt.Errorf("upstream account missing base_url or api_key") + } + baseURL = strings.TrimSuffix(baseURL, "/") + + // 解析请求获取模型信息 + var claudeReq antigravity.ClaudeRequest + if err := json.Unmarshal(body, &claudeReq); err != nil { + return nil, 
fmt.Errorf("parse claude request: %w", err) + } + if strings.TrimSpace(claudeReq.Model) == "" { + return nil, fmt.Errorf("missing model") + } + originalModel := claudeReq.Model + billingModel := originalModel + + // 构建上游请求 URL + upstreamURL := baseURL + "/v1/messages" + + // 创建请求 + req, err := http.NewRequestWithContext(ctx, http.MethodPost, upstreamURL, bytes.NewReader(body)) + if err != nil { + return nil, fmt.Errorf("create upstream request: %w", err) + } + + // 设置请求头 + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", "Bearer "+apiKey) + req.Header.Set("x-api-key", apiKey) // Claude API 兼容 + + // 透传 Claude 相关 headers + if v := c.GetHeader("anthropic-version"); v != "" { + req.Header.Set("anthropic-version", v) + } + if v := c.GetHeader("anthropic-beta"); v != "" { + req.Header.Set("anthropic-beta", v) + } + + // 代理 URL + proxyURL := "" + if account.ProxyID != nil && account.Proxy != nil { + proxyURL = account.Proxy.URL() + } + + // 发送请求 + resp, err := s.httpUpstream.Do(req, proxyURL, account.ID, account.Concurrency) + if err != nil { + log.Printf("%s upstream request failed: %v", prefix, err) + return nil, fmt.Errorf("upstream request failed: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + // 处理错误响应 + if resp.StatusCode >= 400 { + respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + + // 429 错误时标记账号限流 + if resp.StatusCode == http.StatusTooManyRequests { + s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, originalModel, 0, "", false) + } + + // 透传上游错误 + c.Header("Content-Type", resp.Header.Get("Content-Type")) + c.Status(resp.StatusCode) + _, _ = c.Writer.Write(respBody) + + return &ForwardResult{ + Model: billingModel, + }, nil + } + + // 处理成功响应(流式/非流式) + var usage *ClaudeUsage + var firstTokenMs *int + var clientDisconnect bool + + if claudeReq.Stream { + // 流式响应:透传 + c.Header("Content-Type", "text/event-stream") + c.Header("Cache-Control", "no-cache") + 
c.Header("Connection", "keep-alive") + c.Header("X-Accel-Buffering", "no") + c.Status(http.StatusOK) + + streamRes := s.streamUpstreamResponse(c, resp, startTime) + usage = streamRes.usage + firstTokenMs = streamRes.firstTokenMs + clientDisconnect = streamRes.clientDisconnect + } else { + // 非流式响应:直接透传 + respBody, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("read upstream response: %w", err) + } + + // 提取 usage + usage = s.extractClaudeUsage(respBody) + + c.Header("Content-Type", resp.Header.Get("Content-Type")) + c.Status(http.StatusOK) + _, _ = c.Writer.Write(respBody) + } + + // 构建计费结果 + duration := time.Since(startTime) + log.Printf("%s status=success duration_ms=%d", prefix, duration.Milliseconds()) + + return &ForwardResult{ + Model: billingModel, + Stream: claudeReq.Stream, + Duration: duration, + FirstTokenMs: firstTokenMs, + ClientDisconnect: clientDisconnect, + Usage: ClaudeUsage{ + InputTokens: usage.InputTokens, + OutputTokens: usage.OutputTokens, + CacheReadInputTokens: usage.CacheReadInputTokens, + CacheCreationInputTokens: usage.CacheCreationInputTokens, + }, + }, nil +} + +// streamUpstreamResponse 透传上游 SSE 流并提取 Claude usage +func (s *AntigravityGatewayService) streamUpstreamResponse(c *gin.Context, resp *http.Response, startTime time.Time) *antigravityStreamResult { + usage := &ClaudeUsage{} + var firstTokenMs *int + + scanner := bufio.NewScanner(resp.Body) + maxLineSize := defaultMaxLineSize + if s.settingService.cfg != nil && s.settingService.cfg.Gateway.MaxLineSize > 0 { + maxLineSize = s.settingService.cfg.Gateway.MaxLineSize + } + scanner.Buffer(make([]byte, 64*1024), maxLineSize) + + type scanEvent struct { + line string + err error + } + events := make(chan scanEvent, 16) + done := make(chan struct{}) + sendEvent := func(ev scanEvent) bool { + select { + case events <- ev: + return true + case <-done: + return false + } + } + var lastReadAt int64 + atomic.StoreInt64(&lastReadAt, time.Now().UnixNano()) + go func() 
{ + defer close(events) + for scanner.Scan() { + atomic.StoreInt64(&lastReadAt, time.Now().UnixNano()) + if !sendEvent(scanEvent{line: scanner.Text()}) { + return + } + } + if err := scanner.Err(); err != nil { + _ = sendEvent(scanEvent{err: err}) + } + }() + defer close(done) + + streamInterval := time.Duration(0) + if s.settingService.cfg != nil && s.settingService.cfg.Gateway.StreamDataIntervalTimeout > 0 { + streamInterval = time.Duration(s.settingService.cfg.Gateway.StreamDataIntervalTimeout) * time.Second + } + var intervalTicker *time.Ticker + if streamInterval > 0 { + intervalTicker = time.NewTicker(streamInterval) + defer intervalTicker.Stop() + } + var intervalCh <-chan time.Time + if intervalTicker != nil { + intervalCh = intervalTicker.C + } + + flusher, _ := c.Writer.(http.Flusher) + cw := newAntigravityClientWriter(c.Writer, flusher, "antigravity upstream") + + for { + select { + case ev, ok := <-events: + if !ok { + return &antigravityStreamResult{usage: usage, firstTokenMs: firstTokenMs, clientDisconnect: cw.Disconnected()} + } + if ev.err != nil { + if disconnect, handled := handleStreamReadError(ev.err, cw.Disconnected(), "antigravity upstream"); handled { + return &antigravityStreamResult{usage: usage, firstTokenMs: firstTokenMs, clientDisconnect: disconnect} + } + log.Printf("Stream read error (antigravity upstream): %v", ev.err) + return &antigravityStreamResult{usage: usage, firstTokenMs: firstTokenMs} + } + + line := ev.line + + // 记录首 token 时间 + if firstTokenMs == nil && len(line) > 0 { + ms := int(time.Since(startTime).Milliseconds()) + firstTokenMs = &ms + } + + // 尝试从 message_delta 或 message_stop 事件提取 usage + s.extractSSEUsage(line, usage) + + // 透传行 + cw.Fprintf("%s\n", line) + + case <-intervalCh: + lastRead := time.Unix(0, atomic.LoadInt64(&lastReadAt)) + if time.Since(lastRead) < streamInterval { + continue + } + if cw.Disconnected() { + log.Printf("Upstream timeout after client disconnect (antigravity upstream), returning collected 
usage") + return &antigravityStreamResult{usage: usage, firstTokenMs: firstTokenMs, clientDisconnect: true} + } + log.Printf("Stream data interval timeout (antigravity upstream)") + return &antigravityStreamResult{usage: usage, firstTokenMs: firstTokenMs} + } + } +} + +// extractSSEUsage 从 SSE data 行中提取 Claude usage(用于流式透传场景) +func (s *AntigravityGatewayService) extractSSEUsage(line string, usage *ClaudeUsage) { + if !strings.HasPrefix(line, "data: ") { + return + } + dataStr := strings.TrimPrefix(line, "data: ") + var event map[string]any + if json.Unmarshal([]byte(dataStr), &event) != nil { + return + } + u, ok := event["usage"].(map[string]any) + if !ok { + return + } + if v, ok := u["input_tokens"].(float64); ok && int(v) > 0 { + usage.InputTokens = int(v) + } + if v, ok := u["output_tokens"].(float64); ok && int(v) > 0 { + usage.OutputTokens = int(v) + } + if v, ok := u["cache_read_input_tokens"].(float64); ok && int(v) > 0 { + usage.CacheReadInputTokens = int(v) + } + if v, ok := u["cache_creation_input_tokens"].(float64); ok && int(v) > 0 { + usage.CacheCreationInputTokens = int(v) + } +} + +// extractClaudeUsage 从非流式 Claude 响应提取 usage +func (s *AntigravityGatewayService) extractClaudeUsage(body []byte) *ClaudeUsage { + usage := &ClaudeUsage{} + var resp map[string]any + if json.Unmarshal(body, &resp) != nil { + return usage + } + if u, ok := resp["usage"].(map[string]any); ok { + if v, ok := u["input_tokens"].(float64); ok { + usage.InputTokens = int(v) + } + if v, ok := u["output_tokens"].(float64); ok { + usage.OutputTokens = int(v) + } + if v, ok := u["cache_read_input_tokens"].(float64); ok { + usage.CacheReadInputTokens = int(v) + } + if v, ok := u["cache_creation_input_tokens"].(float64); ok { + usage.CacheCreationInputTokens = int(v) + } + } + return usage +} diff --git a/backend/internal/service/antigravity_gateway_service_test.go b/backend/internal/service/antigravity_gateway_service_test.go index 91cefc28..a6a349c1 100644 --- 
a/backend/internal/service/antigravity_gateway_service_test.go +++ b/backend/internal/service/antigravity_gateway_service_test.go @@ -4,17 +4,42 @@ import ( "bytes" "context" "encoding/json" + "errors" + "fmt" "io" "net/http" "net/http/httptest" "testing" "time" + "github.com/Wei-Shaw/sub2api/internal/config" "github.com/Wei-Shaw/sub2api/internal/pkg/antigravity" "github.com/gin-gonic/gin" "github.com/stretchr/testify/require" ) +// antigravityFailingWriter 模拟客户端断开连接的 gin.ResponseWriter +type antigravityFailingWriter struct { + gin.ResponseWriter + failAfter int // 允许成功写入的次数,之后所有写入返回错误 + writes int +} + +func (w *antigravityFailingWriter) Write(p []byte) (int, error) { + if w.writes >= w.failAfter { + return 0, errors.New("write failed: client disconnected") + } + w.writes++ + return w.ResponseWriter.Write(p) +} + +// newAntigravityTestService 创建用于流式测试的 AntigravityGatewayService +func newAntigravityTestService(cfg *config.Config) *AntigravityGatewayService { + return &AntigravityGatewayService{ + settingService: &SettingService{cfg: cfg}, + } +} + func TestStripSignatureSensitiveBlocksFromClaudeRequest(t *testing.T) { req := &antigravity.ClaudeRequest{ Model: "claude-sonnet-4-5", @@ -337,8 +362,8 @@ func TestAntigravityGatewayService_Forward_StickySessionForceCacheBilling(t *tes require.True(t, failoverErr.ForceCacheBilling, "ForceCacheBilling should be true for sticky session switch") } -// TestAntigravityGatewayService_ForwardGemini_StickySessionForceCacheBilling -// 验证:ForwardGemini 粘性会话切换时,UpstreamFailoverError.ForceCacheBilling 应为 true +// TestAntigravityGatewayService_ForwardGemini_StickySessionForceCacheBilling verifies +// that ForwardGemini sets ForceCacheBilling=true for sticky session switch. 
func TestAntigravityGatewayService_ForwardGemini_StickySessionForceCacheBilling(t *testing.T) { gin.SetMode(gin.TestMode) writer := httptest.NewRecorder() @@ -391,3 +416,438 @@ func TestAntigravityGatewayService_ForwardGemini_StickySessionForceCacheBilling( require.Equal(t, http.StatusServiceUnavailable, failoverErr.StatusCode) require.True(t, failoverErr.ForceCacheBilling, "ForceCacheBilling should be true for sticky session switch") } + +// --- 流式 happy path 测试 --- + +// TestStreamUpstreamResponse_NormalComplete +// 验证:正常流式转发完成时,数据正确透传、usage 正确收集、clientDisconnect=false +func TestStreamUpstreamResponse_NormalComplete(t *testing.T) { + gin.SetMode(gin.TestMode) + svc := newAntigravityTestService(&config.Config{ + Gateway: config.GatewayConfig{MaxLineSize: defaultMaxLineSize}, + }) + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/", nil) + + pr, pw := io.Pipe() + resp := &http.Response{StatusCode: http.StatusOK, Body: pr, Header: http.Header{}} + + go func() { + defer func() { _ = pw.Close() }() + fmt.Fprintln(pw, `event: message_start`) + fmt.Fprintln(pw, `data: {"type":"message_start","message":{"usage":{"input_tokens":10}}}`) + fmt.Fprintln(pw, "") + fmt.Fprintln(pw, `event: content_block_delta`) + fmt.Fprintln(pw, `data: {"type":"content_block_delta","delta":{"text":"hello"}}`) + fmt.Fprintln(pw, "") + fmt.Fprintln(pw, `event: message_delta`) + fmt.Fprintln(pw, `data: {"type":"message_delta","usage":{"output_tokens":5}}`) + fmt.Fprintln(pw, "") + }() + + result := svc.streamUpstreamResponse(c, resp, time.Now()) + _ = pr.Close() + + require.NotNil(t, result) + require.False(t, result.clientDisconnect, "normal completion should not set clientDisconnect") + require.NotNil(t, result.usage) + require.Equal(t, 5, result.usage.OutputTokens, "should collect output_tokens from message_delta") + require.NotNil(t, result.firstTokenMs, "should record first token time") + + // 验证数据被透传到客户端 + body := 
rec.Body.String() + require.Contains(t, body, "event: message_start") + require.Contains(t, body, "content_block_delta") + require.Contains(t, body, "message_delta") +} + +// TestHandleGeminiStreamingResponse_NormalComplete +// 验证:正常 Gemini 流式转发,数据正确透传、usage 正确收集 +func TestHandleGeminiStreamingResponse_NormalComplete(t *testing.T) { + gin.SetMode(gin.TestMode) + svc := newAntigravityTestService(&config.Config{ + Gateway: config.GatewayConfig{MaxLineSize: defaultMaxLineSize}, + }) + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/", nil) + + pr, pw := io.Pipe() + resp := &http.Response{StatusCode: http.StatusOK, Body: pr, Header: http.Header{}} + + go func() { + defer func() { _ = pw.Close() }() + // 第一个 chunk(部分内容) + fmt.Fprintln(pw, `data: {"candidates":[{"content":{"parts":[{"text":"Hello"}]}}],"usageMetadata":{"promptTokenCount":10,"candidatesTokenCount":3}}`) + fmt.Fprintln(pw, "") + // 第二个 chunk(最终内容+完整 usage) + fmt.Fprintln(pw, `data: {"candidates":[{"content":{"parts":[{"text":" world"}]},"finishReason":"STOP"}],"usageMetadata":{"promptTokenCount":10,"candidatesTokenCount":8,"cachedContentTokenCount":2}}`) + fmt.Fprintln(pw, "") + }() + + result, err := svc.handleGeminiStreamingResponse(c, resp, time.Now()) + _ = pr.Close() + + require.NoError(t, err) + require.NotNil(t, result) + require.False(t, result.clientDisconnect, "normal completion should not set clientDisconnect") + require.NotNil(t, result.usage) + // Gemini usage: promptTokenCount=10, candidatesTokenCount=8, cachedContentTokenCount=2 + // → InputTokens=10-2=8, OutputTokens=8, CacheReadInputTokens=2 + require.Equal(t, 8, result.usage.InputTokens) + require.Equal(t, 8, result.usage.OutputTokens) + require.Equal(t, 2, result.usage.CacheReadInputTokens) + require.NotNil(t, result.firstTokenMs, "should record first token time") + + // 验证数据被透传到客户端 + body := rec.Body.String() + require.Contains(t, body, "Hello") + 
require.Contains(t, body, "world") + // 不应包含错误事件 + require.NotContains(t, body, "event: error") +} + +// TestHandleClaudeStreamingResponse_NormalComplete +// 验证:正常 Claude 流式转发(Gemini→Claude 转换),数据正确转换并输出 +func TestHandleClaudeStreamingResponse_NormalComplete(t *testing.T) { + gin.SetMode(gin.TestMode) + svc := newAntigravityTestService(&config.Config{ + Gateway: config.GatewayConfig{MaxLineSize: defaultMaxLineSize}, + }) + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/", nil) + + pr, pw := io.Pipe() + resp := &http.Response{StatusCode: http.StatusOK, Body: pr, Header: http.Header{}} + + go func() { + defer func() { _ = pw.Close() }() + // v1internal 包装格式:Gemini 数据嵌套在 "response" 字段下 + // ProcessLine 先尝试反序列化为 V1InternalResponse,裸格式会导致 Response.UsageMetadata 为空 + fmt.Fprintln(pw, `data: {"response":{"candidates":[{"content":{"parts":[{"text":"Hi there"}]},"finishReason":"STOP"}],"usageMetadata":{"promptTokenCount":5,"candidatesTokenCount":3}}}`) + fmt.Fprintln(pw, "") + }() + + result, err := svc.handleClaudeStreamingResponse(c, resp, time.Now(), "claude-sonnet-4-5") + _ = pr.Close() + + require.NoError(t, err) + require.NotNil(t, result) + require.False(t, result.clientDisconnect, "normal completion should not set clientDisconnect") + require.NotNil(t, result.usage) + // Gemini→Claude 转换的 usage:promptTokenCount=5→InputTokens=5, candidatesTokenCount=3→OutputTokens=3 + require.Equal(t, 5, result.usage.InputTokens) + require.Equal(t, 3, result.usage.OutputTokens) + require.NotNil(t, result.firstTokenMs, "should record first token time") + + // 验证输出是 Claude SSE 格式(processor 会转换) + body := rec.Body.String() + require.Contains(t, body, "event: message_start", "should contain Claude message_start event") + require.Contains(t, body, "event: message_stop", "should contain Claude message_stop event") + // 不应包含错误事件 + require.NotContains(t, body, "event: error") +} + +// --- 流式客户端断开检测测试 --- + +// 
TestStreamUpstreamResponse_ClientDisconnectDrainsUsage +// 验证:客户端写入失败后,streamUpstreamResponse 继续读取上游以收集 usage +func TestStreamUpstreamResponse_ClientDisconnectDrainsUsage(t *testing.T) { + gin.SetMode(gin.TestMode) + svc := newAntigravityTestService(&config.Config{ + Gateway: config.GatewayConfig{MaxLineSize: defaultMaxLineSize}, + }) + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/", nil) + c.Writer = &antigravityFailingWriter{ResponseWriter: c.Writer, failAfter: 0} + + pr, pw := io.Pipe() + resp := &http.Response{StatusCode: http.StatusOK, Body: pr, Header: http.Header{}} + + go func() { + defer func() { _ = pw.Close() }() + fmt.Fprintln(pw, `event: message_start`) + fmt.Fprintln(pw, `data: {"type":"message_start","message":{"usage":{"input_tokens":10}}}`) + fmt.Fprintln(pw, "") + fmt.Fprintln(pw, `event: message_delta`) + fmt.Fprintln(pw, `data: {"type":"message_delta","usage":{"output_tokens":20}}`) + fmt.Fprintln(pw, "") + }() + + result := svc.streamUpstreamResponse(c, resp, time.Now()) + _ = pr.Close() + + require.NotNil(t, result) + require.True(t, result.clientDisconnect) + require.NotNil(t, result.usage) + require.Equal(t, 20, result.usage.OutputTokens) +} + +// TestStreamUpstreamResponse_ContextCanceled +// 验证:context 取消时返回 usage 且标记 clientDisconnect +func TestStreamUpstreamResponse_ContextCanceled(t *testing.T) { + gin.SetMode(gin.TestMode) + svc := newAntigravityTestService(&config.Config{ + Gateway: config.GatewayConfig{MaxLineSize: defaultMaxLineSize}, + }) + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + ctx, cancel := context.WithCancel(context.Background()) + cancel() + c.Request = httptest.NewRequest(http.MethodPost, "/", nil).WithContext(ctx) + + resp := &http.Response{StatusCode: http.StatusOK, Body: cancelReadCloser{}, Header: http.Header{}} + + result := svc.streamUpstreamResponse(c, resp, time.Now()) + + require.NotNil(t, result) + 
require.True(t, result.clientDisconnect) + require.NotContains(t, rec.Body.String(), "event: error") +} + +// TestStreamUpstreamResponse_Timeout +// 验证:上游超时时返回已收集的 usage +func TestStreamUpstreamResponse_Timeout(t *testing.T) { + gin.SetMode(gin.TestMode) + svc := newAntigravityTestService(&config.Config{ + Gateway: config.GatewayConfig{StreamDataIntervalTimeout: 1, MaxLineSize: defaultMaxLineSize}, + }) + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/", nil) + + pr, pw := io.Pipe() + resp := &http.Response{StatusCode: http.StatusOK, Body: pr, Header: http.Header{}} + + result := svc.streamUpstreamResponse(c, resp, time.Now()) + _ = pw.Close() + _ = pr.Close() + + require.NotNil(t, result) + require.False(t, result.clientDisconnect) +} + +// TestStreamUpstreamResponse_TimeoutAfterClientDisconnect +// 验证:客户端断开后上游超时,返回 usage 并标记 clientDisconnect +func TestStreamUpstreamResponse_TimeoutAfterClientDisconnect(t *testing.T) { + gin.SetMode(gin.TestMode) + svc := newAntigravityTestService(&config.Config{ + Gateway: config.GatewayConfig{StreamDataIntervalTimeout: 1, MaxLineSize: defaultMaxLineSize}, + }) + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/", nil) + c.Writer = &antigravityFailingWriter{ResponseWriter: c.Writer, failAfter: 0} + + pr, pw := io.Pipe() + resp := &http.Response{StatusCode: http.StatusOK, Body: pr, Header: http.Header{}} + + go func() { + fmt.Fprintln(pw, `data: {"type":"message_start","message":{"usage":{"input_tokens":5}}}`) + fmt.Fprintln(pw, "") + // 不关闭 pw → 等待超时 + }() + + result := svc.streamUpstreamResponse(c, resp, time.Now()) + _ = pw.Close() + _ = pr.Close() + + require.NotNil(t, result) + require.True(t, result.clientDisconnect) +} + +// TestHandleGeminiStreamingResponse_ClientDisconnect +// 验证:Gemini 流式转发中客户端断开后继续 drain 上游 +func TestHandleGeminiStreamingResponse_ClientDisconnect(t 
*testing.T) { + gin.SetMode(gin.TestMode) + svc := newAntigravityTestService(&config.Config{ + Gateway: config.GatewayConfig{MaxLineSize: defaultMaxLineSize}, + }) + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/", nil) + c.Writer = &antigravityFailingWriter{ResponseWriter: c.Writer, failAfter: 0} + + pr, pw := io.Pipe() + resp := &http.Response{StatusCode: http.StatusOK, Body: pr, Header: http.Header{}} + + go func() { + defer func() { _ = pw.Close() }() + fmt.Fprintln(pw, `data: {"candidates":[{"content":{"parts":[{"text":"hi"}]}}],"usageMetadata":{"promptTokenCount":5,"candidatesTokenCount":10}}`) + fmt.Fprintln(pw, "") + }() + + result, err := svc.handleGeminiStreamingResponse(c, resp, time.Now()) + _ = pr.Close() + + require.NoError(t, err) + require.NotNil(t, result) + require.True(t, result.clientDisconnect) + require.NotContains(t, rec.Body.String(), "write_failed") +} + +// TestHandleGeminiStreamingResponse_ContextCanceled +// 验证:context 取消时不注入错误事件 +func TestHandleGeminiStreamingResponse_ContextCanceled(t *testing.T) { + gin.SetMode(gin.TestMode) + svc := newAntigravityTestService(&config.Config{ + Gateway: config.GatewayConfig{MaxLineSize: defaultMaxLineSize}, + }) + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + ctx, cancel := context.WithCancel(context.Background()) + cancel() + c.Request = httptest.NewRequest(http.MethodPost, "/", nil).WithContext(ctx) + + resp := &http.Response{StatusCode: http.StatusOK, Body: cancelReadCloser{}, Header: http.Header{}} + + result, err := svc.handleGeminiStreamingResponse(c, resp, time.Now()) + + require.NoError(t, err) + require.NotNil(t, result) + require.True(t, result.clientDisconnect) + require.NotContains(t, rec.Body.String(), "event: error") +} + +// TestHandleClaudeStreamingResponse_ClientDisconnect +// 验证:Claude 流式转发中客户端断开后继续 drain 上游 +func TestHandleClaudeStreamingResponse_ClientDisconnect(t *testing.T) { + 
gin.SetMode(gin.TestMode) + svc := newAntigravityTestService(&config.Config{ + Gateway: config.GatewayConfig{MaxLineSize: defaultMaxLineSize}, + }) + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/", nil) + c.Writer = &antigravityFailingWriter{ResponseWriter: c.Writer, failAfter: 0} + + pr, pw := io.Pipe() + resp := &http.Response{StatusCode: http.StatusOK, Body: pr, Header: http.Header{}} + + go func() { + defer func() { _ = pw.Close() }() + // v1internal 包装格式 + fmt.Fprintln(pw, `data: {"response":{"candidates":[{"content":{"parts":[{"text":"hello"}]},"finishReason":"STOP"}],"usageMetadata":{"promptTokenCount":8,"candidatesTokenCount":15}}}`) + fmt.Fprintln(pw, "") + }() + + result, err := svc.handleClaudeStreamingResponse(c, resp, time.Now(), "claude-sonnet-4-5") + _ = pr.Close() + + require.NoError(t, err) + require.NotNil(t, result) + require.True(t, result.clientDisconnect) +} + +// TestHandleClaudeStreamingResponse_ContextCanceled +// 验证:context 取消时不注入错误事件 +func TestHandleClaudeStreamingResponse_ContextCanceled(t *testing.T) { + gin.SetMode(gin.TestMode) + svc := newAntigravityTestService(&config.Config{ + Gateway: config.GatewayConfig{MaxLineSize: defaultMaxLineSize}, + }) + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + ctx, cancel := context.WithCancel(context.Background()) + cancel() + c.Request = httptest.NewRequest(http.MethodPost, "/", nil).WithContext(ctx) + + resp := &http.Response{StatusCode: http.StatusOK, Body: cancelReadCloser{}, Header: http.Header{}} + + result, err := svc.handleClaudeStreamingResponse(c, resp, time.Now(), "claude-sonnet-4-5") + + require.NoError(t, err) + require.NotNil(t, result) + require.True(t, result.clientDisconnect) + require.NotContains(t, rec.Body.String(), "event: error") +} + +// TestExtractSSEUsage 验证 extractSSEUsage 从 SSE data 行正确提取 usage +func TestExtractSSEUsage(t *testing.T) { + svc := &AntigravityGatewayService{} 
+ tests := []struct { + name string + line string + expected ClaudeUsage + }{ + { + name: "message_delta with output_tokens", + line: `data: {"type":"message_delta","usage":{"output_tokens":42}}`, + expected: ClaudeUsage{OutputTokens: 42}, + }, + { + name: "non-data line ignored", + line: `event: message_start`, + expected: ClaudeUsage{}, + }, + { + name: "top-level usage with all fields", + line: `data: {"usage":{"input_tokens":10,"output_tokens":20,"cache_read_input_tokens":5,"cache_creation_input_tokens":3}}`, + expected: ClaudeUsage{InputTokens: 10, OutputTokens: 20, CacheReadInputTokens: 5, CacheCreationInputTokens: 3}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + usage := &ClaudeUsage{} + svc.extractSSEUsage(tt.line, usage) + require.Equal(t, tt.expected, *usage) + }) + } +} + +// TestAntigravityClientWriter 验证 antigravityClientWriter 的断开检测 +func TestAntigravityClientWriter(t *testing.T) { + t.Run("normal write succeeds", func(t *testing.T) { + gin.SetMode(gin.TestMode) + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + flusher, _ := c.Writer.(http.Flusher) + cw := newAntigravityClientWriter(c.Writer, flusher, "test") + + ok := cw.Write([]byte("hello")) + require.True(t, ok) + require.False(t, cw.Disconnected()) + require.Contains(t, rec.Body.String(), "hello") + }) + + t.Run("write failure marks disconnected", func(t *testing.T) { + gin.SetMode(gin.TestMode) + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + fw := &antigravityFailingWriter{ResponseWriter: c.Writer, failAfter: 0} + flusher, _ := c.Writer.(http.Flusher) + cw := newAntigravityClientWriter(fw, flusher, "test") + + ok := cw.Write([]byte("hello")) + require.False(t, ok) + require.True(t, cw.Disconnected()) + }) + + t.Run("subsequent writes are no-op", func(t *testing.T) { + gin.SetMode(gin.TestMode) + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + fw := &antigravityFailingWriter{ResponseWriter: c.Writer, 
failAfter: 0} + flusher, _ := c.Writer.(http.Flusher) + cw := newAntigravityClientWriter(fw, flusher, "test") + + cw.Write([]byte("first")) + ok := cw.Fprintf("second %d", 2) + require.False(t, ok) + require.True(t, cw.Disconnected()) + }) +} diff --git a/backend/internal/service/antigravity_quota_scope.go b/backend/internal/service/antigravity_quota_scope.go index 43ac6c2f..e181e7f8 100644 --- a/backend/internal/service/antigravity_quota_scope.go +++ b/backend/internal/service/antigravity_quota_scope.go @@ -2,63 +2,23 @@ package service import ( "context" - "slices" "strings" "time" ) -const antigravityQuotaScopesKey = "antigravity_quota_scopes" - -// AntigravityQuotaScope 表示 Antigravity 的配额域 -type AntigravityQuotaScope string - -const ( - AntigravityQuotaScopeClaude AntigravityQuotaScope = "claude" - AntigravityQuotaScopeGeminiText AntigravityQuotaScope = "gemini_text" - AntigravityQuotaScopeGeminiImage AntigravityQuotaScope = "gemini_image" -) - -// IsScopeSupported 检查给定的 scope 是否在分组支持的 scope 列表中 -func IsScopeSupported(supportedScopes []string, scope AntigravityQuotaScope) bool { - if len(supportedScopes) == 0 { - // 未配置时默认全部支持 - return true - } - supported := slices.Contains(supportedScopes, string(scope)) - return supported -} - -// ResolveAntigravityQuotaScope 根据模型名称解析配额域(导出版本) -func ResolveAntigravityQuotaScope(requestedModel string) (AntigravityQuotaScope, bool) { - return resolveAntigravityQuotaScope(requestedModel) -} - -// resolveAntigravityQuotaScope 根据模型名称解析配额域 -func resolveAntigravityQuotaScope(requestedModel string) (AntigravityQuotaScope, bool) { - model := normalizeAntigravityModelName(requestedModel) - if model == "" { - return "", false - } - switch { - case strings.HasPrefix(model, "claude-"): - return AntigravityQuotaScopeClaude, true - case strings.HasPrefix(model, "gemini-"): - if isImageGenerationModel(model) { - return AntigravityQuotaScopeGeminiImage, true - } - return AntigravityQuotaScopeGeminiText, true - default: - return "", false - } 
-} - func normalizeAntigravityModelName(model string) string { normalized := strings.ToLower(strings.TrimSpace(model)) normalized = strings.TrimPrefix(normalized, "models/") return normalized } -// IsSchedulableForModel 结合 Antigravity 配额域限流判断是否可调度。 +// resolveAntigravityModelKey 根据请求的模型名解析限流 key +// 返回空字符串表示无法解析 +func resolveAntigravityModelKey(requestedModel string) string { + return normalizeAntigravityModelName(requestedModel) +} + +// IsSchedulableForModel 结合模型级限流判断是否可调度。 // 保持旧签名以兼容既有调用方;默认使用 context.Background()。 func (a *Account) IsSchedulableForModel(requestedModel string) bool { return a.IsSchedulableForModelWithContext(context.Background(), requestedModel) @@ -74,107 +34,20 @@ func (a *Account) IsSchedulableForModelWithContext(ctx context.Context, requeste if a.isModelRateLimitedWithContext(ctx, requestedModel) { return false } - if a.Platform != PlatformAntigravity { - return true - } - scope, ok := resolveAntigravityQuotaScope(requestedModel) - if !ok { - return true - } - resetAt := a.antigravityQuotaScopeResetAt(scope) - if resetAt == nil { - return true - } - now := time.Now() - return !now.Before(*resetAt) + return true } -func (a *Account) antigravityQuotaScopeResetAt(scope AntigravityQuotaScope) *time.Time { - if a == nil || a.Extra == nil || scope == "" { - return nil - } - rawScopes, ok := a.Extra[antigravityQuotaScopesKey].(map[string]any) - if !ok { - return nil - } - rawScope, ok := rawScopes[string(scope)].(map[string]any) - if !ok { - return nil - } - resetAtRaw, ok := rawScope["rate_limit_reset_at"].(string) - if !ok || strings.TrimSpace(resetAtRaw) == "" { - return nil - } - resetAt, err := time.Parse(time.RFC3339, resetAtRaw) - if err != nil { - return nil - } - return &resetAt -} - -var antigravityAllScopes = []AntigravityQuotaScope{ - AntigravityQuotaScopeClaude, - AntigravityQuotaScopeGeminiText, - AntigravityQuotaScopeGeminiImage, -} - -func (a *Account) GetAntigravityScopeRateLimits() map[string]int64 { - if a == nil || a.Platform 
!= PlatformAntigravity { - return nil - } - now := time.Now() - result := make(map[string]int64) - for _, scope := range antigravityAllScopes { - resetAt := a.antigravityQuotaScopeResetAt(scope) - if resetAt != nil && now.Before(*resetAt) { - remainingSec := int64(time.Until(*resetAt).Seconds()) - if remainingSec > 0 { - result[string(scope)] = remainingSec - } - } - } - if len(result) == 0 { - return nil - } - return result -} - -// GetQuotaScopeRateLimitRemainingTime 获取模型域限流剩余时间 -// 返回 0 表示未限流或已过期 -func (a *Account) GetQuotaScopeRateLimitRemainingTime(requestedModel string) time.Duration { - if a == nil || a.Platform != PlatformAntigravity { - return 0 - } - scope, ok := resolveAntigravityQuotaScope(requestedModel) - if !ok { - return 0 - } - resetAt := a.antigravityQuotaScopeResetAt(scope) - if resetAt == nil { - return 0 - } - if remaining := time.Until(*resetAt); remaining > 0 { - return remaining - } - return 0 -} - -// GetRateLimitRemainingTime 获取限流剩余时间(模型限流和模型域限流取最大值) +// GetRateLimitRemainingTime 获取限流剩余时间(模型级限流) // 返回 0 表示未限流或已过期 func (a *Account) GetRateLimitRemainingTime(requestedModel string) time.Duration { return a.GetRateLimitRemainingTimeWithContext(context.Background(), requestedModel) } -// GetRateLimitRemainingTimeWithContext 获取限流剩余时间(模型限流和模型域限流取最大值) +// GetRateLimitRemainingTimeWithContext 获取限流剩余时间(模型级限流) // 返回 0 表示未限流或已过期 func (a *Account) GetRateLimitRemainingTimeWithContext(ctx context.Context, requestedModel string) time.Duration { if a == nil { return 0 } - modelRemaining := a.GetModelRateLimitRemainingTimeWithContext(ctx, requestedModel) - scopeRemaining := a.GetQuotaScopeRateLimitRemainingTime(requestedModel) - if modelRemaining > scopeRemaining { - return modelRemaining - } - return scopeRemaining + return a.GetModelRateLimitRemainingTimeWithContext(ctx, requestedModel) } diff --git a/backend/internal/service/antigravity_rate_limit_test.go b/backend/internal/service/antigravity_rate_limit_test.go index cd2a7a4a..59cc9331 100644 --- 
a/backend/internal/service/antigravity_rate_limit_test.go +++ b/backend/internal/service/antigravity_rate_limit_test.go @@ -59,12 +59,6 @@ func (s *stubAntigravityUpstream) DoWithTLS(req *http.Request, proxyURL string, return s.Do(req, proxyURL, accountID, accountConcurrency) } -type scopeLimitCall struct { - accountID int64 - scope AntigravityQuotaScope - resetAt time.Time -} - type rateLimitCall struct { accountID int64 resetAt time.Time @@ -78,16 +72,10 @@ type modelRateLimitCall struct { type stubAntigravityAccountRepo struct { AccountRepository - scopeCalls []scopeLimitCall rateCalls []rateLimitCall modelRateLimitCalls []modelRateLimitCall } -func (s *stubAntigravityAccountRepo) SetAntigravityQuotaScopeLimit(ctx context.Context, id int64, scope AntigravityQuotaScope, resetAt time.Time) error { - s.scopeCalls = append(s.scopeCalls, scopeLimitCall{accountID: id, scope: scope, resetAt: resetAt}) - return nil -} - func (s *stubAntigravityAccountRepo) SetRateLimited(ctx context.Context, id int64, resetAt time.Time) error { s.rateCalls = append(s.rateCalls, rateLimitCall{accountID: id, resetAt: resetAt}) return nil @@ -131,10 +119,9 @@ func TestAntigravityRetryLoop_URLFallback_UsesLatestSuccess(t *testing.T) { accessToken: "token", action: "generateContent", body: []byte(`{"input":"test"}`), - quotaScope: AntigravityQuotaScopeClaude, httpUpstream: upstream, requestedModel: "claude-sonnet-4-5", - handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { + handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, requestedModel string, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { handleErrorCalled = true return nil }, @@ -155,23 +142,6 @@ func 
TestAntigravityRetryLoop_URLFallback_UsesLatestSuccess(t *testing.T) { require.Equal(t, base2, available[0]) } -func TestAntigravityHandleUpstreamError_UsesScopeLimit(t *testing.T) { - // 分区限流始终开启,不再支持通过环境变量关闭 - repo := &stubAntigravityAccountRepo{} - svc := &AntigravityGatewayService{accountRepo: repo} - account := &Account{ID: 9, Name: "acc-9", Platform: PlatformAntigravity} - - body := buildGeminiRateLimitBody("3s") - svc.handleUpstreamError(context.Background(), "[test]", account, http.StatusTooManyRequests, http.Header{}, body, AntigravityQuotaScopeClaude, 0, "", false) - - require.Len(t, repo.scopeCalls, 1) - require.Empty(t, repo.rateCalls) - call := repo.scopeCalls[0] - require.Equal(t, account.ID, call.accountID) - require.Equal(t, AntigravityQuotaScopeClaude, call.scope) - require.WithinDuration(t, time.Now().Add(3*time.Second), call.resetAt, 2*time.Second) -} - // TestHandleUpstreamError_429_ModelRateLimit 测试 429 模型限流场景 func TestHandleUpstreamError_429_ModelRateLimit(t *testing.T) { repo := &stubAntigravityAccountRepo{} @@ -189,7 +159,7 @@ func TestHandleUpstreamError_429_ModelRateLimit(t *testing.T) { } }`) - result := svc.handleUpstreamError(context.Background(), "[test]", account, http.StatusTooManyRequests, http.Header{}, body, AntigravityQuotaScopeClaude, 0, "", false) + result := svc.handleUpstreamError(context.Background(), "[test]", account, http.StatusTooManyRequests, http.Header{}, body, "claude-sonnet-4-5", 0, "", false) // 应该触发模型限流 require.NotNil(t, result) @@ -200,22 +170,22 @@ func TestHandleUpstreamError_429_ModelRateLimit(t *testing.T) { require.Equal(t, "claude-sonnet-4-5", repo.modelRateLimitCalls[0].modelKey) } -// TestHandleUpstreamError_429_NonModelRateLimit 测试 429 非模型限流场景(走 scope 限流) +// TestHandleUpstreamError_429_NonModelRateLimit 测试 429 非模型限流场景(走模型级限流兜底) func TestHandleUpstreamError_429_NonModelRateLimit(t *testing.T) { repo := &stubAntigravityAccountRepo{} svc := &AntigravityGatewayService{accountRepo: repo} account := 
&Account{ID: 2, Name: "acc-2", Platform: PlatformAntigravity} - // 429 + 普通限流响应(无 RATE_LIMIT_EXCEEDED reason)→ scope 限流 + // 429 + 普通限流响应(无 RATE_LIMIT_EXCEEDED reason)→ 走模型级限流兜底 body := buildGeminiRateLimitBody("5s") - result := svc.handleUpstreamError(context.Background(), "[test]", account, http.StatusTooManyRequests, http.Header{}, body, AntigravityQuotaScopeClaude, 0, "", false) + result := svc.handleUpstreamError(context.Background(), "[test]", account, http.StatusTooManyRequests, http.Header{}, body, "claude-sonnet-4-5", 0, "", false) - // 不应该触发模型限流,应该走 scope 限流 + // handleModelRateLimit 不会处理(因为没有 RATE_LIMIT_EXCEEDED), + // 但 429 兜底逻辑会使用 requestedModel 设置模型级限流 require.Nil(t, result) - require.Empty(t, repo.modelRateLimitCalls) - require.Len(t, repo.scopeCalls, 1) - require.Equal(t, AntigravityQuotaScopeClaude, repo.scopeCalls[0].scope) + require.Len(t, repo.modelRateLimitCalls, 1) + require.Equal(t, "claude-sonnet-4-5", repo.modelRateLimitCalls[0].modelKey) } // TestHandleUpstreamError_503_ModelRateLimit 测试 503 模型限流场景 @@ -235,7 +205,7 @@ func TestHandleUpstreamError_503_ModelRateLimit(t *testing.T) { } }`) - result := svc.handleUpstreamError(context.Background(), "[test]", account, http.StatusServiceUnavailable, http.Header{}, body, AntigravityQuotaScopeGeminiText, 0, "", false) + result := svc.handleUpstreamError(context.Background(), "[test]", account, http.StatusServiceUnavailable, http.Header{}, body, "gemini-3-pro-high", 0, "", false) // 应该触发模型限流 require.NotNil(t, result) @@ -263,12 +233,11 @@ func TestHandleUpstreamError_503_NonModelRateLimit(t *testing.T) { } }`) - result := svc.handleUpstreamError(context.Background(), "[test]", account, http.StatusServiceUnavailable, http.Header{}, body, AntigravityQuotaScopeGeminiText, 0, "", false) + result := svc.handleUpstreamError(context.Background(), "[test]", account, http.StatusServiceUnavailable, http.Header{}, body, "gemini-3-pro-high", 0, "", false) // 503 非模型限流不应该做任何处理 require.Nil(t, result) 
require.Empty(t, repo.modelRateLimitCalls, "503 non-model rate limit should not trigger model rate limit") - require.Empty(t, repo.scopeCalls, "503 non-model rate limit should not trigger scope rate limit") require.Empty(t, repo.rateCalls, "503 non-model rate limit should not trigger account rate limit") } @@ -281,12 +250,11 @@ func TestHandleUpstreamError_503_EmptyBody(t *testing.T) { // 503 + 空响应体 → 不做任何处理 body := []byte(`{}`) - result := svc.handleUpstreamError(context.Background(), "[test]", account, http.StatusServiceUnavailable, http.Header{}, body, AntigravityQuotaScopeGeminiText, 0, "", false) + result := svc.handleUpstreamError(context.Background(), "[test]", account, http.StatusServiceUnavailable, http.Header{}, body, "gemini-3-pro-high", 0, "", false) // 503 空响应不应该做任何处理 require.Nil(t, result) require.Empty(t, repo.modelRateLimitCalls) - require.Empty(t, repo.scopeCalls) require.Empty(t, repo.rateCalls) } @@ -307,15 +275,7 @@ func TestAccountIsSchedulableForModel_AntigravityRateLimits(t *testing.T) { require.False(t, account.IsSchedulableForModel("gemini-3-flash")) account.RateLimitResetAt = nil - account.Extra = map[string]any{ - antigravityQuotaScopesKey: map[string]any{ - "claude": map[string]any{ - "rate_limit_reset_at": future.Format(time.RFC3339), - }, - }, - } - - require.False(t, account.IsSchedulableForModel("claude-sonnet-4-5")) + require.True(t, account.IsSchedulableForModel("claude-sonnet-4-5")) require.True(t, account.IsSchedulableForModel("gemini-3-flash")) } @@ -635,6 +595,7 @@ func TestShouldTriggerAntigravitySmartRetry(t *testing.T) { }`, expectedShouldRetry: false, expectedShouldRateLimit: true, + minWait: 7 * time.Second, modelName: "gemini-pro", }, { @@ -652,6 +613,7 @@ func TestShouldTriggerAntigravitySmartRetry(t *testing.T) { }`, expectedShouldRetry: false, expectedShouldRateLimit: true, + minWait: 39 * time.Second, modelName: "gemini-3-pro-high", }, { @@ -669,6 +631,7 @@ func TestShouldTriggerAntigravitySmartRetry(t *testing.T) { 
}`, expectedShouldRetry: false, expectedShouldRateLimit: true, + minWait: 30 * time.Second, modelName: "gemini-2.5-flash", }, { @@ -686,6 +649,7 @@ func TestShouldTriggerAntigravitySmartRetry(t *testing.T) { }`, expectedShouldRetry: false, expectedShouldRateLimit: true, + minWait: 30 * time.Second, modelName: "claude-sonnet-4-5", }, } @@ -704,6 +668,11 @@ func TestShouldTriggerAntigravitySmartRetry(t *testing.T) { t.Errorf("wait = %v, want >= %v", wait, tt.minWait) } } + if shouldRateLimit && tt.minWait > 0 { + if wait < tt.minWait { + t.Errorf("rate limit wait = %v, want >= %v", wait, tt.minWait) + } + } if (shouldRetry || shouldRateLimit) && model != tt.modelName { t.Errorf("modelName = %q, want %q", model, tt.modelName) } @@ -832,7 +801,7 @@ func TestAntigravityRetryLoop_PreCheck_SwitchesWhenRateLimited(t *testing.T) { requestedModel: "claude-sonnet-4-5", httpUpstream: upstream, isStickySession: true, - handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { + handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, requestedModel string, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { return nil }, }) @@ -875,7 +844,7 @@ func TestAntigravityRetryLoop_PreCheck_SwitchesWhenRemainingLong(t *testing.T) { requestedModel: "claude-sonnet-4-5", httpUpstream: upstream, isStickySession: true, - handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { + handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, requestedModel string, groupID 
int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { return nil }, }) diff --git a/backend/internal/service/antigravity_smart_retry_test.go b/backend/internal/service/antigravity_smart_retry_test.go index 999b408f..a7e0d296 100644 --- a/backend/internal/service/antigravity_smart_retry_test.go +++ b/backend/internal/service/antigravity_smart_retry_test.go @@ -75,7 +75,7 @@ func TestHandleSmartRetry_URLLevelRateLimit(t *testing.T) { accessToken: "token", action: "generateContent", body: []byte(`{"input":"test"}`), - handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { + handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, requestedModel string, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { return nil }, } @@ -127,7 +127,7 @@ func TestHandleSmartRetry_LongDelay_ReturnsSwitchError(t *testing.T) { body: []byte(`{"input":"test"}`), accountRepo: repo, isStickySession: true, - handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { + handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, requestedModel string, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { return nil }, } @@ -194,7 +194,7 @@ func TestHandleSmartRetry_ShortDelay_SmartRetrySuccess(t *testing.T) { action: "generateContent", body: []byte(`{"input":"test"}`), httpUpstream: upstream, - handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body 
[]byte, quotaScope AntigravityQuotaScope, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { + handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, requestedModel string, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { return nil }, } @@ -269,7 +269,7 @@ func TestHandleSmartRetry_ShortDelay_SmartRetryFailed_ReturnsSwitchError(t *test httpUpstream: upstream, accountRepo: repo, isStickySession: false, - handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { + handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, requestedModel string, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { return nil }, } @@ -331,7 +331,7 @@ func TestHandleSmartRetry_503_ModelCapacityExhausted_ReturnsSwitchError(t *testi body: []byte(`{"input":"test"}`), accountRepo: repo, isStickySession: true, - handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { + handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, requestedModel string, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { return nil }, } @@ -387,7 +387,7 @@ func TestHandleSmartRetry_NonAntigravityAccount_ContinuesDefaultLogic(t *testing accessToken: "token", action: "generateContent", body: []byte(`{"input":"test"}`), - handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers 
http.Header, body []byte, quotaScope AntigravityQuotaScope, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { + handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, requestedModel string, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { return nil }, } @@ -436,7 +436,7 @@ func TestHandleSmartRetry_NonModelRateLimit_ContinuesDefaultLogic(t *testing.T) accessToken: "token", action: "generateContent", body: []byte(`{"input":"test"}`), - handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { + handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, requestedModel string, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { return nil }, } @@ -487,7 +487,7 @@ func TestHandleSmartRetry_ExactlyAtThreshold_ReturnsSwitchError(t *testing.T) { action: "generateContent", body: []byte(`{"input":"test"}`), accountRepo: repo, - handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { + handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, requestedModel string, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { return nil }, } @@ -548,7 +548,7 @@ func TestAntigravityRetryLoop_HandleSmartRetry_SwitchError_Propagates(t *testing httpUpstream: upstream, accountRepo: repo, isStickySession: true, - handleError: func(ctx context.Context, prefix string, account *Account, 
statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { + handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, requestedModel string, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { return nil }, }) @@ -604,7 +604,7 @@ func TestHandleSmartRetry_NetworkError_ExhaustsRetry(t *testing.T) { body: []byte(`{"input":"test"}`), httpUpstream: upstream, accountRepo: repo, - handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { + handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, requestedModel string, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { return nil }, } @@ -662,7 +662,7 @@ func TestHandleSmartRetry_NoRetryDelay_UsesDefaultRateLimit(t *testing.T) { body: []byte(`{"input":"test"}`), accountRepo: repo, isStickySession: true, - handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { + handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, requestedModel string, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { return nil }, } @@ -754,7 +754,7 @@ func TestHandleSmartRetry_ShortDelay_StickySession_FailedRetry_ClearsSession(t * isStickySession: true, groupID: 42, sessionHash: "sticky-hash-abc", - handleError: func(ctx context.Context, prefix string, account *Account, 
statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { + handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, requestedModel string, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { return nil }, } @@ -842,7 +842,7 @@ func TestHandleSmartRetry_ShortDelay_NonStickySession_FailedRetry_NoDeleteSessio isStickySession: false, groupID: 42, sessionHash: "", // 非粘性会话,sessionHash 为空 - handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { + handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, requestedModel string, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { return nil }, } @@ -918,7 +918,7 @@ func TestHandleSmartRetry_ShortDelay_StickySession_FailedRetry_NilCache_NoPanic( isStickySession: true, groupID: 42, sessionHash: "sticky-hash-nil-cache", - handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { + handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, requestedModel string, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { return nil }, } @@ -983,7 +983,7 @@ func TestHandleSmartRetry_ShortDelay_StickySession_SuccessRetry_NoDeleteSession( isStickySession: true, groupID: 42, sessionHash: "sticky-hash-success", - handleError: func(ctx context.Context, prefix string, 
account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { + handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, requestedModel string, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { return nil }, } @@ -1043,7 +1043,7 @@ func TestHandleSmartRetry_LongDelay_StickySession_NoDeleteInHandleSmartRetry(t * isStickySession: true, groupID: 42, sessionHash: "sticky-hash-long-delay", - handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { + handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, requestedModel string, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { return nil }, } @@ -1108,7 +1108,7 @@ func TestHandleSmartRetry_ShortDelay_NetworkError_StickySession_ClearsSession(t isStickySession: true, groupID: 99, sessionHash: "sticky-net-error", - handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { + handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, requestedModel string, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { return nil }, } @@ -1188,7 +1188,7 @@ func TestHandleSmartRetry_ShortDelay_503_StickySession_FailedRetry_ClearsSession isStickySession: true, groupID: 77, sessionHash: "sticky-503-short", - handleError: func(ctx context.Context, prefix 
string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { + handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, requestedModel string, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { return nil }, } @@ -1278,7 +1278,7 @@ func TestAntigravityRetryLoop_SmartRetryFailed_StickySession_SwitchErrorPropagat isStickySession: true, groupID: 55, sessionHash: "sticky-loop-test", - handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { + handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, requestedModel string, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { return nil }, }) @@ -1296,4 +1296,4 @@ func TestAntigravityRetryLoop_SmartRetryFailed_StickySession_SwitchErrorPropagat require.Len(t, cache.deleteCalls, 1, "should clear sticky session in handleSmartRetry") require.Equal(t, int64(55), cache.deleteCalls[0].groupID) require.Equal(t, "sticky-loop-test", cache.deleteCalls[0].sessionHash) -} \ No newline at end of file +} diff --git a/backend/internal/service/digest_session_store.go b/backend/internal/service/digest_session_store.go new file mode 100644 index 00000000..3ac08936 --- /dev/null +++ b/backend/internal/service/digest_session_store.go @@ -0,0 +1,69 @@ +package service + +import ( + "strconv" + "strings" + "time" + + gocache "github.com/patrickmn/go-cache" +) + +// digestSessionTTL 摘要会话默认 TTL +const digestSessionTTL = 5 * time.Minute + +// sessionEntry flat cache 条目 +type sessionEntry struct { + uuid string + accountID int64 +} + 
+// DigestSessionStore 内存摘要会话存储(flat cache 实现) +// key: "{groupID}:{prefixHash}|{digestChain}" → *sessionEntry +type DigestSessionStore struct { + cache *gocache.Cache +} + +// NewDigestSessionStore 创建内存摘要会话存储 +func NewDigestSessionStore() *DigestSessionStore { + return &DigestSessionStore{ + cache: gocache.New(digestSessionTTL, time.Minute), + } +} + +// Save 保存摘要会话。oldDigestChain 为 Find 返回的 matchedChain,用于删旧 key。 +func (s *DigestSessionStore) Save(groupID int64, prefixHash, digestChain, uuid string, accountID int64, oldDigestChain string) { + if digestChain == "" { + return + } + ns := buildNS(groupID, prefixHash) + s.cache.Set(ns+digestChain, &sessionEntry{uuid: uuid, accountID: accountID}, gocache.DefaultExpiration) + if oldDigestChain != "" && oldDigestChain != digestChain { + s.cache.Delete(ns + oldDigestChain) + } +} + +// Find 查找摘要会话,从完整 chain 逐段截断,返回最长匹配及对应 matchedChain。 +func (s *DigestSessionStore) Find(groupID int64, prefixHash, digestChain string) (uuid string, accountID int64, matchedChain string, found bool) { + if digestChain == "" { + return "", 0, "", false + } + ns := buildNS(groupID, prefixHash) + chain := digestChain + for { + if val, ok := s.cache.Get(ns + chain); ok { + if e, ok := val.(*sessionEntry); ok { + return e.uuid, e.accountID, chain, true + } + } + i := strings.LastIndex(chain, "-") + if i < 0 { + return "", 0, "", false + } + chain = chain[:i] + } +} + +// buildNS 构建 namespace 前缀 +func buildNS(groupID int64, prefixHash string) string { + return strconv.FormatInt(groupID, 10) + ":" + prefixHash + "|" +} diff --git a/backend/internal/service/digest_session_store_test.go b/backend/internal/service/digest_session_store_test.go new file mode 100644 index 00000000..e505bf30 --- /dev/null +++ b/backend/internal/service/digest_session_store_test.go @@ -0,0 +1,312 @@ +//go:build unit + +package service + +import ( + "fmt" + "sync" + "testing" + "time" + + gocache "github.com/patrickmn/go-cache" + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" +) + +func TestDigestSessionStore_SaveAndFind(t *testing.T) { + store := NewDigestSessionStore() + + store.Save(1, "prefix", "s:a1-u:b2-m:c3", "uuid-1", 100, "") + + uuid, accountID, _, found := store.Find(1, "prefix", "s:a1-u:b2-m:c3") + require.True(t, found) + assert.Equal(t, "uuid-1", uuid) + assert.Equal(t, int64(100), accountID) +} + +func TestDigestSessionStore_PrefixMatch(t *testing.T) { + store := NewDigestSessionStore() + + // 保存短链 + store.Save(1, "prefix", "u:a-m:b", "uuid-short", 10, "") + + // 用长链查找,应前缀匹配到短链 + uuid, accountID, matchedChain, found := store.Find(1, "prefix", "u:a-m:b-u:c-m:d") + require.True(t, found) + assert.Equal(t, "uuid-short", uuid) + assert.Equal(t, int64(10), accountID) + assert.Equal(t, "u:a-m:b", matchedChain) +} + +func TestDigestSessionStore_LongestPrefixMatch(t *testing.T) { + store := NewDigestSessionStore() + + store.Save(1, "prefix", "u:a", "uuid-1", 1, "") + store.Save(1, "prefix", "u:a-m:b", "uuid-2", 2, "") + store.Save(1, "prefix", "u:a-m:b-u:c", "uuid-3", 3, "") + + // 应匹配最深的 "u:a-m:b-u:c"(从完整 chain 逐段截断,先命中最长的) + uuid, accountID, _, found := store.Find(1, "prefix", "u:a-m:b-u:c-m:d-u:e") + require.True(t, found) + assert.Equal(t, "uuid-3", uuid) + assert.Equal(t, int64(3), accountID) + + // 查找中等长度,应匹配到 "u:a-m:b" + uuid, accountID, _, found = store.Find(1, "prefix", "u:a-m:b-u:x") + require.True(t, found) + assert.Equal(t, "uuid-2", uuid) + assert.Equal(t, int64(2), accountID) +} + +func TestDigestSessionStore_SaveDeletesOldChain(t *testing.T) { + store := NewDigestSessionStore() + + // 第一轮:保存 "u:a-m:b" + store.Save(1, "prefix", "u:a-m:b", "uuid-1", 100, "") + + // 第二轮:同一 uuid 保存更长的链,传入旧 chain + store.Save(1, "prefix", "u:a-m:b-u:c-m:d", "uuid-1", 100, "u:a-m:b") + + // 旧链 "u:a-m:b" 应已被删除 + _, _, _, found := store.Find(1, "prefix", "u:a-m:b") + assert.False(t, found, "old chain should be deleted") + + // 新链应能找到 + uuid, accountID, _, found := store.Find(1, "prefix", 
"u:a-m:b-u:c-m:d") + require.True(t, found) + assert.Equal(t, "uuid-1", uuid) + assert.Equal(t, int64(100), accountID) +} + +func TestDigestSessionStore_DifferentSessionsNoInterference(t *testing.T) { + store := NewDigestSessionStore() + + // 相同系统提示词,不同用户提示词 + store.Save(1, "prefix", "s:sys-u:user1", "uuid-1", 100, "") + store.Save(1, "prefix", "s:sys-u:user2", "uuid-2", 200, "") + + uuid, accountID, _, found := store.Find(1, "prefix", "s:sys-u:user1-m:reply1") + require.True(t, found) + assert.Equal(t, "uuid-1", uuid) + assert.Equal(t, int64(100), accountID) + + uuid, accountID, _, found = store.Find(1, "prefix", "s:sys-u:user2-m:reply2") + require.True(t, found) + assert.Equal(t, "uuid-2", uuid) + assert.Equal(t, int64(200), accountID) +} + +func TestDigestSessionStore_NoMatch(t *testing.T) { + store := NewDigestSessionStore() + + store.Save(1, "prefix", "u:a-m:b", "uuid-1", 100, "") + + // 完全不同的 chain + _, _, _, found := store.Find(1, "prefix", "u:x-m:y") + assert.False(t, found) +} + +func TestDigestSessionStore_DifferentPrefixHash(t *testing.T) { + store := NewDigestSessionStore() + + store.Save(1, "prefix1", "u:a-m:b", "uuid-1", 100, "") + + // 不同 prefixHash 应隔离 + _, _, _, found := store.Find(1, "prefix2", "u:a-m:b") + assert.False(t, found) +} + +func TestDigestSessionStore_DifferentGroupID(t *testing.T) { + store := NewDigestSessionStore() + + store.Save(1, "prefix", "u:a-m:b", "uuid-1", 100, "") + + // 不同 groupID 应隔离 + _, _, _, found := store.Find(2, "prefix", "u:a-m:b") + assert.False(t, found) +} + +func TestDigestSessionStore_EmptyDigestChain(t *testing.T) { + store := NewDigestSessionStore() + + // 空链不应保存 + store.Save(1, "prefix", "", "uuid-1", 100, "") + _, _, _, found := store.Find(1, "prefix", "") + assert.False(t, found) +} + +func TestDigestSessionStore_TTLExpiration(t *testing.T) { + store := &DigestSessionStore{ + cache: gocache.New(100*time.Millisecond, 50*time.Millisecond), + } + + store.Save(1, "prefix", "u:a-m:b", "uuid-1", 100, "") + + // 
立即应该能找到 + _, _, _, found := store.Find(1, "prefix", "u:a-m:b") + require.True(t, found) + + // 等待过期 + 清理周期 + time.Sleep(300 * time.Millisecond) + + // 过期后应找不到 + _, _, _, found = store.Find(1, "prefix", "u:a-m:b") + assert.False(t, found) +} + +func TestDigestSessionStore_ConcurrentSafety(t *testing.T) { + store := NewDigestSessionStore() + + var wg sync.WaitGroup + const goroutines = 50 + const operations = 100 + + wg.Add(goroutines) + for g := 0; g < goroutines; g++ { + go func(id int) { + defer wg.Done() + prefix := fmt.Sprintf("prefix-%d", id%5) + for i := 0; i < operations; i++ { + chain := fmt.Sprintf("u:%d-m:%d", id, i) + uuid := fmt.Sprintf("uuid-%d-%d", id, i) + store.Save(1, prefix, chain, uuid, int64(id), "") + store.Find(1, prefix, chain) + } + }(g) + } + wg.Wait() +} + +func TestDigestSessionStore_MultipleSessions(t *testing.T) { + store := NewDigestSessionStore() + + sessions := []struct { + chain string + uuid string + accountID int64 + }{ + {"u:session1", "uuid-1", 1}, + {"u:session2-m:reply2", "uuid-2", 2}, + {"u:session3-m:reply3-u:msg3", "uuid-3", 3}, + } + + for _, sess := range sessions { + store.Save(1, "prefix", sess.chain, sess.uuid, sess.accountID, "") + } + + // 验证每个会话都能正确查找 + for _, sess := range sessions { + uuid, accountID, _, found := store.Find(1, "prefix", sess.chain) + require.True(t, found, "should find session: %s", sess.chain) + assert.Equal(t, sess.uuid, uuid) + assert.Equal(t, sess.accountID, accountID) + } + + // 验证继续对话的场景 + uuid, accountID, _, found := store.Find(1, "prefix", "u:session2-m:reply2-u:newmsg") + require.True(t, found) + assert.Equal(t, "uuid-2", uuid) + assert.Equal(t, int64(2), accountID) +} + +func TestDigestSessionStore_Performance1000Sessions(t *testing.T) { + store := NewDigestSessionStore() + + // 插入 1000 个会话 + for i := 0; i < 1000; i++ { + chain := fmt.Sprintf("s:sys-u:user%d-m:reply%d", i, i) + store.Save(1, "prefix", chain, fmt.Sprintf("uuid-%d", i), int64(i), "") + } + + // 查找性能测试 + start := time.Now() 
+ const lookups = 10000 + for i := 0; i < lookups; i++ { + idx := i % 1000 + chain := fmt.Sprintf("s:sys-u:user%d-m:reply%d-u:newmsg", idx, idx) + _, _, _, found := store.Find(1, "prefix", chain) + assert.True(t, found) + } + elapsed := time.Since(start) + t.Logf("%d lookups in %v (%.0f ns/op)", lookups, elapsed, float64(elapsed.Nanoseconds())/lookups) +} + +func TestDigestSessionStore_FindReturnsMatchedChain(t *testing.T) { + store := NewDigestSessionStore() + + store.Save(1, "prefix", "u:a-m:b-u:c", "uuid-1", 100, "") + + // 精确匹配 + _, _, matchedChain, found := store.Find(1, "prefix", "u:a-m:b-u:c") + require.True(t, found) + assert.Equal(t, "u:a-m:b-u:c", matchedChain) + + // 前缀匹配(截断后命中) + _, _, matchedChain, found = store.Find(1, "prefix", "u:a-m:b-u:c-m:d-u:e") + require.True(t, found) + assert.Equal(t, "u:a-m:b-u:c", matchedChain) +} + +func TestDigestSessionStore_CacheItemCountStable(t *testing.T) { + store := NewDigestSessionStore() + + // 模拟 100 个独立会话,每个进行 10 轮对话 + // 正确传递 oldDigestChain 时,每个会话始终只保留 1 个 key + for conv := 0; conv < 100; conv++ { + var prevMatchedChain string + for round := 0; round < 10; round++ { + chain := fmt.Sprintf("s:sys-u:user%d", conv) + for r := 0; r < round; r++ { + chain += fmt.Sprintf("-m:a%d-u:q%d", r, r+1) + } + uuid := fmt.Sprintf("uuid-conv%d", conv) + + _, _, matched, _ := store.Find(1, "prefix", chain) + store.Save(1, "prefix", chain, uuid, int64(conv), matched) + prevMatchedChain = matched + _ = prevMatchedChain + } + } + + // 100 个会话 × 1 key/会话 = 应该 ≤ 100 个 key + // 允许少量并发残留,但绝不能接近 100×10=1000 + itemCount := store.cache.ItemCount() + assert.LessOrEqual(t, itemCount, 100, "cache should have at most 100 items (1 per conversation), got %d", itemCount) + t.Logf("Cache item count after 100 conversations × 10 rounds: %d", itemCount) +} + +func TestDigestSessionStore_TTLPreventsUnboundedGrowth(t *testing.T) { + // 使用极短 TTL 验证大量写入后 cache 能被清理 + store := &DigestSessionStore{ + cache: gocache.New(100*time.Millisecond, 
50*time.Millisecond), + } + + // 插入 500 个不同的 key(无 oldDigestChain,模拟最坏场景:全是新会话首轮) + for i := 0; i < 500; i++ { + chain := fmt.Sprintf("u:user%d", i) + store.Save(1, "prefix", chain, fmt.Sprintf("uuid-%d", i), int64(i), "") + } + + assert.Equal(t, 500, store.cache.ItemCount()) + + // 等待 TTL + 清理周期 + time.Sleep(300 * time.Millisecond) + + assert.Equal(t, 0, store.cache.ItemCount(), "all items should be expired and cleaned up") +} + +func TestDigestSessionStore_SaveSameChainNoDelete(t *testing.T) { + store := NewDigestSessionStore() + + // 保存 chain + store.Save(1, "prefix", "u:a-m:b", "uuid-1", 100, "") + + // 用户重发相同消息:oldDigestChain == digestChain,不应删掉刚设置的 key + store.Save(1, "prefix", "u:a-m:b", "uuid-1", 100, "u:a-m:b") + + // 仍然能找到 + uuid, accountID, _, found := store.Find(1, "prefix", "u:a-m:b") + require.True(t, found) + assert.Equal(t, "uuid-1", uuid) + assert.Equal(t, int64(100), accountID) +} diff --git a/backend/internal/service/error_policy_integration_test.go b/backend/internal/service/error_policy_integration_test.go new file mode 100644 index 00000000..9f8ad938 --- /dev/null +++ b/backend/internal/service/error_policy_integration_test.go @@ -0,0 +1,366 @@ +//go:build unit + +package service + +import ( + "context" + "io" + "net/http" + "strings" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/pkg/antigravity" + "github.com/stretchr/testify/require" +) + +// --------------------------------------------------------------------------- +// Mocks (scoped to this file by naming convention) +// --------------------------------------------------------------------------- + +// epFixedUpstream returns a fixed response for every request. 
+type epFixedUpstream struct { + statusCode int + body string + calls int +} + +func (u *epFixedUpstream) Do(req *http.Request, proxyURL string, accountID int64, accountConcurrency int) (*http.Response, error) { + u.calls++ + return &http.Response{ + StatusCode: u.statusCode, + Header: http.Header{}, + Body: io.NopCloser(strings.NewReader(u.body)), + }, nil +} + +func (u *epFixedUpstream) DoWithTLS(req *http.Request, proxyURL string, accountID int64, accountConcurrency int, enableTLSFingerprint bool) (*http.Response, error) { + return u.Do(req, proxyURL, accountID, accountConcurrency) +} + +// epAccountRepo records SetTempUnschedulable / SetError calls. +type epAccountRepo struct { + mockAccountRepoForGemini + tempCalls int + setErrCalls int +} + +func (r *epAccountRepo) SetTempUnschedulable(_ context.Context, _ int64, _ time.Time, _ string) error { + r.tempCalls++ + return nil +} + +func (r *epAccountRepo) SetError(_ context.Context, _ int64, _ string) error { + r.setErrCalls++ + return nil +} + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +func saveAndSetBaseURLs(t *testing.T) { + t.Helper() + oldBaseURLs := append([]string(nil), antigravity.BaseURLs...) 
+ oldAvail := antigravity.DefaultURLAvailability + antigravity.BaseURLs = []string{"https://ep-test.example"} + antigravity.DefaultURLAvailability = antigravity.NewURLAvailability(time.Minute) + t.Cleanup(func() { + antigravity.BaseURLs = oldBaseURLs + antigravity.DefaultURLAvailability = oldAvail + }) +} + +func newRetryParams(account *Account, upstream HTTPUpstream, handleError func(context.Context, string, *Account, int, http.Header, []byte, string, int64, string, bool) *handleModelRateLimitResult) antigravityRetryLoopParams { + return antigravityRetryLoopParams{ + ctx: context.Background(), + prefix: "[ep-test]", + account: account, + accessToken: "token", + action: "generateContent", + body: []byte(`{"input":"test"}`), + httpUpstream: upstream, + requestedModel: "claude-sonnet-4-5", + handleError: handleError, + } +} + +// --------------------------------------------------------------------------- +// TestRetryLoop_ErrorPolicy_CustomErrorCodes +// --------------------------------------------------------------------------- + +func TestRetryLoop_ErrorPolicy_CustomErrorCodes(t *testing.T) { + tests := []struct { + name string + upstreamStatus int + upstreamBody string + customCodes []any + expectHandleError int + expectUpstream int + expectStatusCode int + }{ + { + name: "429_in_custom_codes_matched", + upstreamStatus: 429, + upstreamBody: `{"error":"rate limited"}`, + customCodes: []any{float64(429)}, + expectHandleError: 1, + expectUpstream: 1, + expectStatusCode: 429, + }, + { + name: "429_not_in_custom_codes_skipped", + upstreamStatus: 429, + upstreamBody: `{"error":"rate limited"}`, + customCodes: []any{float64(500)}, + expectHandleError: 0, + expectUpstream: 1, + expectStatusCode: 429, + }, + { + name: "500_in_custom_codes_matched", + upstreamStatus: 500, + upstreamBody: `{"error":"internal"}`, + customCodes: []any{float64(500)}, + expectHandleError: 1, + expectUpstream: 1, + expectStatusCode: 500, + }, + { + name: "500_not_in_custom_codes_skipped", + 
upstreamStatus: 500, + upstreamBody: `{"error":"internal"}`, + customCodes: []any{float64(429)}, + expectHandleError: 0, + expectUpstream: 1, + expectStatusCode: 500, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + saveAndSetBaseURLs(t) + + upstream := &epFixedUpstream{statusCode: tt.upstreamStatus, body: tt.upstreamBody} + repo := &epAccountRepo{} + rlSvc := NewRateLimitService(repo, nil, &config.Config{}, nil, nil) + + account := &Account{ + ID: 100, + Type: AccountTypeAPIKey, + Platform: PlatformAntigravity, + Schedulable: true, + Status: StatusActive, + Concurrency: 1, + Credentials: map[string]any{ + "custom_error_codes_enabled": true, + "custom_error_codes": tt.customCodes, + }, + } + + svc := &AntigravityGatewayService{rateLimitService: rlSvc} + + var handleErrorCount int + p := newRetryParams(account, upstream, func(_ context.Context, _ string, _ *Account, _ int, _ http.Header, _ []byte, _ string, _ int64, _ string, _ bool) *handleModelRateLimitResult { + handleErrorCount++ + return nil + }) + + result, err := svc.antigravityRetryLoop(p) + + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.resp) + defer func() { _ = result.resp.Body.Close() }() + + require.Equal(t, tt.expectStatusCode, result.resp.StatusCode) + require.Equal(t, tt.expectHandleError, handleErrorCount, "handleError call count") + require.Equal(t, tt.expectUpstream, upstream.calls, "upstream call count") + }) + } +} + +// --------------------------------------------------------------------------- +// TestRetryLoop_ErrorPolicy_TempUnschedulable +// --------------------------------------------------------------------------- + +func TestRetryLoop_ErrorPolicy_TempUnschedulable(t *testing.T) { + tempRulesAccount := func(rules []any) *Account { + return &Account{ + ID: 200, + Type: AccountTypeOAuth, + Platform: PlatformAntigravity, + Schedulable: true, + Status: StatusActive, + Concurrency: 1, + Credentials: map[string]any{ + 
"temp_unschedulable_enabled": true, + "temp_unschedulable_rules": rules, + }, + } + } + + overloadedRule := map[string]any{ + "error_code": float64(503), + "keywords": []any{"overloaded"}, + "duration_minutes": float64(10), + } + + rateLimitRule := map[string]any{ + "error_code": float64(429), + "keywords": []any{"rate limited keyword"}, + "duration_minutes": float64(5), + } + + t.Run("503_overloaded_matches_rule", func(t *testing.T) { + saveAndSetBaseURLs(t) + + upstream := &epFixedUpstream{statusCode: 503, body: `overloaded`} + repo := &epAccountRepo{} + rlSvc := NewRateLimitService(repo, nil, &config.Config{}, nil, nil) + svc := &AntigravityGatewayService{rateLimitService: rlSvc} + + account := tempRulesAccount([]any{overloadedRule}) + p := newRetryParams(account, upstream, func(_ context.Context, _ string, _ *Account, _ int, _ http.Header, _ []byte, _ string, _ int64, _ string, _ bool) *handleModelRateLimitResult { + t.Error("handleError should not be called for temp unschedulable") + return nil + }) + + result, err := svc.antigravityRetryLoop(p) + + require.Nil(t, result) + var switchErr *AntigravityAccountSwitchError + require.ErrorAs(t, err, &switchErr) + require.Equal(t, account.ID, switchErr.OriginalAccountID) + require.Equal(t, 1, upstream.calls, "should not retry") + }) + + t.Run("429_rate_limited_keyword_matches_rule", func(t *testing.T) { + saveAndSetBaseURLs(t) + + upstream := &epFixedUpstream{statusCode: 429, body: `rate limited keyword`} + repo := &epAccountRepo{} + rlSvc := NewRateLimitService(repo, nil, &config.Config{}, nil, nil) + svc := &AntigravityGatewayService{rateLimitService: rlSvc} + + account := tempRulesAccount([]any{rateLimitRule}) + p := newRetryParams(account, upstream, func(_ context.Context, _ string, _ *Account, _ int, _ http.Header, _ []byte, _ string, _ int64, _ string, _ bool) *handleModelRateLimitResult { + t.Error("handleError should not be called for temp unschedulable") + return nil + }) + + result, err := 
svc.antigravityRetryLoop(p) + + require.Nil(t, result) + var switchErr *AntigravityAccountSwitchError + require.ErrorAs(t, err, &switchErr) + require.Equal(t, account.ID, switchErr.OriginalAccountID) + require.Equal(t, 1, upstream.calls, "should not retry") + }) + + t.Run("503_body_no_match_continues_default_retry", func(t *testing.T) { + saveAndSetBaseURLs(t) + + upstream := &epFixedUpstream{statusCode: 503, body: `random`} + repo := &epAccountRepo{} + rlSvc := NewRateLimitService(repo, nil, &config.Config{}, nil, nil) + svc := &AntigravityGatewayService{rateLimitService: rlSvc} + + account := tempRulesAccount([]any{overloadedRule}) + + // Use a short-lived context: the backoff sleep (~1s) will be + // interrupted, proving the code entered the default retry path + // instead of breaking early via error policy. + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + + p := newRetryParams(account, upstream, func(_ context.Context, _ string, _ *Account, _ int, _ http.Header, _ []byte, _ string, _ int64, _ string, _ bool) *handleModelRateLimitResult { + return nil + }) + p.ctx = ctx + + result, err := svc.antigravityRetryLoop(p) + + // Context cancellation during backoff proves default retry was entered + require.Nil(t, result) + require.ErrorIs(t, err, context.DeadlineExceeded) + require.GreaterOrEqual(t, upstream.calls, 1, "should have called upstream at least once") + }) +} + +// --------------------------------------------------------------------------- +// TestRetryLoop_ErrorPolicy_NilRateLimitService +// --------------------------------------------------------------------------- + +func TestRetryLoop_ErrorPolicy_NilRateLimitService(t *testing.T) { + saveAndSetBaseURLs(t) + + upstream := &epFixedUpstream{statusCode: 429, body: `{"error":"rate limited"}`} + // rateLimitService is nil — must not panic + svc := &AntigravityGatewayService{rateLimitService: nil} + + account := &Account{ + ID: 300, + Type: AccountTypeOAuth, + 
Platform: PlatformAntigravity, + Schedulable: true, + Status: StatusActive, + Concurrency: 1, + } + + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + + p := newRetryParams(account, upstream, func(_ context.Context, _ string, _ *Account, _ int, _ http.Header, _ []byte, _ string, _ int64, _ string, _ bool) *handleModelRateLimitResult { + return nil + }) + p.ctx = ctx + + // Should not panic; enters the default retry path (eventually times out) + result, err := svc.antigravityRetryLoop(p) + + require.Nil(t, result) + require.ErrorIs(t, err, context.DeadlineExceeded) + require.GreaterOrEqual(t, upstream.calls, 1) +} + +// --------------------------------------------------------------------------- +// TestRetryLoop_ErrorPolicy_NoPolicy_OriginalBehavior +// --------------------------------------------------------------------------- + +func TestRetryLoop_ErrorPolicy_NoPolicy_OriginalBehavior(t *testing.T) { + saveAndSetBaseURLs(t) + + upstream := &epFixedUpstream{statusCode: 429, body: `{"error":"rate limited"}`} + repo := &epAccountRepo{} + rlSvc := NewRateLimitService(repo, nil, &config.Config{}, nil, nil) + svc := &AntigravityGatewayService{rateLimitService: rlSvc} + + // Plain OAuth account with no error policy configured + account := &Account{ + ID: 400, + Type: AccountTypeOAuth, + Platform: PlatformAntigravity, + Schedulable: true, + Status: StatusActive, + Concurrency: 1, + } + + var handleErrorCount int + p := newRetryParams(account, upstream, func(_ context.Context, _ string, _ *Account, _ int, _ http.Header, _ []byte, _ string, _ int64, _ string, _ bool) *handleModelRateLimitResult { + handleErrorCount++ + return nil + }) + + result, err := svc.antigravityRetryLoop(p) + + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.resp) + defer func() { _ = result.resp.Body.Close() }() + + require.Equal(t, http.StatusTooManyRequests, result.resp.StatusCode) + require.Equal(t, 
antigravityMaxRetries, upstream.calls, "should exhaust all retries") + require.Equal(t, 1, handleErrorCount, "handleError should be called once after retries exhausted") +} diff --git a/backend/internal/service/error_policy_test.go b/backend/internal/service/error_policy_test.go new file mode 100644 index 00000000..a8b69c22 --- /dev/null +++ b/backend/internal/service/error_policy_test.go @@ -0,0 +1,289 @@ +//go:build unit + +package service + +import ( + "context" + "net/http" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/stretchr/testify/require" +) + +// --------------------------------------------------------------------------- +// TestCheckErrorPolicy — 6 table-driven cases for the pure logic function +// --------------------------------------------------------------------------- + +func TestCheckErrorPolicy(t *testing.T) { + tests := []struct { + name string + account *Account + statusCode int + body []byte + expected ErrorPolicyResult + }{ + { + name: "no_policy_oauth_returns_none", + account: &Account{ + ID: 1, + Type: AccountTypeOAuth, + Platform: PlatformAntigravity, + // no custom error codes, no temp rules + }, + statusCode: 500, + body: []byte(`"error"`), + expected: ErrorPolicyNone, + }, + { + name: "custom_error_codes_hit_returns_matched", + account: &Account{ + ID: 2, + Type: AccountTypeAPIKey, + Platform: PlatformAntigravity, + Credentials: map[string]any{ + "custom_error_codes_enabled": true, + "custom_error_codes": []any{float64(429), float64(500)}, + }, + }, + statusCode: 500, + body: []byte(`"error"`), + expected: ErrorPolicyMatched, + }, + { + name: "custom_error_codes_miss_returns_skipped", + account: &Account{ + ID: 3, + Type: AccountTypeAPIKey, + Platform: PlatformAntigravity, + Credentials: map[string]any{ + "custom_error_codes_enabled": true, + "custom_error_codes": []any{float64(429), float64(500)}, + }, + }, + statusCode: 503, + body: []byte(`"error"`), + expected: ErrorPolicySkipped, + }, + { + 
name: "temp_unschedulable_hit_returns_temp_unscheduled", + account: &Account{ + ID: 4, + Type: AccountTypeOAuth, + Platform: PlatformAntigravity, + Credentials: map[string]any{ + "temp_unschedulable_enabled": true, + "temp_unschedulable_rules": []any{ + map[string]any{ + "error_code": float64(503), + "keywords": []any{"overloaded"}, + "duration_minutes": float64(10), + "description": "overloaded rule", + }, + }, + }, + }, + statusCode: 503, + body: []byte(`overloaded service`), + expected: ErrorPolicyTempUnscheduled, + }, + { + name: "temp_unschedulable_body_miss_returns_none", + account: &Account{ + ID: 5, + Type: AccountTypeOAuth, + Platform: PlatformAntigravity, + Credentials: map[string]any{ + "temp_unschedulable_enabled": true, + "temp_unschedulable_rules": []any{ + map[string]any{ + "error_code": float64(503), + "keywords": []any{"overloaded"}, + "duration_minutes": float64(10), + "description": "overloaded rule", + }, + }, + }, + }, + statusCode: 503, + body: []byte(`random msg`), + expected: ErrorPolicyNone, + }, + { + name: "custom_error_codes_override_temp_unschedulable", + account: &Account{ + ID: 6, + Type: AccountTypeAPIKey, + Platform: PlatformAntigravity, + Credentials: map[string]any{ + "custom_error_codes_enabled": true, + "custom_error_codes": []any{float64(503)}, + "temp_unschedulable_enabled": true, + "temp_unschedulable_rules": []any{ + map[string]any{ + "error_code": float64(503), + "keywords": []any{"overloaded"}, + "duration_minutes": float64(10), + "description": "overloaded rule", + }, + }, + }, + }, + statusCode: 503, + body: []byte(`overloaded`), + expected: ErrorPolicyMatched, // custom codes take precedence + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + repo := &errorPolicyRepoStub{} + svc := NewRateLimitService(repo, nil, &config.Config{}, nil, nil) + + result := svc.CheckErrorPolicy(context.Background(), tt.account, tt.statusCode, tt.body) + require.Equal(t, tt.expected, result, "unexpected 
ErrorPolicyResult") + }) + } +} + +// --------------------------------------------------------------------------- +// TestApplyErrorPolicy — 4 table-driven cases for the wrapper method +// --------------------------------------------------------------------------- + +func TestApplyErrorPolicy(t *testing.T) { + tests := []struct { + name string + account *Account + statusCode int + body []byte + expectedHandled bool + expectedSwitchErr bool // expect *AntigravityAccountSwitchError + handleErrorCalls int + }{ + { + name: "none_not_handled", + account: &Account{ + ID: 10, + Type: AccountTypeOAuth, + Platform: PlatformAntigravity, + }, + statusCode: 500, + body: []byte(`"error"`), + expectedHandled: false, + handleErrorCalls: 0, + }, + { + name: "skipped_handled_no_handleError", + account: &Account{ + ID: 11, + Type: AccountTypeAPIKey, + Platform: PlatformAntigravity, + Credentials: map[string]any{ + "custom_error_codes_enabled": true, + "custom_error_codes": []any{float64(429)}, + }, + }, + statusCode: 500, // not in custom codes + body: []byte(`"error"`), + expectedHandled: true, + handleErrorCalls: 0, + }, + { + name: "matched_handled_calls_handleError", + account: &Account{ + ID: 12, + Type: AccountTypeAPIKey, + Platform: PlatformAntigravity, + Credentials: map[string]any{ + "custom_error_codes_enabled": true, + "custom_error_codes": []any{float64(500)}, + }, + }, + statusCode: 500, + body: []byte(`"error"`), + expectedHandled: true, + handleErrorCalls: 1, + }, + { + name: "temp_unscheduled_returns_switch_error", + account: &Account{ + ID: 13, + Type: AccountTypeOAuth, + Platform: PlatformAntigravity, + Credentials: map[string]any{ + "temp_unschedulable_enabled": true, + "temp_unschedulable_rules": []any{ + map[string]any{ + "error_code": float64(503), + "keywords": []any{"overloaded"}, + "duration_minutes": float64(10), + }, + }, + }, + }, + statusCode: 503, + body: []byte(`overloaded`), + expectedHandled: true, + expectedSwitchErr: true, + handleErrorCalls: 0, + 
}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + repo := &errorPolicyRepoStub{} + rlSvc := NewRateLimitService(repo, nil, &config.Config{}, nil, nil) + svc := &AntigravityGatewayService{ + rateLimitService: rlSvc, + } + + var handleErrorCount int + p := antigravityRetryLoopParams{ + ctx: context.Background(), + prefix: "[test]", + account: tt.account, + handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, requestedModel string, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { + handleErrorCount++ + return nil + }, + isStickySession: true, + } + + handled, retErr := svc.applyErrorPolicy(p, tt.statusCode, http.Header{}, tt.body) + + require.Equal(t, tt.expectedHandled, handled, "handled mismatch") + require.Equal(t, tt.handleErrorCalls, handleErrorCount, "handleError call count mismatch") + + if tt.expectedSwitchErr { + var switchErr *AntigravityAccountSwitchError + require.ErrorAs(t, retErr, &switchErr) + require.Equal(t, tt.account.ID, switchErr.OriginalAccountID) + } else { + require.NoError(t, retErr) + } + }) + } +} + +// --------------------------------------------------------------------------- +// errorPolicyRepoStub — minimal AccountRepository stub for error policy tests +// --------------------------------------------------------------------------- + +type errorPolicyRepoStub struct { + mockAccountRepoForGemini + tempCalls int + setErrCalls int + lastErrorMsg string +} + +func (r *errorPolicyRepoStub) SetTempUnschedulable(ctx context.Context, id int64, until time.Time, reason string) error { + r.tempCalls++ + return nil +} + +func (r *errorPolicyRepoStub) SetError(ctx context.Context, id int64, errorMsg string) error { + r.setErrCalls++ + r.lastErrorMsg = errorMsg + return nil +} diff --git a/backend/internal/service/gateway_multiplatform_test.go b/backend/internal/service/gateway_multiplatform_test.go index 
d9c852e0..e7544024 100644 --- a/backend/internal/service/gateway_multiplatform_test.go +++ b/backend/internal/service/gateway_multiplatform_test.go @@ -142,9 +142,6 @@ func (m *mockAccountRepoForPlatform) ListSchedulableByGroupIDAndPlatforms(ctx co func (m *mockAccountRepoForPlatform) SetRateLimited(ctx context.Context, id int64, resetAt time.Time) error { return nil } -func (m *mockAccountRepoForPlatform) SetAntigravityQuotaScopeLimit(ctx context.Context, id int64, scope AntigravityQuotaScope, resetAt time.Time) error { - return nil -} func (m *mockAccountRepoForPlatform) SetModelRateLimit(ctx context.Context, id int64, scope string, resetAt time.Time) error { return nil } @@ -216,29 +213,6 @@ func (m *mockGatewayCacheForPlatform) DeleteSessionAccountID(ctx context.Context return nil } -func (m *mockGatewayCacheForPlatform) IncrModelCallCount(ctx context.Context, accountID int64, model string) (int64, error) { - return 0, nil -} - -func (m *mockGatewayCacheForPlatform) GetModelLoadBatch(ctx context.Context, accountIDs []int64, model string) (map[int64]*ModelLoadInfo, error) { - return nil, nil -} - -func (m *mockGatewayCacheForPlatform) FindGeminiSession(ctx context.Context, groupID int64, prefixHash, digestChain string) (uuid string, accountID int64, found bool) { - return "", 0, false -} - -func (m *mockGatewayCacheForPlatform) SaveGeminiSession(ctx context.Context, groupID int64, prefixHash, digestChain, uuid string, accountID int64) error { - return nil -} - -func (m *mockGatewayCacheForPlatform) FindAnthropicSession(ctx context.Context, groupID int64, prefixHash, digestChain string) (uuid string, accountID int64, found bool) { - return "", 0, false -} - -func (m *mockGatewayCacheForPlatform) SaveAnthropicSession(ctx context.Context, groupID int64, prefixHash, digestChain, uuid string, accountID int64) error { - return nil -} type mockGroupRepoForGateway struct { groups map[int64]*Group diff --git a/backend/internal/service/gateway_request.go 
b/backend/internal/service/gateway_request.go index 0ecd18aa..c039f030 100644 --- a/backend/internal/service/gateway_request.go +++ b/backend/internal/service/gateway_request.go @@ -6,9 +6,19 @@ import ( "fmt" "math" + "github.com/Wei-Shaw/sub2api/internal/domain" "github.com/Wei-Shaw/sub2api/internal/pkg/antigravity" ) +// SessionContext 粘性会话上下文,用于区分不同来源的请求。 +// 仅在 GenerateSessionHash 第 3 级 fallback(消息内容 hash)时混入, +// 避免不同用户发送相同消息产生相同 hash 导致账号集中。 +type SessionContext struct { + ClientIP string + UserAgent string + APIKeyID int64 +} + // ParsedRequest 保存网关请求的预解析结果 // // 性能优化说明: @@ -22,20 +32,22 @@ import ( // 2. 将解析结果 ParsedRequest 传递给 Service 层 // 3. 避免重复 json.Unmarshal,减少 CPU 和内存开销 type ParsedRequest struct { - Body []byte // 原始请求体(保留用于转发) - Model string // 请求的模型名称 - Stream bool // 是否为流式请求 - MetadataUserID string // metadata.user_id(用于会话亲和) - System any // system 字段内容 - Messages []any // messages 数组 - HasSystem bool // 是否包含 system 字段(包含 null 也视为显式传入) - ThinkingEnabled bool // 是否开启 thinking(部分平台会影响最终模型名) - MaxTokens int // max_tokens 值(用于探测请求拦截) + Body []byte // 原始请求体(保留用于转发) + Model string // 请求的模型名称 + Stream bool // 是否为流式请求 + MetadataUserID string // metadata.user_id(用于会话亲和) + System any // system 字段内容 + Messages []any // messages 数组 + HasSystem bool // 是否包含 system 字段(包含 null 也视为显式传入) + ThinkingEnabled bool // 是否开启 thinking(部分平台会影响最终模型名) + MaxTokens int // max_tokens 值(用于探测请求拦截) + SessionContext *SessionContext // 可选:请求上下文区分因子(nil 时行为不变) } -// ParseGatewayRequest 解析网关请求体并返回结构化结果 -// 性能优化:一次解析提取所有需要的字段,避免重复 Unmarshal -func ParseGatewayRequest(body []byte) (*ParsedRequest, error) { +// ParseGatewayRequest 解析网关请求体并返回结构化结果。 +// protocol 指定请求协议格式(domain.PlatformAnthropic / domain.PlatformGemini), +// 不同协议使用不同的 system/messages 字段名。 +func ParseGatewayRequest(body []byte, protocol string) (*ParsedRequest, error) { var req map[string]any if err := json.Unmarshal(body, &req); err != nil { return nil, err @@ -64,14 +76,29 @@ func ParseGatewayRequest(body []byte) 
(*ParsedRequest, error) { parsed.MetadataUserID = userID } } - // system 字段只要存在就视为显式提供(即使为 null), - // 以避免客户端传 null 时被默认 system 误注入。 - if system, ok := req["system"]; ok { - parsed.HasSystem = true - parsed.System = system - } - if messages, ok := req["messages"].([]any); ok { - parsed.Messages = messages + + switch protocol { + case domain.PlatformGemini: + // Gemini 原生格式: systemInstruction.parts / contents + if sysInst, ok := req["systemInstruction"].(map[string]any); ok { + if parts, ok := sysInst["parts"].([]any); ok { + parsed.System = parts + } + } + if contents, ok := req["contents"].([]any); ok { + parsed.Messages = contents + } + default: + // Anthropic / OpenAI 格式: system / messages + // system 字段只要存在就视为显式提供(即使为 null), + // 以避免客户端传 null 时被默认 system 误注入。 + if system, ok := req["system"]; ok { + parsed.HasSystem = true + parsed.System = system + } + if messages, ok := req["messages"].([]any); ok { + parsed.Messages = messages + } } // thinking: {type: "enabled"} diff --git a/backend/internal/service/gateway_request_test.go b/backend/internal/service/gateway_request_test.go index 4e390b0a..cef41c91 100644 --- a/backend/internal/service/gateway_request_test.go +++ b/backend/internal/service/gateway_request_test.go @@ -4,12 +4,13 @@ import ( "encoding/json" "testing" + "github.com/Wei-Shaw/sub2api/internal/domain" "github.com/stretchr/testify/require" ) func TestParseGatewayRequest(t *testing.T) { body := []byte(`{"model":"claude-3-7-sonnet","stream":true,"metadata":{"user_id":"session_123e4567-e89b-12d3-a456-426614174000"},"system":[{"type":"text","text":"hello","cache_control":{"type":"ephemeral"}}],"messages":[{"content":"hi"}]}`) - parsed, err := ParseGatewayRequest(body) + parsed, err := ParseGatewayRequest(body, "") require.NoError(t, err) require.Equal(t, "claude-3-7-sonnet", parsed.Model) require.True(t, parsed.Stream) @@ -22,7 +23,7 @@ func TestParseGatewayRequest(t *testing.T) { func TestParseGatewayRequest_ThinkingEnabled(t *testing.T) { body := 
[]byte(`{"model":"claude-sonnet-4-5","thinking":{"type":"enabled"},"messages":[{"content":"hi"}]}`) - parsed, err := ParseGatewayRequest(body) + parsed, err := ParseGatewayRequest(body, "") require.NoError(t, err) require.Equal(t, "claude-sonnet-4-5", parsed.Model) require.True(t, parsed.ThinkingEnabled) @@ -30,21 +31,21 @@ func TestParseGatewayRequest_ThinkingEnabled(t *testing.T) { func TestParseGatewayRequest_MaxTokens(t *testing.T) { body := []byte(`{"model":"claude-haiku-4-5","max_tokens":1}`) - parsed, err := ParseGatewayRequest(body) + parsed, err := ParseGatewayRequest(body, "") require.NoError(t, err) require.Equal(t, 1, parsed.MaxTokens) } func TestParseGatewayRequest_MaxTokensNonIntegralIgnored(t *testing.T) { body := []byte(`{"model":"claude-haiku-4-5","max_tokens":1.5}`) - parsed, err := ParseGatewayRequest(body) + parsed, err := ParseGatewayRequest(body, "") require.NoError(t, err) require.Equal(t, 0, parsed.MaxTokens) } func TestParseGatewayRequest_SystemNull(t *testing.T) { body := []byte(`{"model":"claude-3","system":null}`) - parsed, err := ParseGatewayRequest(body) + parsed, err := ParseGatewayRequest(body, "") require.NoError(t, err) // 显式传入 system:null 也应视为“字段已存在”,避免默认 system 被注入。 require.True(t, parsed.HasSystem) @@ -53,16 +54,112 @@ func TestParseGatewayRequest_SystemNull(t *testing.T) { func TestParseGatewayRequest_InvalidModelType(t *testing.T) { body := []byte(`{"model":123}`) - _, err := ParseGatewayRequest(body) + _, err := ParseGatewayRequest(body, "") require.Error(t, err) } func TestParseGatewayRequest_InvalidStreamType(t *testing.T) { body := []byte(`{"stream":"true"}`) - _, err := ParseGatewayRequest(body) + _, err := ParseGatewayRequest(body, "") require.Error(t, err) } +// ============ Gemini 原生格式解析测试 ============ + +func TestParseGatewayRequest_GeminiContents(t *testing.T) { + body := []byte(`{ + "contents": [ + {"role": "user", "parts": [{"text": "Hello"}]}, + {"role": "model", "parts": [{"text": "Hi there"}]}, + {"role": 
"user", "parts": [{"text": "How are you?"}]} + ] + }`) + parsed, err := ParseGatewayRequest(body, domain.PlatformGemini) + require.NoError(t, err) + require.Len(t, parsed.Messages, 3, "should parse contents as Messages") + require.False(t, parsed.HasSystem, "Gemini format should not set HasSystem") + require.Nil(t, parsed.System, "no systemInstruction means nil System") +} + +func TestParseGatewayRequest_GeminiSystemInstruction(t *testing.T) { + body := []byte(`{ + "systemInstruction": { + "parts": [{"text": "You are a helpful assistant."}] + }, + "contents": [ + {"role": "user", "parts": [{"text": "Hello"}]} + ] + }`) + parsed, err := ParseGatewayRequest(body, domain.PlatformGemini) + require.NoError(t, err) + require.NotNil(t, parsed.System, "should parse systemInstruction.parts as System") + parts, ok := parsed.System.([]any) + require.True(t, ok) + require.Len(t, parts, 1) + partMap, ok := parts[0].(map[string]any) + require.True(t, ok) + require.Equal(t, "You are a helpful assistant.", partMap["text"]) + require.Len(t, parsed.Messages, 1) +} + +func TestParseGatewayRequest_GeminiWithModel(t *testing.T) { + body := []byte(`{ + "model": "gemini-2.5-pro", + "contents": [{"role": "user", "parts": [{"text": "test"}]}] + }`) + parsed, err := ParseGatewayRequest(body, domain.PlatformGemini) + require.NoError(t, err) + require.Equal(t, "gemini-2.5-pro", parsed.Model) + require.Len(t, parsed.Messages, 1) +} + +func TestParseGatewayRequest_GeminiIgnoresAnthropicFields(t *testing.T) { + // Gemini 格式下 system/messages 字段应被忽略 + body := []byte(`{ + "system": "should be ignored", + "messages": [{"role": "user", "content": "ignored"}], + "contents": [{"role": "user", "parts": [{"text": "real content"}]}] + }`) + parsed, err := ParseGatewayRequest(body, domain.PlatformGemini) + require.NoError(t, err) + require.False(t, parsed.HasSystem, "Gemini protocol should not parse Anthropic system field") + require.Nil(t, parsed.System, "no systemInstruction = nil System") + 
require.Len(t, parsed.Messages, 1, "should use contents, not messages") +} + +func TestParseGatewayRequest_GeminiEmptyContents(t *testing.T) { + body := []byte(`{"contents": []}`) + parsed, err := ParseGatewayRequest(body, domain.PlatformGemini) + require.NoError(t, err) + require.Empty(t, parsed.Messages) +} + +func TestParseGatewayRequest_GeminiNoContents(t *testing.T) { + body := []byte(`{"model": "gemini-2.5-flash"}`) + parsed, err := ParseGatewayRequest(body, domain.PlatformGemini) + require.NoError(t, err) + require.Nil(t, parsed.Messages) + require.Equal(t, "gemini-2.5-flash", parsed.Model) +} + +func TestParseGatewayRequest_AnthropicIgnoresGeminiFields(t *testing.T) { + // Anthropic 格式下 contents/systemInstruction 字段应被忽略 + body := []byte(`{ + "system": "real system", + "messages": [{"role": "user", "content": "real content"}], + "contents": [{"role": "user", "parts": [{"text": "ignored"}]}], + "systemInstruction": {"parts": [{"text": "ignored"}]} + }`) + parsed, err := ParseGatewayRequest(body, domain.PlatformAnthropic) + require.NoError(t, err) + require.True(t, parsed.HasSystem) + require.Equal(t, "real system", parsed.System) + require.Len(t, parsed.Messages, 1) + msg, ok := parsed.Messages[0].(map[string]any) + require.True(t, ok) + require.Equal(t, "real content", msg["content"]) +} + func TestFilterThinkingBlocks(t *testing.T) { containsThinkingBlock := func(body []byte) bool { var req map[string]any diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 480f5b67..4e723232 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -5,7 +5,6 @@ import ( "bytes" "context" "crypto/sha256" - "encoding/hex" "encoding/json" "errors" "fmt" @@ -17,6 +16,7 @@ import ( "os" "regexp" "sort" + "strconv" "strings" "sync/atomic" "time" @@ -26,6 +26,7 @@ import ( "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" 
"github.com/Wei-Shaw/sub2api/internal/util/responseheaders" "github.com/Wei-Shaw/sub2api/internal/util/urlvalidator" + "github.com/cespare/xxhash/v2" "github.com/google/uuid" "github.com/tidwall/gjson" "github.com/tidwall/sjson" @@ -245,9 +246,6 @@ var ( // ErrClaudeCodeOnly 表示分组仅允许 Claude Code 客户端访问 var ErrClaudeCodeOnly = errors.New("this group only allows Claude Code clients") -// ErrModelScopeNotSupported 表示请求的模型系列不在分组支持的范围内 -var ErrModelScopeNotSupported = errors.New("model scope not supported by this group") - // allowedHeaders 白名单headers(参考CRS项目) var allowedHeaders = map[string]bool{ "accept": true, @@ -273,13 +271,6 @@ var allowedHeaders = map[string]bool{ // GatewayCache 定义网关服务的缓存操作接口。 // 提供粘性会话(Sticky Session)的存储、查询、刷新和删除功能。 // -// ModelLoadInfo 模型负载信息(用于 Antigravity 调度) -// Model load info for Antigravity scheduling -type ModelLoadInfo struct { - CallCount int64 // 当前分钟调用次数 / Call count in current minute - LastUsedAt time.Time // 最后调度时间(零值表示未调度过)/ Last scheduling time (zero means never scheduled) -} - // GatewayCache defines cache operations for gateway service. // Provides sticky session storage, retrieval, refresh and deletion capabilities. 
type GatewayCache interface { @@ -295,32 +286,6 @@ type GatewayCache interface { // DeleteSessionAccountID 删除粘性会话绑定,用于账号不可用时主动清理 // Delete sticky session binding, used to proactively clean up when account becomes unavailable DeleteSessionAccountID(ctx context.Context, groupID int64, sessionHash string) error - - // IncrModelCallCount 增加模型调用次数并更新最后调度时间(Antigravity 专用) - // Increment model call count and update last scheduling time (Antigravity only) - // 返回更新后的调用次数 - IncrModelCallCount(ctx context.Context, accountID int64, model string) (int64, error) - - // GetModelLoadBatch 批量获取账号的模型负载信息(Antigravity 专用) - // Batch get model load info for accounts (Antigravity only) - GetModelLoadBatch(ctx context.Context, accountIDs []int64, model string) (map[int64]*ModelLoadInfo, error) - - // FindGeminiSession 查找 Gemini 会话(MGET 倒序匹配) - // Find Gemini session using MGET reverse order matching - // 返回最长匹配的会话信息(uuid, accountID) - FindGeminiSession(ctx context.Context, groupID int64, prefixHash, digestChain string) (uuid string, accountID int64, found bool) - - // SaveGeminiSession 保存 Gemini 会话 - // Save Gemini session binding - SaveGeminiSession(ctx context.Context, groupID int64, prefixHash, digestChain, uuid string, accountID int64) error - - // FindAnthropicSession 查找 Anthropic 会话(Trie 匹配) - // Find Anthropic session using Trie matching - FindAnthropicSession(ctx context.Context, groupID int64, prefixHash, digestChain string) (uuid string, accountID int64, found bool) - - // SaveAnthropicSession 保存 Anthropic 会话 - // Save Anthropic session binding - SaveAnthropicSession(ctx context.Context, groupID int64, prefixHash, digestChain, uuid string, accountID int64) error } // derefGroupID safely dereferences *int64 to int64, returning 0 if nil @@ -415,6 +380,7 @@ type GatewayService struct { userSubRepo UserSubscriptionRepository userGroupRateRepo UserGroupRateRepository cache GatewayCache + digestStore *DigestSessionStore cfg *config.Config schedulerSnapshot *SchedulerSnapshotService 
billingService *BillingService @@ -448,6 +414,7 @@ func NewGatewayService( deferredService *DeferredService, claudeTokenProvider *ClaudeTokenProvider, sessionLimitCache SessionLimitCache, + digestStore *DigestSessionStore, ) *GatewayService { return &GatewayService{ accountRepo: accountRepo, @@ -457,6 +424,7 @@ func NewGatewayService( userSubRepo: userSubRepo, userGroupRateRepo: userGroupRateRepo, cache: cache, + digestStore: digestStore, cfg: cfg, schedulerSnapshot: schedulerSnapshot, concurrencyService: concurrencyService, @@ -490,8 +458,17 @@ func (s *GatewayService) GenerateSessionHash(parsed *ParsedRequest) string { return s.hashContent(cacheableContent) } - // 3. 最后 fallback: 使用 system + 所有消息的完整摘要串 + // 3. 最后 fallback: 使用 session上下文 + system + 所有消息的完整摘要串 var combined strings.Builder + // 混入请求上下文区分因子,避免不同用户相同消息产生相同 hash + if parsed.SessionContext != nil { + _, _ = combined.WriteString(parsed.SessionContext.ClientIP) + _, _ = combined.WriteString(":") + _, _ = combined.WriteString(parsed.SessionContext.UserAgent) + _, _ = combined.WriteString(":") + _, _ = combined.WriteString(strconv.FormatInt(parsed.SessionContext.APIKeyID, 10)) + _, _ = combined.WriteString("|") + } if parsed.System != nil { systemText := s.extractTextFromSystem(parsed.System) if systemText != "" { @@ -500,9 +477,20 @@ func (s *GatewayService) GenerateSessionHash(parsed *ParsedRequest) string { } for _, msg := range parsed.Messages { if m, ok := msg.(map[string]any); ok { - msgText := s.extractTextFromContent(m["content"]) - if msgText != "" { - _, _ = combined.WriteString(msgText) + if content, exists := m["content"]; exists { + // Anthropic: messages[].content + if msgText := s.extractTextFromContent(content); msgText != "" { + _, _ = combined.WriteString(msgText) + } + } else if parts, ok := m["parts"].([]any); ok { + // Gemini: contents[].parts[].text + for _, part := range parts { + if partMap, ok := part.(map[string]any); ok { + if text, ok := partMap["text"].(string); ok { + _, _ = 
combined.WriteString(text) + } + } + } } } } @@ -536,35 +524,37 @@ func (s *GatewayService) GetCachedSessionAccountID(ctx context.Context, groupID // FindGeminiSession 查找 Gemini 会话(基于内容摘要链的 Fallback 匹配) // 返回最长匹配的会话信息(uuid, accountID) -func (s *GatewayService) FindGeminiSession(ctx context.Context, groupID int64, prefixHash, digestChain string) (uuid string, accountID int64, found bool) { - if digestChain == "" || s.cache == nil { - return "", 0, false +func (s *GatewayService) FindGeminiSession(_ context.Context, groupID int64, prefixHash, digestChain string) (uuid string, accountID int64, matchedChain string, found bool) { + if digestChain == "" || s.digestStore == nil { + return "", 0, "", false } - return s.cache.FindGeminiSession(ctx, groupID, prefixHash, digestChain) + return s.digestStore.Find(groupID, prefixHash, digestChain) } -// SaveGeminiSession 保存 Gemini 会话 -func (s *GatewayService) SaveGeminiSession(ctx context.Context, groupID int64, prefixHash, digestChain, uuid string, accountID int64) error { - if digestChain == "" || s.cache == nil { +// SaveGeminiSession 保存 Gemini 会话。oldDigestChain 为 Find 返回的 matchedChain,用于删旧 key。 +func (s *GatewayService) SaveGeminiSession(_ context.Context, groupID int64, prefixHash, digestChain, uuid string, accountID int64, oldDigestChain string) error { + if digestChain == "" || s.digestStore == nil { return nil } - return s.cache.SaveGeminiSession(ctx, groupID, prefixHash, digestChain, uuid, accountID) + s.digestStore.Save(groupID, prefixHash, digestChain, uuid, accountID, oldDigestChain) + return nil } // FindAnthropicSession 查找 Anthropic 会话(基于内容摘要链的 Fallback 匹配) -func (s *GatewayService) FindAnthropicSession(ctx context.Context, groupID int64, prefixHash, digestChain string) (uuid string, accountID int64, found bool) { - if digestChain == "" || s.cache == nil { - return "", 0, false +func (s *GatewayService) FindAnthropicSession(_ context.Context, groupID int64, prefixHash, digestChain string) (uuid string, accountID 
int64, matchedChain string, found bool) { + if digestChain == "" || s.digestStore == nil { + return "", 0, "", false } - return s.cache.FindAnthropicSession(ctx, groupID, prefixHash, digestChain) + return s.digestStore.Find(groupID, prefixHash, digestChain) } // SaveAnthropicSession 保存 Anthropic 会话 -func (s *GatewayService) SaveAnthropicSession(ctx context.Context, groupID int64, prefixHash, digestChain, uuid string, accountID int64) error { - if digestChain == "" || s.cache == nil { +func (s *GatewayService) SaveAnthropicSession(_ context.Context, groupID int64, prefixHash, digestChain, uuid string, accountID int64, oldDigestChain string) error { + if digestChain == "" || s.digestStore == nil { return nil } - return s.cache.SaveAnthropicSession(ctx, groupID, prefixHash, digestChain, uuid, accountID) + s.digestStore.Save(groupID, prefixHash, digestChain, uuid, accountID, oldDigestChain) + return nil } func (s *GatewayService) extractCacheableContent(parsed *ParsedRequest) string { @@ -649,8 +639,8 @@ func (s *GatewayService) extractTextFromContent(content any) string { } func (s *GatewayService) hashContent(content string) string { - hash := sha256.Sum256([]byte(content)) - return hex.EncodeToString(hash[:16]) // 32字符 + h := xxhash.Sum64String(content) + return strconv.FormatUint(h, 36) } // replaceModelInBody 替换请求体中的model字段 @@ -1009,13 +999,6 @@ func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, gro log.Printf("[ModelRoutingDebug] load-aware enabled: group_id=%v model=%s session=%s platform=%s", derefGroupID(groupID), requestedModel, shortSessionHash(sessionHash), platform) } - // Antigravity 模型系列检查(在账号选择前检查,确保所有代码路径都经过此检查) - if platform == PlatformAntigravity && groupID != nil && requestedModel != "" { - if err := s.checkAntigravityModelScope(ctx, *groupID, requestedModel); err != nil { - return nil, err - } - } - accounts, useMixed, err := s.listSchedulableAccounts(ctx, groupID, platform, hasForcePlatform) if err != nil { return nil, 
err @@ -1209,6 +1192,7 @@ func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, gro return a.account.LastUsedAt.Before(*b.account.LastUsedAt) } }) + shuffleWithinSortGroups(routingAvailable) // 4. 尝试获取槽位 for _, item := range routingAvailable { @@ -1362,10 +1346,6 @@ func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, gro return result, nil } } else { - // Antigravity 平台:获取模型负载信息 - var modelLoadMap map[int64]*ModelLoadInfo - isAntigravity := platform == PlatformAntigravity - var available []accountWithLoad for _, acc := range candidates { loadInfo := loadMap[acc.ID] @@ -1380,109 +1360,44 @@ func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, gro } } - // Antigravity 平台:按账号实际映射后的模型名获取模型负载(与 Forward 的统计保持一致) - if isAntigravity && requestedModel != "" && s.cache != nil && len(available) > 0 { - modelLoadMap = make(map[int64]*ModelLoadInfo, len(available)) - modelToAccountIDs := make(map[string][]int64) - for _, item := range available { - mappedModel := mapAntigravityModel(item.account, requestedModel) - if mappedModel == "" { - continue - } - modelToAccountIDs[mappedModel] = append(modelToAccountIDs[mappedModel], item.account.ID) + // 分层过滤选择:优先级 → 负载率 → LRU + for len(available) > 0 { + // 1. 取优先级最小的集合 + candidates := filterByMinPriority(available) + // 2. 取负载率最低的集合 + candidates = filterByMinLoadRate(candidates) + // 3. LRU 选择最久未用的账号 + selected := selectByLRU(candidates, preferOAuth) + if selected == nil { + break } - for model, ids := range modelToAccountIDs { - batch, err := s.cache.GetModelLoadBatch(ctx, ids, model) - if err != nil { - continue - } - for id, info := range batch { - modelLoadMap[id] = info - } - } - if len(modelLoadMap) == 0 { - modelLoadMap = nil - } - } - // Antigravity 平台:优先级硬过滤 →(同优先级内)按调用次数选择(最少优先,新账号用平均值) - // 其他平台:分层过滤选择:优先级 → 负载率 → LRU - if isAntigravity { - for len(available) > 0 { - // 1. 取优先级最小的集合(硬过滤) - candidates := filterByMinPriority(available) - // 2. 
同优先级内按调用次数选择(调用次数最少优先,新账号使用平均值) - selected := selectByCallCount(candidates, modelLoadMap, preferOAuth) - if selected == nil { - break - } - - result, err := s.tryAcquireAccountSlot(ctx, selected.account.ID, selected.account.Concurrency) - if err == nil && result.Acquired { - // 会话数量限制检查 - if !s.checkAndRegisterSession(ctx, selected.account, sessionHash) { - result.ReleaseFunc() // 释放槽位,继续尝试下一个账号 - } else { - if sessionHash != "" && s.cache != nil { - _ = s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), sessionHash, selected.account.ID, stickySessionTTL) - } - return &AccountSelectionResult{ - Account: selected.account, - Acquired: true, - ReleaseFunc: result.ReleaseFunc, - }, nil + result, err := s.tryAcquireAccountSlot(ctx, selected.account.ID, selected.account.Concurrency) + if err == nil && result.Acquired { + // 会话数量限制检查 + if !s.checkAndRegisterSession(ctx, selected.account, sessionHash) { + result.ReleaseFunc() // 释放槽位,继续尝试下一个账号 + } else { + if sessionHash != "" && s.cache != nil { + _ = s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), sessionHash, selected.account.ID, stickySessionTTL) } + return &AccountSelectionResult{ + Account: selected.account, + Acquired: true, + ReleaseFunc: result.ReleaseFunc, + }, nil } - - // 移除已尝试的账号,重新选择 - selectedID := selected.account.ID - newAvailable := make([]accountWithLoad, 0, len(available)-1) - for _, acc := range available { - if acc.account.ID != selectedID { - newAvailable = append(newAvailable, acc) - } - } - available = newAvailable } - } else { - for len(available) > 0 { - // 1. 取优先级最小的集合 - candidates := filterByMinPriority(available) - // 2. 取负载率最低的集合 - candidates = filterByMinLoadRate(candidates) - // 3. 
LRU 选择最久未用的账号 - selected := selectByLRU(candidates, preferOAuth) - if selected == nil { - break - } - result, err := s.tryAcquireAccountSlot(ctx, selected.account.ID, selected.account.Concurrency) - if err == nil && result.Acquired { - // 会话数量限制检查 - if !s.checkAndRegisterSession(ctx, selected.account, sessionHash) { - result.ReleaseFunc() // 释放槽位,继续尝试下一个账号 - } else { - if sessionHash != "" && s.cache != nil { - _ = s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), sessionHash, selected.account.ID, stickySessionTTL) - } - return &AccountSelectionResult{ - Account: selected.account, - Acquired: true, - ReleaseFunc: result.ReleaseFunc, - }, nil - } + // 移除已尝试的账号,重新进行分层过滤 + selectedID := selected.account.ID + newAvailable := make([]accountWithLoad, 0, len(available)-1) + for _, acc := range available { + if acc.account.ID != selectedID { + newAvailable = append(newAvailable, acc) } - - // 移除已尝试的账号,重新进行分层过滤 - selectedID := selected.account.ID - newAvailable := make([]accountWithLoad, 0, len(available)-1) - for _, acc := range available { - if acc.account.ID != selectedID { - newAvailable = append(newAvailable, acc) - } - } - available = newAvailable } + available = newAvailable } } @@ -2018,87 +1933,79 @@ func sortAccountsByPriorityAndLastUsed(accounts []*Account, preferOAuth bool) { return a.LastUsedAt.Before(*b.LastUsedAt) } }) + shuffleWithinPriorityAndLastUsed(accounts) } -// selectByCallCount 从候选账号中选择调用次数最少的账号(Antigravity 专用) -// 新账号(CallCount=0)使用平均调用次数作为虚拟值,避免冷启动被猛调 -// 如果有多个账号具有相同的最小调用次数,则随机选择一个 -func selectByCallCount(accounts []accountWithLoad, modelLoadMap map[int64]*ModelLoadInfo, preferOAuth bool) *accountWithLoad { - if len(accounts) == 0 { - return nil +// shuffleWithinSortGroups 对排序后的 accountWithLoad 切片,按 (Priority, LoadRate, LastUsedAt) 分组后组内随机打乱。 +// 防止并发请求读取同一快照时,确定性排序导致所有请求命中相同账号。 +func shuffleWithinSortGroups(accounts []accountWithLoad) { + if len(accounts) <= 1 { + return } - if len(accounts) == 1 { - return &accounts[0] - } - - // 
如果没有负载信息,回退到 LRU - if modelLoadMap == nil { - return selectByLRU(accounts, preferOAuth) - } - - // 1. 计算平均调用次数(用于新账号冷启动) - var totalCallCount int64 - var countWithCalls int - for _, acc := range accounts { - if info := modelLoadMap[acc.account.ID]; info != nil && info.CallCount > 0 { - totalCallCount += info.CallCount - countWithCalls++ + i := 0 + for i < len(accounts) { + j := i + 1 + for j < len(accounts) && sameAccountWithLoadGroup(accounts[i], accounts[j]) { + j++ } - } - - var avgCallCount int64 - if countWithCalls > 0 { - avgCallCount = totalCallCount / int64(countWithCalls) - } - - // 2. 获取每个账号的有效调用次数 - getEffectiveCallCount := func(acc accountWithLoad) int64 { - if acc.account == nil { - return 0 + if j-i > 1 { + mathrand.Shuffle(j-i, func(a, b int) { + accounts[i+a], accounts[i+b] = accounts[i+b], accounts[i+a] + }) } - info := modelLoadMap[acc.account.ID] - if info == nil || info.CallCount == 0 { - return avgCallCount // 新账号使用平均值 - } - return info.CallCount + i = j } +} - // 3. 找到最小调用次数 - minCount := getEffectiveCallCount(accounts[0]) - for _, acc := range accounts[1:] { - if c := getEffectiveCallCount(acc); c < minCount { - minCount = c - } +// sameAccountWithLoadGroup 判断两个 accountWithLoad 是否属于同一排序组 +func sameAccountWithLoadGroup(a, b accountWithLoad) bool { + if a.account.Priority != b.account.Priority { + return false } - - // 4. 收集所有具有最小调用次数的账号 - var candidateIdxs []int - for i, acc := range accounts { - if getEffectiveCallCount(acc) == minCount { - candidateIdxs = append(candidateIdxs, i) - } + if a.loadInfo.LoadRate != b.loadInfo.LoadRate { + return false } + return sameLastUsedAt(a.account.LastUsedAt, b.account.LastUsedAt) +} - // 5. 如果只有一个候选,直接返回 - if len(candidateIdxs) == 1 { - return &accounts[candidateIdxs[0]] +// shuffleWithinPriorityAndLastUsed 对排序后的 []*Account 切片,按 (Priority, LastUsedAt) 分组后组内随机打乱。 +func shuffleWithinPriorityAndLastUsed(accounts []*Account) { + if len(accounts) <= 1 { + return } - - // 6. 
preferOAuth 处理 - if preferOAuth { - var oauthIdxs []int - for _, idx := range candidateIdxs { - if accounts[idx].account.Type == AccountTypeOAuth { - oauthIdxs = append(oauthIdxs, idx) - } + i := 0 + for i < len(accounts) { + j := i + 1 + for j < len(accounts) && sameAccountGroup(accounts[i], accounts[j]) { + j++ } - if len(oauthIdxs) > 0 { - candidateIdxs = oauthIdxs + if j-i > 1 { + mathrand.Shuffle(j-i, func(a, b int) { + accounts[i+a], accounts[i+b] = accounts[i+b], accounts[i+a] + }) } + i = j } +} - // 7. 随机选择 - return &accounts[candidateIdxs[mathrand.Intn(len(candidateIdxs))]] +// sameAccountGroup 判断两个 Account 是否属于同一排序组(Priority + LastUsedAt) +func sameAccountGroup(a, b *Account) bool { + if a.Priority != b.Priority { + return false + } + return sameLastUsedAt(a.LastUsedAt, b.LastUsedAt) +} + +// sameLastUsedAt 判断两个 LastUsedAt 是否相同(精度到秒) +func sameLastUsedAt(a, b *time.Time) bool { + switch { + case a == nil && b == nil: + return true + case a == nil || b == nil: + return false + default: + return a.Unix() == b.Unix() + } } // sortCandidatesForFallback 根据配置选择排序策略 @@ -2153,13 +2060,6 @@ func shuffleWithinPriority(accounts []*Account) { // selectAccountForModelWithPlatform 选择单平台账户(完全隔离) func (s *GatewayService) selectAccountForModelWithPlatform(ctx context.Context, groupID *int64, sessionHash string, requestedModel string, excludedIDs map[int64]struct{}, platform string) (*Account, error) { - // 对 Antigravity 平台,检查请求的模型系列是否在分组支持范围内 - if platform == PlatformAntigravity && groupID != nil && requestedModel != "" { - if err := s.checkAntigravityModelScope(ctx, *groupID, requestedModel); err != nil { - return nil, err - } - } - preferOAuth := platform == PlatformGemini routingAccountIDs := s.routingAccountIDsForRequest(ctx, groupID, requestedModel, platform) @@ -5171,27 +5071,6 @@ func (s *GatewayService) validateUpstreamBaseURL(raw string) (string, error) { return normalized, nil } -// checkAntigravityModelScope 检查 Antigravity 平台的模型系列是否在分组支持范围内 -func (s 
*GatewayService) checkAntigravityModelScope(ctx context.Context, groupID int64, requestedModel string) error { - scope, ok := ResolveAntigravityQuotaScope(requestedModel) - if !ok { - return nil // 无法解析 scope,跳过检查 - } - - group, err := s.resolveGroupByID(ctx, groupID) - if err != nil { - return nil // 查询失败时放行 - } - if group == nil { - return nil // 分组不存在时放行 - } - - if !IsScopeSupported(group.SupportedModelScopes, scope) { - return ErrModelScopeNotSupported - } - return nil -} - // GetAvailableModels returns the list of models available for a group // It aggregates model_mapping keys from all schedulable accounts in the group func (s *GatewayService) GetAvailableModels(ctx context.Context, groupID *int64, platform string) []string { diff --git a/backend/internal/service/gateway_service_benchmark_test.go b/backend/internal/service/gateway_service_benchmark_test.go index f15a85d6..c9c4d3dd 100644 --- a/backend/internal/service/gateway_service_benchmark_test.go +++ b/backend/internal/service/gateway_service_benchmark_test.go @@ -14,7 +14,7 @@ func BenchmarkGenerateSessionHash_Metadata(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { - parsed, err := ParseGatewayRequest(body) + parsed, err := ParseGatewayRequest(body, "") if err != nil { b.Fatalf("解析请求失败: %v", err) } diff --git a/backend/internal/service/gemini_error_policy_test.go b/backend/internal/service/gemini_error_policy_test.go new file mode 100644 index 00000000..2ce8793a --- /dev/null +++ b/backend/internal/service/gemini_error_policy_test.go @@ -0,0 +1,384 @@ +//go:build unit + +package service + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/require" +) + +// --------------------------------------------------------------------------- +// TestShouldFailoverGeminiUpstreamError — verifies the failover decision +// for the ErrorPolicyNone path (original logic 
preserved). +// --------------------------------------------------------------------------- + +func TestShouldFailoverGeminiUpstreamError(t *testing.T) { + svc := &GeminiMessagesCompatService{} + + tests := []struct { + name string + statusCode int + expected bool + }{ + {"401_failover", 401, true}, + {"403_failover", 403, true}, + {"429_failover", 429, true}, + {"529_failover", 529, true}, + {"500_failover", 500, true}, + {"502_failover", 502, true}, + {"503_failover", 503, true}, + {"400_no_failover", 400, false}, + {"404_no_failover", 404, false}, + {"422_no_failover", 422, false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := svc.shouldFailoverGeminiUpstreamError(tt.statusCode) + require.Equal(t, tt.expected, got) + }) + } +} + +// --------------------------------------------------------------------------- +// TestCheckErrorPolicy_GeminiAccounts — verifies CheckErrorPolicy works +// correctly for Gemini platform accounts (API Key type). +// --------------------------------------------------------------------------- + +func TestCheckErrorPolicy_GeminiAccounts(t *testing.T) { + tests := []struct { + name string + account *Account + statusCode int + body []byte + expected ErrorPolicyResult + }{ + { + name: "gemini_apikey_custom_codes_hit", + account: &Account{ + ID: 100, + Type: AccountTypeAPIKey, + Platform: PlatformGemini, + Credentials: map[string]any{ + "custom_error_codes_enabled": true, + "custom_error_codes": []any{float64(429), float64(500)}, + }, + }, + statusCode: 429, + body: []byte(`{"error":"rate limited"}`), + expected: ErrorPolicyMatched, + }, + { + name: "gemini_apikey_custom_codes_miss", + account: &Account{ + ID: 101, + Type: AccountTypeAPIKey, + Platform: PlatformGemini, + Credentials: map[string]any{ + "custom_error_codes_enabled": true, + "custom_error_codes": []any{float64(429)}, + }, + }, + statusCode: 500, + body: []byte(`{"error":"internal"}`), + expected: ErrorPolicySkipped, + }, + { + name: 
"gemini_apikey_no_custom_codes_returns_none", + account: &Account{ + ID: 102, + Type: AccountTypeAPIKey, + Platform: PlatformGemini, + }, + statusCode: 500, + body: []byte(`{"error":"internal"}`), + expected: ErrorPolicyNone, + }, + { + name: "gemini_apikey_temp_unschedulable_hit", + account: &Account{ + ID: 103, + Type: AccountTypeAPIKey, + Platform: PlatformGemini, + Credentials: map[string]any{ + "temp_unschedulable_enabled": true, + "temp_unschedulable_rules": []any{ + map[string]any{ + "error_code": float64(503), + "keywords": []any{"overloaded"}, + "duration_minutes": float64(10), + }, + }, + }, + }, + statusCode: 503, + body: []byte(`overloaded service`), + expected: ErrorPolicyTempUnscheduled, + }, + { + name: "gemini_custom_codes_override_temp_unschedulable", + account: &Account{ + ID: 104, + Type: AccountTypeAPIKey, + Platform: PlatformGemini, + Credentials: map[string]any{ + "custom_error_codes_enabled": true, + "custom_error_codes": []any{float64(503)}, + "temp_unschedulable_enabled": true, + "temp_unschedulable_rules": []any{ + map[string]any{ + "error_code": float64(503), + "keywords": []any{"overloaded"}, + "duration_minutes": float64(10), + }, + }, + }, + }, + statusCode: 503, + body: []byte(`overloaded`), + expected: ErrorPolicyMatched, // custom codes take precedence + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + repo := &errorPolicyRepoStub{} + svc := NewRateLimitService(repo, nil, &config.Config{}, nil, nil) + + result := svc.CheckErrorPolicy(context.Background(), tt.account, tt.statusCode, tt.body) + require.Equal(t, tt.expected, result) + }) + } +} + +// --------------------------------------------------------------------------- +// TestGeminiErrorPolicyIntegration — verifies the Gemini error handling +// paths produce the correct behavior for each ErrorPolicyResult. 
+// +// These tests simulate the inline error policy switch in handleClaudeCompat +// and forwardNativeGemini by calling the same methods in the same order. +// --------------------------------------------------------------------------- + +func TestGeminiErrorPolicyIntegration(t *testing.T) { + gin.SetMode(gin.TestMode) + + tests := []struct { + name string + account *Account + statusCode int + respBody []byte + expectFailover bool // expect UpstreamFailoverError + expectHandleError bool // expect handleGeminiUpstreamError to be called + expectShouldFailover bool // for None path, whether shouldFailover triggers + }{ + { + name: "custom_codes_matched_429_failover", + account: &Account{ + ID: 200, + Type: AccountTypeAPIKey, + Platform: PlatformGemini, + Credentials: map[string]any{ + "custom_error_codes_enabled": true, + "custom_error_codes": []any{float64(429)}, + }, + }, + statusCode: 429, + respBody: []byte(`{"error":"rate limited"}`), + expectFailover: true, + expectHandleError: true, + }, + { + name: "custom_codes_skipped_500_no_failover", + account: &Account{ + ID: 201, + Type: AccountTypeAPIKey, + Platform: PlatformGemini, + Credentials: map[string]any{ + "custom_error_codes_enabled": true, + "custom_error_codes": []any{float64(429)}, + }, + }, + statusCode: 500, + respBody: []byte(`{"error":"internal"}`), + expectFailover: false, + expectHandleError: false, + }, + { + name: "temp_unschedulable_matched_failover", + account: &Account{ + ID: 202, + Type: AccountTypeAPIKey, + Platform: PlatformGemini, + Credentials: map[string]any{ + "temp_unschedulable_enabled": true, + "temp_unschedulable_rules": []any{ + map[string]any{ + "error_code": float64(503), + "keywords": []any{"overloaded"}, + "duration_minutes": float64(10), + }, + }, + }, + }, + statusCode: 503, + respBody: []byte(`overloaded`), + expectFailover: true, + expectHandleError: true, + }, + { + name: "no_policy_429_failover_via_shouldFailover", + account: &Account{ + ID: 203, + Type: AccountTypeAPIKey, 
+ Platform: PlatformGemini, + }, + statusCode: 429, + respBody: []byte(`{"error":"rate limited"}`), + expectFailover: true, + expectHandleError: true, + expectShouldFailover: true, + }, + { + name: "no_policy_400_no_failover", + account: &Account{ + ID: 204, + Type: AccountTypeAPIKey, + Platform: PlatformGemini, + }, + statusCode: 400, + respBody: []byte(`{"error":"bad request"}`), + expectFailover: false, + expectHandleError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + repo := &geminiErrorPolicyRepo{} + rlSvc := NewRateLimitService(repo, nil, &config.Config{}, nil, nil) + svc := &GeminiMessagesCompatService{ + accountRepo: repo, + rateLimitService: rlSvc, + } + + writer := httptest.NewRecorder() + c, _ := gin.CreateTestContext(writer) + c.Request = httptest.NewRequest(http.MethodPost, "/v1/messages", nil) + + // Simulate the Claude compat error handling path (same logic as native). + // This mirrors the inline switch in handleClaudeCompat. + var handleErrorCalled bool + var gotFailover bool + + ctx := context.Background() + statusCode := tt.statusCode + respBody := tt.respBody + account := tt.account + headers := http.Header{} + + if svc.rateLimitService != nil { + switch svc.rateLimitService.CheckErrorPolicy(ctx, account, statusCode, respBody) { + case ErrorPolicySkipped: + // Skipped → return error directly (no handleGeminiUpstreamError, no failover) + gotFailover = false + handleErrorCalled = false + goto verify + case ErrorPolicyMatched, ErrorPolicyTempUnscheduled: + svc.handleGeminiUpstreamError(ctx, account, statusCode, headers, respBody) + handleErrorCalled = true + gotFailover = true + goto verify + } + } + + // ErrorPolicyNone → original logic + svc.handleGeminiUpstreamError(ctx, account, statusCode, headers, respBody) + handleErrorCalled = true + if svc.shouldFailoverGeminiUpstreamError(statusCode) { + gotFailover = true + } + + verify: + require.Equal(t, tt.expectFailover, gotFailover, "failover mismatch") + 
require.Equal(t, tt.expectHandleError, handleErrorCalled, "handleGeminiUpstreamError call mismatch") + + if tt.expectShouldFailover { + require.True(t, svc.shouldFailoverGeminiUpstreamError(statusCode), + "shouldFailoverGeminiUpstreamError should return true for status %d", statusCode) + } + }) + } +} + +// --------------------------------------------------------------------------- +// TestGeminiErrorPolicy_NilRateLimitService — verifies nil safety +// --------------------------------------------------------------------------- + +func TestGeminiErrorPolicy_NilRateLimitService(t *testing.T) { + svc := &GeminiMessagesCompatService{ + rateLimitService: nil, + } + + // When rateLimitService is nil, error policy is skipped → falls through to + // shouldFailoverGeminiUpstreamError (original logic). + // Verify this doesn't panic and follows expected behavior. + + ctx := context.Background() + account := &Account{ + ID: 300, + Type: AccountTypeAPIKey, + Platform: PlatformGemini, + Credentials: map[string]any{ + "custom_error_codes_enabled": true, + "custom_error_codes": []any{float64(429)}, + }, + } + + // The nil check should prevent CheckErrorPolicy from being called + if svc.rateLimitService != nil { + t.Fatal("rateLimitService should be nil for this test") + } + + // shouldFailoverGeminiUpstreamError still works + require.True(t, svc.shouldFailoverGeminiUpstreamError(429)) + require.False(t, svc.shouldFailoverGeminiUpstreamError(400)) + + // handleGeminiUpstreamError should not panic with nil rateLimitService + require.NotPanics(t, func() { + svc.handleGeminiUpstreamError(ctx, account, 500, http.Header{}, []byte(`error`)) + }) +} + +// --------------------------------------------------------------------------- +// geminiErrorPolicyRepo — minimal AccountRepository stub for Gemini error +// policy tests. Embeds mockAccountRepoForGemini and adds tracking. 
+// --------------------------------------------------------------------------- + +type geminiErrorPolicyRepo struct { + mockAccountRepoForGemini + setErrorCalls int + setRateLimitedCalls int + setTempCalls int +} + +func (r *geminiErrorPolicyRepo) SetError(_ context.Context, _ int64, _ string) error { + r.setErrorCalls++ + return nil +} + +func (r *geminiErrorPolicyRepo) SetRateLimited(_ context.Context, _ int64, _ time.Time) error { + r.setRateLimitedCalls++ + return nil +} + +func (r *geminiErrorPolicyRepo) SetTempUnschedulable(_ context.Context, _ int64, _ time.Time, _ string) error { + r.setTempCalls++ + return nil +} diff --git a/backend/internal/service/gemini_messages_compat_service.go b/backend/internal/service/gemini_messages_compat_service.go index 4e0442fd..d77f6f92 100644 --- a/backend/internal/service/gemini_messages_compat_service.go +++ b/backend/internal/service/gemini_messages_compat_service.go @@ -831,38 +831,47 @@ func (s *GeminiMessagesCompatService) Forward(ctx context.Context, c *gin.Contex if resp.StatusCode >= 400 { respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) - tempMatched := false + // 统一错误策略:自定义错误码 + 临时不可调度 if s.rateLimitService != nil { - tempMatched = s.rateLimitService.HandleTempUnschedulable(ctx, account, resp.StatusCode, respBody) - } - s.handleGeminiUpstreamError(ctx, account, resp.StatusCode, resp.Header, respBody) - if tempMatched { - upstreamReqID := resp.Header.Get(requestIDHeader) - if upstreamReqID == "" { - upstreamReqID = resp.Header.Get("x-goog-request-id") - } - upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(respBody)) - upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) - upstreamDetail := "" - if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { - maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes - if maxBytes <= 0 { - maxBytes = 2048 + switch s.rateLimitService.CheckErrorPolicy(ctx, account, resp.StatusCode, respBody) { + case ErrorPolicySkipped: + upstreamReqID := 
resp.Header.Get(requestIDHeader) + if upstreamReqID == "" { + upstreamReqID = resp.Header.Get("x-goog-request-id") } - upstreamDetail = truncateString(string(respBody), maxBytes) + return nil, s.writeGeminiMappedError(c, account, resp.StatusCode, upstreamReqID, respBody) + case ErrorPolicyMatched, ErrorPolicyTempUnscheduled: + s.handleGeminiUpstreamError(ctx, account, resp.StatusCode, resp.Header, respBody) + upstreamReqID := resp.Header.Get(requestIDHeader) + if upstreamReqID == "" { + upstreamReqID = resp.Header.Get("x-goog-request-id") + } + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + upstreamDetail := "" + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + if maxBytes <= 0 { + maxBytes = 2048 + } + upstreamDetail = truncateString(string(respBody), maxBytes) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: upstreamReqID, + Kind: "failover", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode, ResponseBody: respBody} } - appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ - Platform: account.Platform, - AccountID: account.ID, - AccountName: account.Name, - UpstreamStatusCode: resp.StatusCode, - UpstreamRequestID: upstreamReqID, - Kind: "failover", - Message: upstreamMsg, - Detail: upstreamDetail, - }) - return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode, ResponseBody: respBody} } + + // ErrorPolicyNone → 原有逻辑 + s.handleGeminiUpstreamError(ctx, account, resp.StatusCode, resp.Header, respBody) if s.shouldFailoverGeminiUpstreamError(resp.StatusCode) { upstreamReqID := resp.Header.Get(requestIDHeader) if upstreamReqID == "" { @@ -1249,14 +1258,9 @@ func (s *GeminiMessagesCompatService) 
ForwardNative(ctx context.Context, c *gin. if resp.StatusCode >= 400 { respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) - tempMatched := false - if s.rateLimitService != nil { - tempMatched = s.rateLimitService.HandleTempUnschedulable(ctx, account, resp.StatusCode, respBody) - } - s.handleGeminiUpstreamError(ctx, account, resp.StatusCode, resp.Header, respBody) - // Best-effort fallback for OAuth tokens missing AI Studio scopes when calling countTokens. // This avoids Gemini SDKs failing hard during preflight token counting. + // Checked before error policy so it always works regardless of custom error codes. if action == "countTokens" && isOAuth && isGeminiInsufficientScope(resp.Header, respBody) { estimated := estimateGeminiCountTokens(body) c.JSON(http.StatusOK, map[string]any{"totalTokens": estimated}) @@ -1270,30 +1274,46 @@ func (s *GeminiMessagesCompatService) ForwardNative(ctx context.Context, c *gin. }, nil } - if tempMatched { - evBody := unwrapIfNeeded(isOAuth, respBody) - upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(evBody)) - upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) - upstreamDetail := "" - if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { - maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes - if maxBytes <= 0 { - maxBytes = 2048 + // 统一错误策略:自定义错误码 + 临时不可调度 + if s.rateLimitService != nil { + switch s.rateLimitService.CheckErrorPolicy(ctx, account, resp.StatusCode, respBody) { + case ErrorPolicySkipped: + respBody = unwrapIfNeeded(isOAuth, respBody) + contentType := resp.Header.Get("Content-Type") + if contentType == "" { + contentType = "application/json" } - upstreamDetail = truncateString(string(evBody), maxBytes) + c.Data(resp.StatusCode, contentType, respBody) + return nil, fmt.Errorf("gemini upstream error: %d (skipped by error policy)", resp.StatusCode) + case ErrorPolicyMatched, ErrorPolicyTempUnscheduled: + s.handleGeminiUpstreamError(ctx, account, resp.StatusCode, resp.Header, respBody) + 
evBody := unwrapIfNeeded(isOAuth, respBody) + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(evBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + upstreamDetail := "" + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + if maxBytes <= 0 { + maxBytes = 2048 + } + upstreamDetail = truncateString(string(evBody), maxBytes) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: requestID, + Kind: "failover", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode, ResponseBody: respBody} } - appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ - Platform: account.Platform, - AccountID: account.ID, - AccountName: account.Name, - UpstreamStatusCode: resp.StatusCode, - UpstreamRequestID: requestID, - Kind: "failover", - Message: upstreamMsg, - Detail: upstreamDetail, - }) - return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode, ResponseBody: respBody} } + + // ErrorPolicyNone → 原有逻辑 + s.handleGeminiUpstreamError(ctx, account, resp.StatusCode, resp.Header, respBody) if s.shouldFailoverGeminiUpstreamError(resp.StatusCode) { evBody := unwrapIfNeeded(isOAuth, respBody) upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(evBody)) diff --git a/backend/internal/service/gemini_multiplatform_test.go b/backend/internal/service/gemini_multiplatform_test.go index 0c54dc39..fee0f1cf 100644 --- a/backend/internal/service/gemini_multiplatform_test.go +++ b/backend/internal/service/gemini_multiplatform_test.go @@ -133,9 +133,6 @@ func (m *mockAccountRepoForGemini) ListSchedulableByGroupIDAndPlatforms(ctx cont func (m *mockAccountRepoForGemini) SetRateLimited(ctx context.Context, id int64, resetAt time.Time) error { return nil } -func (m *mockAccountRepoForGemini) 
SetAntigravityQuotaScopeLimit(ctx context.Context, id int64, scope AntigravityQuotaScope, resetAt time.Time) error { - return nil -} func (m *mockAccountRepoForGemini) SetModelRateLimit(ctx context.Context, id int64, scope string, resetAt time.Time) error { return nil } @@ -265,29 +262,6 @@ func (m *mockGatewayCacheForGemini) DeleteSessionAccountID(ctx context.Context, return nil } -func (m *mockGatewayCacheForGemini) IncrModelCallCount(ctx context.Context, accountID int64, model string) (int64, error) { - return 0, nil -} - -func (m *mockGatewayCacheForGemini) GetModelLoadBatch(ctx context.Context, accountIDs []int64, model string) (map[int64]*ModelLoadInfo, error) { - return nil, nil -} - -func (m *mockGatewayCacheForGemini) FindGeminiSession(ctx context.Context, groupID int64, prefixHash, digestChain string) (uuid string, accountID int64, found bool) { - return "", 0, false -} - -func (m *mockGatewayCacheForGemini) SaveGeminiSession(ctx context.Context, groupID int64, prefixHash, digestChain, uuid string, accountID int64) error { - return nil -} - -func (m *mockGatewayCacheForGemini) FindAnthropicSession(ctx context.Context, groupID int64, prefixHash, digestChain string) (uuid string, accountID int64, found bool) { - return "", 0, false -} - -func (m *mockGatewayCacheForGemini) SaveAnthropicSession(ctx context.Context, groupID int64, prefixHash, digestChain, uuid string, accountID int64) error { - return nil -} // TestGeminiMessagesCompatService_SelectAccountForModelWithExclusions_GeminiPlatform 测试 Gemini 单平台选择 func TestGeminiMessagesCompatService_SelectAccountForModelWithExclusions_GeminiPlatform(t *testing.T) { diff --git a/backend/internal/service/gemini_session.go b/backend/internal/service/gemini_session.go index 859ae9f3..1780d1da 100644 --- a/backend/internal/service/gemini_session.go +++ b/backend/internal/service/gemini_session.go @@ -6,26 +6,11 @@ import ( "encoding/json" "strconv" "strings" - "time" 
"github.com/Wei-Shaw/sub2api/internal/pkg/antigravity" "github.com/cespare/xxhash/v2" ) -// Gemini 会话 ID Fallback 相关常量 -const ( - // geminiSessionTTLSeconds Gemini 会话缓存 TTL(5 分钟) - geminiSessionTTLSeconds = 300 - - // geminiSessionKeyPrefix Gemini 会话 Redis key 前缀 - geminiSessionKeyPrefix = "gemini:sess:" -) - -// GeminiSessionTTL 返回 Gemini 会话缓存 TTL -func GeminiSessionTTL() time.Duration { - return geminiSessionTTLSeconds * time.Second -} - // shortHash 使用 XXHash64 + Base36 生成短 hash(16 字符) // XXHash64 比 SHA256 快约 10 倍,Base36 比 Hex 短约 20% func shortHash(data []byte) string { @@ -79,35 +64,6 @@ func GenerateGeminiPrefixHash(userID, apiKeyID int64, ip, userAgent, platform, m return base64.RawURLEncoding.EncodeToString(hash[:12]) } -// BuildGeminiSessionKey 构建 Gemini 会话 Redis key -// 格式: gemini:sess:{groupID}:{prefixHash}:{digestChain} -func BuildGeminiSessionKey(groupID int64, prefixHash, digestChain string) string { - return geminiSessionKeyPrefix + strconv.FormatInt(groupID, 10) + ":" + prefixHash + ":" + digestChain -} - -// GenerateDigestChainPrefixes 生成摘要链的所有前缀(从长到短) -// 用于 MGET 批量查询最长匹配 -func GenerateDigestChainPrefixes(chain string) []string { - if chain == "" { - return nil - } - - var prefixes []string - c := chain - - for c != "" { - prefixes = append(prefixes, c) - // 找到最后一个 "-" 的位置 - if i := strings.LastIndex(c, "-"); i > 0 { - c = c[:i] - } else { - break - } - } - - return prefixes -} - // ParseGeminiSessionValue 解析 Gemini 会话缓存值 // 格式: {uuid}:{accountID} func ParseGeminiSessionValue(value string) (uuid string, accountID int64, ok bool) { @@ -139,15 +95,6 @@ func FormatGeminiSessionValue(uuid string, accountID int64) string { // geminiDigestSessionKeyPrefix Gemini 摘要 fallback 会话 key 前缀 const geminiDigestSessionKeyPrefix = "gemini:digest:" -// geminiTrieKeyPrefix Gemini Trie 会话 key 前缀 -const geminiTrieKeyPrefix = "gemini:trie:" - -// BuildGeminiTrieKey 构建 Gemini Trie Redis key -// 格式: gemini:trie:{groupID}:{prefixHash} -func BuildGeminiTrieKey(groupID 
int64, prefixHash string) string { - return geminiTrieKeyPrefix + strconv.FormatInt(groupID, 10) + ":" + prefixHash -} - // GenerateGeminiDigestSessionKey 生成 Gemini 摘要 fallback 的 sessionKey // 组合 prefixHash 前 8 位 + uuid 前 8 位,确保不同会话产生不同的 sessionKey // 用于在 SelectAccountWithLoadAwareness 中保持粘性会话 diff --git a/backend/internal/service/gemini_session_integration_test.go b/backend/internal/service/gemini_session_integration_test.go index 928c62cf..95b5f594 100644 --- a/backend/internal/service/gemini_session_integration_test.go +++ b/backend/internal/service/gemini_session_integration_test.go @@ -1,41 +1,14 @@ package service import ( - "context" "testing" "github.com/Wei-Shaw/sub2api/internal/pkg/antigravity" ) -// mockGeminiSessionCache 模拟 Redis 缓存 -type mockGeminiSessionCache struct { - sessions map[string]string // key -> value -} - -func newMockGeminiSessionCache() *mockGeminiSessionCache { - return &mockGeminiSessionCache{sessions: make(map[string]string)} -} - -func (m *mockGeminiSessionCache) Save(groupID int64, prefixHash, digestChain, uuid string, accountID int64) { - key := BuildGeminiSessionKey(groupID, prefixHash, digestChain) - value := FormatGeminiSessionValue(uuid, accountID) - m.sessions[key] = value -} - -func (m *mockGeminiSessionCache) Find(groupID int64, prefixHash, digestChain string) (uuid string, accountID int64, found bool) { - prefixes := GenerateDigestChainPrefixes(digestChain) - for _, p := range prefixes { - key := BuildGeminiSessionKey(groupID, prefixHash, p) - if val, ok := m.sessions[key]; ok { - return ParseGeminiSessionValue(val) - } - } - return "", 0, false -} - // TestGeminiSessionContinuousConversation 测试连续会话的摘要链匹配 func TestGeminiSessionContinuousConversation(t *testing.T) { - cache := newMockGeminiSessionCache() + store := NewDigestSessionStore() groupID := int64(1) prefixHash := "test_prefix_hash" sessionUUID := "session-uuid-12345" @@ -54,13 +27,13 @@ func TestGeminiSessionContinuousConversation(t *testing.T) { t.Logf("Round 1 
chain: %s", chain1) // 第一轮:没有找到会话,创建新会话 - _, _, found := cache.Find(groupID, prefixHash, chain1) + _, _, _, found := store.Find(groupID, prefixHash, chain1) if found { t.Error("Round 1: should not find existing session") } - // 保存第一轮会话 - cache.Save(groupID, prefixHash, chain1, sessionUUID, accountID) + // 保存第一轮会话(首轮无旧 chain) + store.Save(groupID, prefixHash, chain1, sessionUUID, accountID, "") // 模拟第二轮对话(用户继续对话) req2 := &antigravity.GeminiRequest{ @@ -77,7 +50,7 @@ func TestGeminiSessionContinuousConversation(t *testing.T) { t.Logf("Round 2 chain: %s", chain2) // 第二轮:应该能找到会话(通过前缀匹配) - foundUUID, foundAccID, found := cache.Find(groupID, prefixHash, chain2) + foundUUID, foundAccID, matchedChain, found := store.Find(groupID, prefixHash, chain2) if !found { t.Error("Round 2: should find session via prefix matching") } @@ -88,8 +61,8 @@ func TestGeminiSessionContinuousConversation(t *testing.T) { t.Errorf("Round 2: expected accountID %d, got %d", accountID, foundAccID) } - // 保存第二轮会话 - cache.Save(groupID, prefixHash, chain2, sessionUUID, accountID) + // 保存第二轮会话,传入 Find 返回的 matchedChain 以删旧 key + store.Save(groupID, prefixHash, chain2, sessionUUID, accountID, matchedChain) // 模拟第三轮对话 req3 := &antigravity.GeminiRequest{ @@ -108,7 +81,7 @@ func TestGeminiSessionContinuousConversation(t *testing.T) { t.Logf("Round 3 chain: %s", chain3) // 第三轮:应该能找到会话(通过第二轮的前缀匹配) - foundUUID, foundAccID, found = cache.Find(groupID, prefixHash, chain3) + foundUUID, foundAccID, _, found = store.Find(groupID, prefixHash, chain3) if !found { t.Error("Round 3: should find session via prefix matching") } @@ -118,13 +91,11 @@ func TestGeminiSessionContinuousConversation(t *testing.T) { if foundAccID != accountID { t.Errorf("Round 3: expected accountID %d, got %d", accountID, foundAccID) } - - t.Log("✓ Continuous conversation session matching works correctly!") } // TestGeminiSessionDifferentConversations 测试不同会话不会错误匹配 func TestGeminiSessionDifferentConversations(t *testing.T) { - cache := 
newMockGeminiSessionCache() + store := NewDigestSessionStore() groupID := int64(1) prefixHash := "test_prefix_hash" @@ -135,7 +106,7 @@ func TestGeminiSessionDifferentConversations(t *testing.T) { }, } chain1 := BuildGeminiDigestChain(req1) - cache.Save(groupID, prefixHash, chain1, "session-1", 100) + store.Save(groupID, prefixHash, chain1, "session-1", 100, "") // 第二个完全不同的会话 req2 := &antigravity.GeminiRequest{ @@ -146,61 +117,29 @@ func TestGeminiSessionDifferentConversations(t *testing.T) { chain2 := BuildGeminiDigestChain(req2) // 不同会话不应该匹配 - _, _, found := cache.Find(groupID, prefixHash, chain2) + _, _, _, found := store.Find(groupID, prefixHash, chain2) if found { t.Error("Different conversations should not match") } - - t.Log("✓ Different conversations are correctly isolated!") } // TestGeminiSessionPrefixMatchingOrder 测试前缀匹配的优先级(最长匹配优先) func TestGeminiSessionPrefixMatchingOrder(t *testing.T) { - cache := newMockGeminiSessionCache() + store := NewDigestSessionStore() groupID := int64(1) prefixHash := "test_prefix_hash" - // 创建一个三轮对话 - req := &antigravity.GeminiRequest{ - SystemInstruction: &antigravity.GeminiContent{ - Parts: []antigravity.GeminiPart{{Text: "System prompt"}}, - }, - Contents: []antigravity.GeminiContent{ - {Role: "user", Parts: []antigravity.GeminiPart{{Text: "Q1"}}}, - {Role: "model", Parts: []antigravity.GeminiPart{{Text: "A1"}}}, - {Role: "user", Parts: []antigravity.GeminiPart{{Text: "Q2"}}}, - }, - } - fullChain := BuildGeminiDigestChain(req) - prefixes := GenerateDigestChainPrefixes(fullChain) - - t.Logf("Full chain: %s", fullChain) - t.Logf("Prefixes (longest first): %v", prefixes) - - // 验证前缀生成顺序(从长到短) - if len(prefixes) != 4 { - t.Errorf("Expected 4 prefixes, got %d", len(prefixes)) - } - // 保存不同轮次的会话到不同账号 - // 第一轮(最短前缀)-> 账号 1 - cache.Save(groupID, prefixHash, prefixes[3], "session-round1", 1) - // 第二轮 -> 账号 2 - cache.Save(groupID, prefixHash, prefixes[2], "session-round2", 2) - // 第三轮(最长前缀,完整链)-> 账号 3 - cache.Save(groupID, 
prefixHash, prefixes[0], "session-round3", 3) + store.Save(groupID, prefixHash, "s:sys-u:q1", "session-round1", 1, "") + store.Save(groupID, prefixHash, "s:sys-u:q1-m:a1", "session-round2", 2, "") + store.Save(groupID, prefixHash, "s:sys-u:q1-m:a1-u:q2", "session-round3", 3, "") - // 查找应该返回最长匹配(账号 3) - _, accID, found := cache.Find(groupID, prefixHash, fullChain) + // 查找更长的链,应该返回最长匹配(账号 3) + _, accID, _, found := store.Find(groupID, prefixHash, "s:sys-u:q1-m:a1-u:q2-m:a2") if !found { t.Error("Should find session") } if accID != 3 { t.Errorf("Should match longest prefix (account 3), got account %d", accID) } - - t.Log("✓ Longest prefix matching works correctly!") } - -// 确保 context 包被使用(避免未使用的导入警告) -var _ = context.Background diff --git a/backend/internal/service/gemini_session_test.go b/backend/internal/service/gemini_session_test.go index 8c1908f7..a034cddd 100644 --- a/backend/internal/service/gemini_session_test.go +++ b/backend/internal/service/gemini_session_test.go @@ -152,61 +152,6 @@ func TestGenerateGeminiPrefixHash(t *testing.T) { } } -func TestGenerateDigestChainPrefixes(t *testing.T) { - tests := []struct { - name string - chain string - want []string - wantLen int - }{ - { - name: "empty", - chain: "", - wantLen: 0, - }, - { - name: "single part", - chain: "u:abc123", - want: []string{"u:abc123"}, - wantLen: 1, - }, - { - name: "two parts", - chain: "s:xyz-u:abc", - want: []string{"s:xyz-u:abc", "s:xyz"}, - wantLen: 2, - }, - { - name: "four parts", - chain: "s:a-u:b-m:c-u:d", - want: []string{"s:a-u:b-m:c-u:d", "s:a-u:b-m:c", "s:a-u:b", "s:a"}, - wantLen: 4, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := GenerateDigestChainPrefixes(tt.chain) - - if len(result) != tt.wantLen { - t.Errorf("expected %d prefixes, got %d: %v", tt.wantLen, len(result), result) - } - - if tt.want != nil { - for i, want := range tt.want { - if i >= len(result) { - t.Errorf("missing prefix at index %d", i) - continue - } - if 
result[i] != want { - t.Errorf("prefix[%d]: expected %s, got %s", i, want, result[i]) - } - } - } - }) - } -} - func TestParseGeminiSessionValue(t *testing.T) { tests := []struct { name string @@ -442,40 +387,3 @@ func TestGenerateGeminiDigestSessionKey(t *testing.T) { } }) } - -func TestBuildGeminiTrieKey(t *testing.T) { - tests := []struct { - name string - groupID int64 - prefixHash string - want string - }{ - { - name: "normal", - groupID: 123, - prefixHash: "abcdef12", - want: "gemini:trie:123:abcdef12", - }, - { - name: "zero group", - groupID: 0, - prefixHash: "xyz", - want: "gemini:trie:0:xyz", - }, - { - name: "empty prefix", - groupID: 1, - prefixHash: "", - want: "gemini:trie:1:", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := BuildGeminiTrieKey(tt.groupID, tt.prefixHash) - if got != tt.want { - t.Errorf("BuildGeminiTrieKey(%d, %q) = %q, want %q", tt.groupID, tt.prefixHash, got, tt.want) - } - }) - } -} diff --git a/backend/internal/service/generate_session_hash_test.go b/backend/internal/service/generate_session_hash_test.go new file mode 100644 index 00000000..8aa358a5 --- /dev/null +++ b/backend/internal/service/generate_session_hash_test.go @@ -0,0 +1,1213 @@ +//go:build unit + +package service + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +// ============ 基础优先级测试 ============ + +func TestGenerateSessionHash_NilParsedRequest(t *testing.T) { + svc := &GatewayService{} + require.Empty(t, svc.GenerateSessionHash(nil)) +} + +func TestGenerateSessionHash_EmptyRequest(t *testing.T) { + svc := &GatewayService{} + require.Empty(t, svc.GenerateSessionHash(&ParsedRequest{})) +} + +func TestGenerateSessionHash_MetadataHasHighestPriority(t *testing.T) { + svc := &GatewayService{} + + parsed := &ParsedRequest{ + MetadataUserID: "session_123e4567-e89b-12d3-a456-426614174000", + System: "You are a helpful assistant.", + HasSystem: true, + Messages: []any{ + map[string]any{"role": "user", "content": 
"hello"}, + }, + } + + hash := svc.GenerateSessionHash(parsed) + require.Equal(t, "123e4567-e89b-12d3-a456-426614174000", hash, "metadata session_id should have highest priority") +} + +// ============ System + Messages 基础测试 ============ + +func TestGenerateSessionHash_SystemPlusMessages(t *testing.T) { + svc := &GatewayService{} + + withSystem := &ParsedRequest{ + System: "You are a helpful assistant.", + HasSystem: true, + Messages: []any{ + map[string]any{"role": "user", "content": "hello"}, + }, + } + withoutSystem := &ParsedRequest{ + Messages: []any{ + map[string]any{"role": "user", "content": "hello"}, + }, + } + + h1 := svc.GenerateSessionHash(withSystem) + h2 := svc.GenerateSessionHash(withoutSystem) + require.NotEmpty(t, h1) + require.NotEmpty(t, h2) + require.NotEqual(t, h1, h2, "system prompt should be part of digest, producing different hash") +} + +func TestGenerateSessionHash_SystemOnlyProducesHash(t *testing.T) { + svc := &GatewayService{} + + parsed := &ParsedRequest{ + System: "You are a helpful assistant.", + HasSystem: true, + } + hash := svc.GenerateSessionHash(parsed) + require.NotEmpty(t, hash, "system prompt alone should produce a hash as part of full digest") +} + +func TestGenerateSessionHash_DifferentSystemsSameMessages(t *testing.T) { + svc := &GatewayService{} + + parsed1 := &ParsedRequest{ + System: "You are assistant A.", + HasSystem: true, + Messages: []any{ + map[string]any{"role": "user", "content": "hello"}, + }, + } + parsed2 := &ParsedRequest{ + System: "You are assistant B.", + HasSystem: true, + Messages: []any{ + map[string]any{"role": "user", "content": "hello"}, + }, + } + + h1 := svc.GenerateSessionHash(parsed1) + h2 := svc.GenerateSessionHash(parsed2) + require.NotEqual(t, h1, h2, "different system prompts with same messages should produce different hashes") +} + +func TestGenerateSessionHash_SameSystemSameMessages(t *testing.T) { + svc := &GatewayService{} + + mk := func() *ParsedRequest { + return &ParsedRequest{ + 
System: "You are a helpful assistant.", + HasSystem: true, + Messages: []any{ + map[string]any{"role": "user", "content": "hello"}, + map[string]any{"role": "assistant", "content": "hi"}, + }, + } + } + + h1 := svc.GenerateSessionHash(mk()) + h2 := svc.GenerateSessionHash(mk()) + require.Equal(t, h1, h2, "same system + same messages should produce identical hash") +} + +func TestGenerateSessionHash_DifferentMessagesProduceDifferentHash(t *testing.T) { + svc := &GatewayService{} + + parsed1 := &ParsedRequest{ + System: "You are a helpful assistant.", + HasSystem: true, + Messages: []any{ + map[string]any{"role": "user", "content": "help me with Go"}, + }, + } + parsed2 := &ParsedRequest{ + System: "You are a helpful assistant.", + HasSystem: true, + Messages: []any{ + map[string]any{"role": "user", "content": "help me with Python"}, + }, + } + + h1 := svc.GenerateSessionHash(parsed1) + h2 := svc.GenerateSessionHash(parsed2) + require.NotEqual(t, h1, h2, "same system but different messages should produce different hashes") +} + +// ============ SessionContext 核心测试 ============ + +func TestGenerateSessionHash_DifferentSessionContextProducesDifferentHash(t *testing.T) { + svc := &GatewayService{} + + // 相同消息 + 不同 SessionContext → 不同 hash(解决碰撞问题的核心场景) + parsed1 := &ParsedRequest{ + Messages: []any{ + map[string]any{"role": "user", "content": "hello"}, + }, + SessionContext: &SessionContext{ + ClientIP: "192.168.1.1", + UserAgent: "Mozilla/5.0", + APIKeyID: 100, + }, + } + parsed2 := &ParsedRequest{ + Messages: []any{ + map[string]any{"role": "user", "content": "hello"}, + }, + SessionContext: &SessionContext{ + ClientIP: "10.0.0.1", + UserAgent: "curl/7.0", + APIKeyID: 200, + }, + } + + h1 := svc.GenerateSessionHash(parsed1) + h2 := svc.GenerateSessionHash(parsed2) + require.NotEmpty(t, h1) + require.NotEmpty(t, h2) + require.NotEqual(t, h1, h2, "same messages but different SessionContext should produce different hashes") +} + +func 
TestGenerateSessionHash_SameSessionContextProducesSameHash(t *testing.T) { + svc := &GatewayService{} + + mk := func() *ParsedRequest { + return &ParsedRequest{ + Messages: []any{ + map[string]any{"role": "user", "content": "hello"}, + }, + SessionContext: &SessionContext{ + ClientIP: "192.168.1.1", + UserAgent: "Mozilla/5.0", + APIKeyID: 100, + }, + } + } + + h1 := svc.GenerateSessionHash(mk()) + h2 := svc.GenerateSessionHash(mk()) + require.Equal(t, h1, h2, "same messages + same SessionContext should produce identical hash") +} + +func TestGenerateSessionHash_MetadataOverridesSessionContext(t *testing.T) { + svc := &GatewayService{} + + parsed := &ParsedRequest{ + MetadataUserID: "session_123e4567-e89b-12d3-a456-426614174000", + Messages: []any{ + map[string]any{"role": "user", "content": "hello"}, + }, + SessionContext: &SessionContext{ + ClientIP: "192.168.1.1", + UserAgent: "Mozilla/5.0", + APIKeyID: 100, + }, + } + + hash := svc.GenerateSessionHash(parsed) + require.Equal(t, "123e4567-e89b-12d3-a456-426614174000", hash, + "metadata session_id should take priority over SessionContext") +} + +func TestGenerateSessionHash_NilSessionContextBackwardCompatible(t *testing.T) { + svc := &GatewayService{} + + withCtx := &ParsedRequest{ + Messages: []any{ + map[string]any{"role": "user", "content": "hello"}, + }, + SessionContext: nil, + } + withoutCtx := &ParsedRequest{ + Messages: []any{ + map[string]any{"role": "user", "content": "hello"}, + }, + } + + h1 := svc.GenerateSessionHash(withCtx) + h2 := svc.GenerateSessionHash(withoutCtx) + require.Equal(t, h1, h2, "nil SessionContext should produce same hash as no SessionContext") +} + +// ============ 多轮连续会话测试 ============ + +func TestGenerateSessionHash_ContinuousConversation_HashChangesWithMessages(t *testing.T) { + svc := &GatewayService{} + + ctx := &SessionContext{ClientIP: "1.2.3.4", UserAgent: "test", APIKeyID: 1} + + // 模拟连续会话:每增加一轮对话,hash 应该不同(内容累积变化) + round1 := &ParsedRequest{ + System: "You are a helpful 
assistant.", + HasSystem: true, + Messages: []any{ + map[string]any{"role": "user", "content": "hello"}, + }, + SessionContext: ctx, + } + + round2 := &ParsedRequest{ + System: "You are a helpful assistant.", + HasSystem: true, + Messages: []any{ + map[string]any{"role": "user", "content": "hello"}, + map[string]any{"role": "assistant", "content": "Hi there!"}, + map[string]any{"role": "user", "content": "How are you?"}, + }, + SessionContext: ctx, + } + + round3 := &ParsedRequest{ + System: "You are a helpful assistant.", + HasSystem: true, + Messages: []any{ + map[string]any{"role": "user", "content": "hello"}, + map[string]any{"role": "assistant", "content": "Hi there!"}, + map[string]any{"role": "user", "content": "How are you?"}, + map[string]any{"role": "assistant", "content": "I'm doing well!"}, + map[string]any{"role": "user", "content": "Tell me a joke"}, + }, + SessionContext: ctx, + } + + h1 := svc.GenerateSessionHash(round1) + h2 := svc.GenerateSessionHash(round2) + h3 := svc.GenerateSessionHash(round3) + + require.NotEmpty(t, h1) + require.NotEmpty(t, h2) + require.NotEmpty(t, h3) + require.NotEqual(t, h1, h2, "different conversation rounds should produce different hashes") + require.NotEqual(t, h2, h3, "each new round should produce a different hash") + require.NotEqual(t, h1, h3, "round 1 and round 3 should differ") +} + +func TestGenerateSessionHash_ContinuousConversation_SameRoundSameHash(t *testing.T) { + svc := &GatewayService{} + + ctx := &SessionContext{ClientIP: "1.2.3.4", UserAgent: "test", APIKeyID: 1} + + // 同一轮对话重复请求(如重试)应产生相同 hash + mk := func() *ParsedRequest { + return &ParsedRequest{ + System: "You are a helpful assistant.", + HasSystem: true, + Messages: []any{ + map[string]any{"role": "user", "content": "hello"}, + map[string]any{"role": "assistant", "content": "Hi there!"}, + map[string]any{"role": "user", "content": "How are you?"}, + }, + SessionContext: ctx, + } + } + + h1 := svc.GenerateSessionHash(mk()) + h2 := 
svc.GenerateSessionHash(mk()) + require.Equal(t, h1, h2, "same conversation state should produce identical hash on retry") +} + +// ============ 消息回退测试 ============ + +func TestGenerateSessionHash_MessageRollback(t *testing.T) { + svc := &GatewayService{} + + ctx := &SessionContext{ClientIP: "1.2.3.4", UserAgent: "test", APIKeyID: 1} + + // 模拟消息回退:用户删掉最后一轮再重发 + original := &ParsedRequest{ + System: "System prompt", + HasSystem: true, + Messages: []any{ + map[string]any{"role": "user", "content": "msg1"}, + map[string]any{"role": "assistant", "content": "reply1"}, + map[string]any{"role": "user", "content": "msg2"}, + map[string]any{"role": "assistant", "content": "reply2"}, + map[string]any{"role": "user", "content": "msg3"}, + }, + SessionContext: ctx, + } + + // 回退到 msg2 后,用新的 msg3 替代 + rollback := &ParsedRequest{ + System: "System prompt", + HasSystem: true, + Messages: []any{ + map[string]any{"role": "user", "content": "msg1"}, + map[string]any{"role": "assistant", "content": "reply1"}, + map[string]any{"role": "user", "content": "msg2"}, + map[string]any{"role": "assistant", "content": "reply2"}, + map[string]any{"role": "user", "content": "different msg3"}, + }, + SessionContext: ctx, + } + + hOrig := svc.GenerateSessionHash(original) + hRollback := svc.GenerateSessionHash(rollback) + require.NotEqual(t, hOrig, hRollback, "rollback with different last message should produce different hash") +} + +func TestGenerateSessionHash_MessageRollbackSameContent(t *testing.T) { + svc := &GatewayService{} + + ctx := &SessionContext{ClientIP: "1.2.3.4", UserAgent: "test", APIKeyID: 1} + + // 回退后重新发送相同内容 → 相同 hash(合理的粘性恢复) + mk := func() *ParsedRequest { + return &ParsedRequest{ + System: "System prompt", + HasSystem: true, + Messages: []any{ + map[string]any{"role": "user", "content": "msg1"}, + map[string]any{"role": "assistant", "content": "reply1"}, + map[string]any{"role": "user", "content": "msg2"}, + }, + SessionContext: ctx, + } + } + + h1 := 
svc.GenerateSessionHash(mk()) + h2 := svc.GenerateSessionHash(mk()) + require.Equal(t, h1, h2, "rollback and resend same content should produce same hash") +} + +// ============ 相同 System、不同用户消息 ============ + +func TestGenerateSessionHash_SameSystemDifferentUsers(t *testing.T) { + svc := &GatewayService{} + + // 两个不同用户使用相同 system prompt 但发送不同消息 + user1 := &ParsedRequest{ + System: "You are a code reviewer.", + HasSystem: true, + Messages: []any{ + map[string]any{"role": "user", "content": "Review this Go code"}, + }, + SessionContext: &SessionContext{ + ClientIP: "1.1.1.1", + UserAgent: "vscode", + APIKeyID: 1, + }, + } + user2 := &ParsedRequest{ + System: "You are a code reviewer.", + HasSystem: true, + Messages: []any{ + map[string]any{"role": "user", "content": "Review this Python code"}, + }, + SessionContext: &SessionContext{ + ClientIP: "2.2.2.2", + UserAgent: "vscode", + APIKeyID: 2, + }, + } + + h1 := svc.GenerateSessionHash(user1) + h2 := svc.GenerateSessionHash(user2) + require.NotEqual(t, h1, h2, "different users with different messages should get different hashes") +} + +func TestGenerateSessionHash_SameSystemSameMessageDifferentContext(t *testing.T) { + svc := &GatewayService{} + + // 这是修复的核心场景:两个不同用户发送完全相同的 system + messages(如 "hello") + // 有了 SessionContext 后应该产生不同 hash + user1 := &ParsedRequest{ + System: "You are a helpful assistant.", + HasSystem: true, + Messages: []any{ + map[string]any{"role": "user", "content": "hello"}, + }, + SessionContext: &SessionContext{ + ClientIP: "1.1.1.1", + UserAgent: "Mozilla/5.0", + APIKeyID: 10, + }, + } + user2 := &ParsedRequest{ + System: "You are a helpful assistant.", + HasSystem: true, + Messages: []any{ + map[string]any{"role": "user", "content": "hello"}, + }, + SessionContext: &SessionContext{ + ClientIP: "2.2.2.2", + UserAgent: "Mozilla/5.0", + APIKeyID: 20, + }, + } + + h1 := svc.GenerateSessionHash(user1) + h2 := svc.GenerateSessionHash(user2) + require.NotEqual(t, h1, h2, "CRITICAL: same 
system+messages but different users should get different hashes") +} + +// ============ SessionContext 各字段独立影响测试 ============ + +func TestGenerateSessionHash_SessionContext_IPDifference(t *testing.T) { + svc := &GatewayService{} + + base := func(ip string) *ParsedRequest { + return &ParsedRequest{ + Messages: []any{ + map[string]any{"role": "user", "content": "test"}, + }, + SessionContext: &SessionContext{ + ClientIP: ip, + UserAgent: "same-ua", + APIKeyID: 1, + }, + } + } + + h1 := svc.GenerateSessionHash(base("1.1.1.1")) + h2 := svc.GenerateSessionHash(base("2.2.2.2")) + require.NotEqual(t, h1, h2, "different IP should produce different hash") +} + +func TestGenerateSessionHash_SessionContext_UADifference(t *testing.T) { + svc := &GatewayService{} + + base := func(ua string) *ParsedRequest { + return &ParsedRequest{ + Messages: []any{ + map[string]any{"role": "user", "content": "test"}, + }, + SessionContext: &SessionContext{ + ClientIP: "1.1.1.1", + UserAgent: ua, + APIKeyID: 1, + }, + } + } + + h1 := svc.GenerateSessionHash(base("Mozilla/5.0")) + h2 := svc.GenerateSessionHash(base("curl/7.0")) + require.NotEqual(t, h1, h2, "different User-Agent should produce different hash") +} + +func TestGenerateSessionHash_SessionContext_APIKeyIDDifference(t *testing.T) { + svc := &GatewayService{} + + base := func(keyID int64) *ParsedRequest { + return &ParsedRequest{ + Messages: []any{ + map[string]any{"role": "user", "content": "test"}, + }, + SessionContext: &SessionContext{ + ClientIP: "1.1.1.1", + UserAgent: "same-ua", + APIKeyID: keyID, + }, + } + } + + h1 := svc.GenerateSessionHash(base(1)) + h2 := svc.GenerateSessionHash(base(2)) + require.NotEqual(t, h1, h2, "different APIKeyID should produce different hash") +} + +// ============ 多用户并发相同消息场景 ============ + +func TestGenerateSessionHash_MultipleUsersSameFirstMessage(t *testing.T) { + svc := &GatewayService{} + + // 模拟 5 个不同用户同时发送 "hello" → 应该产生 5 个不同的 hash + hashes := make(map[string]bool) + for i := 0; i < 5; 
i++ { + parsed := &ParsedRequest{ + Messages: []any{ + map[string]any{"role": "user", "content": "hello"}, + }, + SessionContext: &SessionContext{ + ClientIP: "192.168.1." + string(rune('1'+i)), + UserAgent: "client-" + string(rune('A'+i)), + APIKeyID: int64(i + 1), + }, + } + h := svc.GenerateSessionHash(parsed) + require.NotEmpty(t, h) + require.False(t, hashes[h], "hash collision detected for user %d", i) + hashes[h] = true + } + require.Len(t, hashes, 5, "5 different users should produce 5 unique hashes") +} + +// ============ 连续会话粘性:多轮对话同一用户 ============ + +func TestGenerateSessionHash_SameUserGrowingConversation(t *testing.T) { + svc := &GatewayService{} + + ctx := &SessionContext{ClientIP: "1.2.3.4", UserAgent: "browser", APIKeyID: 42} + + // 模拟同一用户的连续会话,每轮 hash 不同但同用户重试保持一致 + messages := []map[string]any{ + {"role": "user", "content": "msg1"}, + {"role": "assistant", "content": "reply1"}, + {"role": "user", "content": "msg2"}, + {"role": "assistant", "content": "reply2"}, + {"role": "user", "content": "msg3"}, + {"role": "assistant", "content": "reply3"}, + {"role": "user", "content": "msg4"}, + } + + prevHash := "" + for round := 1; round <= len(messages); round += 2 { + // 构建前 round 条消息 + msgs := make([]any, round) + for j := 0; j < round; j++ { + msgs[j] = messages[j] + } + parsed := &ParsedRequest{ + System: "System", + HasSystem: true, + Messages: msgs, + SessionContext: ctx, + } + h := svc.GenerateSessionHash(parsed) + require.NotEmpty(t, h, "round %d hash should not be empty", round) + + if prevHash != "" { + require.NotEqual(t, prevHash, h, "round %d hash should differ from previous round", round) + } + prevHash = h + + // 同一轮重试应该相同 + h2 := svc.GenerateSessionHash(parsed) + require.Equal(t, h, h2, "retry of round %d should produce same hash", round) + } +} + +// ============ 多轮消息内容结构化测试 ============ + +func TestGenerateSessionHash_MultipleUserMessages(t *testing.T) { + svc := &GatewayService{} + + ctx := &SessionContext{ClientIP: "1.2.3.4", 
UserAgent: "test", APIKeyID: 1} + + // 5 条用户消息(无 assistant 回复) + parsed := &ParsedRequest{ + Messages: []any{ + map[string]any{"role": "user", "content": "first"}, + map[string]any{"role": "user", "content": "second"}, + map[string]any{"role": "user", "content": "third"}, + map[string]any{"role": "user", "content": "fourth"}, + map[string]any{"role": "user", "content": "fifth"}, + }, + SessionContext: ctx, + } + + h := svc.GenerateSessionHash(parsed) + require.NotEmpty(t, h) + + // 修改中间一条消息应该改变 hash + parsed2 := &ParsedRequest{ + Messages: []any{ + map[string]any{"role": "user", "content": "first"}, + map[string]any{"role": "user", "content": "CHANGED"}, + map[string]any{"role": "user", "content": "third"}, + map[string]any{"role": "user", "content": "fourth"}, + map[string]any{"role": "user", "content": "fifth"}, + }, + SessionContext: ctx, + } + + h2 := svc.GenerateSessionHash(parsed2) + require.NotEqual(t, h, h2, "changing any message should change the hash") +} + +func TestGenerateSessionHash_MessageOrderMatters(t *testing.T) { + svc := &GatewayService{} + + ctx := &SessionContext{ClientIP: "1.2.3.4", UserAgent: "test", APIKeyID: 1} + + parsed1 := &ParsedRequest{ + Messages: []any{ + map[string]any{"role": "user", "content": "alpha"}, + map[string]any{"role": "user", "content": "beta"}, + }, + SessionContext: ctx, + } + parsed2 := &ParsedRequest{ + Messages: []any{ + map[string]any{"role": "user", "content": "beta"}, + map[string]any{"role": "user", "content": "alpha"}, + }, + SessionContext: ctx, + } + + h1 := svc.GenerateSessionHash(parsed1) + h2 := svc.GenerateSessionHash(parsed2) + require.NotEqual(t, h1, h2, "message order should affect the hash") +} + +// ============ 复杂内容格式测试 ============ + +func TestGenerateSessionHash_StructuredContent(t *testing.T) { + svc := &GatewayService{} + + ctx := &SessionContext{ClientIP: "1.2.3.4", UserAgent: "test", APIKeyID: 1} + + // 结构化 content(数组形式) + parsed := &ParsedRequest{ + Messages: []any{ + map[string]any{ + 
"role": "user", + "content": []any{ + map[string]any{"type": "text", "text": "Look at this"}, + map[string]any{"type": "text", "text": "And this too"}, + }, + }, + }, + SessionContext: ctx, + } + + h := svc.GenerateSessionHash(parsed) + require.NotEmpty(t, h, "structured content should produce a hash") +} + +func TestGenerateSessionHash_ArraySystemPrompt(t *testing.T) { + svc := &GatewayService{} + + ctx := &SessionContext{ClientIP: "1.2.3.4", UserAgent: "test", APIKeyID: 1} + + // 数组格式的 system prompt + parsed := &ParsedRequest{ + System: []any{ + map[string]any{"type": "text", "text": "You are a helpful assistant."}, + map[string]any{"type": "text", "text": "Be concise."}, + }, + HasSystem: true, + Messages: []any{ + map[string]any{"role": "user", "content": "hello"}, + }, + SessionContext: ctx, + } + + h := svc.GenerateSessionHash(parsed) + require.NotEmpty(t, h, "array system prompt should produce a hash") +} + +// ============ SessionContext 与 cache_control 优先级 ============ + +func TestGenerateSessionHash_CacheControlOverridesSessionContext(t *testing.T) { + svc := &GatewayService{} + + // 当有 cache_control: ephemeral 时,使用第 2 级优先级 + // SessionContext 不应影响结果 + parsed1 := &ParsedRequest{ + System: []any{ + map[string]any{ + "type": "text", + "text": "You are a tool-specific assistant.", + "cache_control": map[string]any{"type": "ephemeral"}, + }, + }, + HasSystem: true, + Messages: []any{ + map[string]any{"role": "user", "content": "hello"}, + }, + SessionContext: &SessionContext{ + ClientIP: "1.1.1.1", + UserAgent: "ua1", + APIKeyID: 100, + }, + } + parsed2 := &ParsedRequest{ + System: []any{ + map[string]any{ + "type": "text", + "text": "You are a tool-specific assistant.", + "cache_control": map[string]any{"type": "ephemeral"}, + }, + }, + HasSystem: true, + Messages: []any{ + map[string]any{"role": "user", "content": "hello"}, + }, + SessionContext: &SessionContext{ + ClientIP: "2.2.2.2", + UserAgent: "ua2", + APIKeyID: 200, + }, + } + + h1 := 
svc.GenerateSessionHash(parsed1) + h2 := svc.GenerateSessionHash(parsed2) + require.Equal(t, h1, h2, "cache_control ephemeral has higher priority, SessionContext should not affect result") +} + +// ============ 边界情况 ============ + +func TestGenerateSessionHash_EmptyMessages(t *testing.T) { + svc := &GatewayService{} + + parsed := &ParsedRequest{ + Messages: []any{}, + SessionContext: &SessionContext{ + ClientIP: "1.1.1.1", + UserAgent: "test", + APIKeyID: 1, + }, + } + + // 空 messages + 只有 SessionContext 时,combined.Len() > 0 因为有 context 写入 + h := svc.GenerateSessionHash(parsed) + require.NotEmpty(t, h, "empty messages with SessionContext should still produce a hash from context") +} + +func TestGenerateSessionHash_EmptyMessagesNoContext(t *testing.T) { + svc := &GatewayService{} + + parsed := &ParsedRequest{ + Messages: []any{}, + } + + h := svc.GenerateSessionHash(parsed) + require.Empty(t, h, "empty messages without SessionContext should produce empty hash") +} + +func TestGenerateSessionHash_SessionContextWithEmptyFields(t *testing.T) { + svc := &GatewayService{} + + // SessionContext 字段为空字符串和零值时仍应影响 hash + withEmptyCtx := &ParsedRequest{ + Messages: []any{ + map[string]any{"role": "user", "content": "test"}, + }, + SessionContext: &SessionContext{ + ClientIP: "", + UserAgent: "", + APIKeyID: 0, + }, + } + withoutCtx := &ParsedRequest{ + Messages: []any{ + map[string]any{"role": "user", "content": "test"}, + }, + } + + h1 := svc.GenerateSessionHash(withEmptyCtx) + h2 := svc.GenerateSessionHash(withoutCtx) + // 有 SessionContext(即使字段为空)仍然会写入分隔符 "::" 等 + require.NotEqual(t, h1, h2, "empty-field SessionContext should still differ from nil SessionContext") +} + +// ============ 长对话历史测试 ============ + +func TestGenerateSessionHash_LongConversation(t *testing.T) { + svc := &GatewayService{} + + ctx := &SessionContext{ClientIP: "1.2.3.4", UserAgent: "test", APIKeyID: 1} + + // 构建 20 轮对话 + messages := make([]any, 0, 40) + for i := 0; i < 20; i++ { + messages = 
append(messages, map[string]any{ + "role": "user", + "content": "user message " + string(rune('A'+i)), + }) + messages = append(messages, map[string]any{ + "role": "assistant", + "content": "assistant reply " + string(rune('A'+i)), + }) + } + + parsed := &ParsedRequest{ + System: "System prompt", + HasSystem: true, + Messages: messages, + SessionContext: ctx, + } + + h := svc.GenerateSessionHash(parsed) + require.NotEmpty(t, h) + + // 再加一轮应该不同 + moreMessages := make([]any, len(messages)+2) + copy(moreMessages, messages) + moreMessages[len(messages)] = map[string]any{"role": "user", "content": "one more"} + moreMessages[len(messages)+1] = map[string]any{"role": "assistant", "content": "ok"} + + parsed2 := &ParsedRequest{ + System: "System prompt", + HasSystem: true, + Messages: moreMessages, + SessionContext: ctx, + } + + h2 := svc.GenerateSessionHash(parsed2) + require.NotEqual(t, h, h2, "adding more messages to long conversation should change hash") +} + +// ============ Gemini 原生格式 session hash 测试 ============ + +func TestGenerateSessionHash_GeminiContentsProducesHash(t *testing.T) { + svc := &GatewayService{} + + // Gemini 格式: contents[].parts[].text + parsed := &ParsedRequest{ + Messages: []any{ + map[string]any{ + "role": "user", + "parts": []any{ + map[string]any{"text": "Hello from Gemini"}, + }, + }, + }, + SessionContext: &SessionContext{ + ClientIP: "1.2.3.4", + UserAgent: "gemini-cli", + APIKeyID: 1, + }, + } + + h := svc.GenerateSessionHash(parsed) + require.NotEmpty(t, h, "Gemini contents with parts should produce a non-empty hash") +} + +func TestGenerateSessionHash_GeminiDifferentContentsDifferentHash(t *testing.T) { + svc := &GatewayService{} + + ctx := &SessionContext{ClientIP: "1.2.3.4", UserAgent: "gemini-cli", APIKeyID: 1} + + parsed1 := &ParsedRequest{ + Messages: []any{ + map[string]any{ + "role": "user", + "parts": []any{ + map[string]any{"text": "Hello"}, + }, + }, + }, + SessionContext: ctx, + } + parsed2 := &ParsedRequest{ + Messages: 
[]any{ + map[string]any{ + "role": "user", + "parts": []any{ + map[string]any{"text": "Goodbye"}, + }, + }, + }, + SessionContext: ctx, + } + + h1 := svc.GenerateSessionHash(parsed1) + h2 := svc.GenerateSessionHash(parsed2) + require.NotEqual(t, h1, h2, "different Gemini contents should produce different hashes") +} + +func TestGenerateSessionHash_GeminiSameContentsSameHash(t *testing.T) { + svc := &GatewayService{} + + ctx := &SessionContext{ClientIP: "1.2.3.4", UserAgent: "gemini-cli", APIKeyID: 1} + + mk := func() *ParsedRequest { + return &ParsedRequest{ + Messages: []any{ + map[string]any{ + "role": "user", + "parts": []any{ + map[string]any{"text": "Hello"}, + }, + }, + map[string]any{ + "role": "model", + "parts": []any{ + map[string]any{"text": "Hi there!"}, + }, + }, + }, + SessionContext: ctx, + } + } + + h1 := svc.GenerateSessionHash(mk()) + h2 := svc.GenerateSessionHash(mk()) + require.Equal(t, h1, h2, "same Gemini contents should produce identical hash") +} + +func TestGenerateSessionHash_GeminiMultiTurnHashChanges(t *testing.T) { + svc := &GatewayService{} + + ctx := &SessionContext{ClientIP: "1.2.3.4", UserAgent: "gemini-cli", APIKeyID: 1} + + round1 := &ParsedRequest{ + Messages: []any{ + map[string]any{ + "role": "user", + "parts": []any{map[string]any{"text": "hello"}}, + }, + }, + SessionContext: ctx, + } + + round2 := &ParsedRequest{ + Messages: []any{ + map[string]any{ + "role": "user", + "parts": []any{map[string]any{"text": "hello"}}, + }, + map[string]any{ + "role": "model", + "parts": []any{map[string]any{"text": "Hi!"}}, + }, + map[string]any{ + "role": "user", + "parts": []any{map[string]any{"text": "How are you?"}}, + }, + }, + SessionContext: ctx, + } + + h1 := svc.GenerateSessionHash(round1) + h2 := svc.GenerateSessionHash(round2) + require.NotEmpty(t, h1) + require.NotEmpty(t, h2) + require.NotEqual(t, h1, h2, "Gemini multi-turn should produce different hashes per round") +} + +func 
TestGenerateSessionHash_GeminiDifferentUsersSameContentDifferentHash(t *testing.T) { + svc := &GatewayService{} + + // 核心场景:两个不同用户发送相同 Gemini 格式消息应得到不同 hash + user1 := &ParsedRequest{ + Messages: []any{ + map[string]any{ + "role": "user", + "parts": []any{map[string]any{"text": "hello"}}, + }, + }, + SessionContext: &SessionContext{ + ClientIP: "1.1.1.1", + UserAgent: "gemini-cli", + APIKeyID: 10, + }, + } + user2 := &ParsedRequest{ + Messages: []any{ + map[string]any{ + "role": "user", + "parts": []any{map[string]any{"text": "hello"}}, + }, + }, + SessionContext: &SessionContext{ + ClientIP: "2.2.2.2", + UserAgent: "gemini-cli", + APIKeyID: 20, + }, + } + + h1 := svc.GenerateSessionHash(user1) + h2 := svc.GenerateSessionHash(user2) + require.NotEqual(t, h1, h2, "CRITICAL: different Gemini users with same content must get different hashes") +} + +func TestGenerateSessionHash_GeminiSystemInstructionAffectsHash(t *testing.T) { + svc := &GatewayService{} + + ctx := &SessionContext{ClientIP: "1.2.3.4", UserAgent: "gemini-cli", APIKeyID: 1} + + // systemInstruction 经 ParseGatewayRequest 解析后存入 parsed.System + withSys := &ParsedRequest{ + System: []any{ + map[string]any{"text": "You are a coding assistant."}, + }, + Messages: []any{ + map[string]any{ + "role": "user", + "parts": []any{map[string]any{"text": "hello"}}, + }, + }, + SessionContext: ctx, + } + withoutSys := &ParsedRequest{ + Messages: []any{ + map[string]any{ + "role": "user", + "parts": []any{map[string]any{"text": "hello"}}, + }, + }, + SessionContext: ctx, + } + + h1 := svc.GenerateSessionHash(withSys) + h2 := svc.GenerateSessionHash(withoutSys) + require.NotEqual(t, h1, h2, "systemInstruction should affect the hash") +} + +func TestGenerateSessionHash_GeminiMultiPartMessage(t *testing.T) { + svc := &GatewayService{} + + ctx := &SessionContext{ClientIP: "1.2.3.4", UserAgent: "gemini-cli", APIKeyID: 1} + + // 多 parts 的消息 + parsed := &ParsedRequest{ + Messages: []any{ + map[string]any{ + "role": "user", + 
"parts": []any{ + map[string]any{"text": "Part 1"}, + map[string]any{"text": "Part 2"}, + map[string]any{"text": "Part 3"}, + }, + }, + }, + SessionContext: ctx, + } + + h := svc.GenerateSessionHash(parsed) + require.NotEmpty(t, h, "multi-part Gemini message should produce a hash") + + // 不同内容的多 parts + parsed2 := &ParsedRequest{ + Messages: []any{ + map[string]any{ + "role": "user", + "parts": []any{ + map[string]any{"text": "Part 1"}, + map[string]any{"text": "CHANGED"}, + map[string]any{"text": "Part 3"}, + }, + }, + }, + SessionContext: ctx, + } + + h2 := svc.GenerateSessionHash(parsed2) + require.NotEqual(t, h, h2, "changing a part should change the hash") +} + +func TestGenerateSessionHash_GeminiNonTextPartsIgnored(t *testing.T) { + svc := &GatewayService{} + + ctx := &SessionContext{ClientIP: "1.2.3.4", UserAgent: "gemini-cli", APIKeyID: 1} + + // 含非 text 类型 parts(如 inline_data),应被跳过但不报错 + parsed := &ParsedRequest{ + Messages: []any{ + map[string]any{ + "role": "user", + "parts": []any{ + map[string]any{"text": "Describe this image"}, + map[string]any{"inline_data": map[string]any{"mime_type": "image/png", "data": "base64..."}}, + }, + }, + }, + SessionContext: ctx, + } + + h := svc.GenerateSessionHash(parsed) + require.NotEmpty(t, h, "Gemini message with mixed parts should still produce a hash from text parts") +} + +func TestGenerateSessionHash_GeminiMultiTurnHashNotSticky(t *testing.T) { + svc := &GatewayService{} + + ctx := &SessionContext{ClientIP: "10.0.0.1", UserAgent: "gemini-cli", APIKeyID: 42} + + // 模拟同一 Gemini 会话的三轮请求,每轮 contents 累积增长。 + // 验证预期行为:每轮 hash 都不同,即 GenerateSessionHash 不具备跨轮粘性。 + // 这是 by-design 的——Gemini 的跨轮粘性由 Digest Fallback(BuildGeminiDigestChain)负责。 + round1Body := []byte(`{ + "systemInstruction": {"parts": [{"text": "You are a coding assistant."}]}, + "contents": [ + {"role": "user", "parts": [{"text": "Write a Go function"}]} + ] + }`) + round2Body := []byte(`{ + "systemInstruction": {"parts": [{"text": "You are a coding 
assistant."}]}, + "contents": [ + {"role": "user", "parts": [{"text": "Write a Go function"}]}, + {"role": "model", "parts": [{"text": "func hello() {}"}]}, + {"role": "user", "parts": [{"text": "Add error handling"}]} + ] + }`) + round3Body := []byte(`{ + "systemInstruction": {"parts": [{"text": "You are a coding assistant."}]}, + "contents": [ + {"role": "user", "parts": [{"text": "Write a Go function"}]}, + {"role": "model", "parts": [{"text": "func hello() {}"}]}, + {"role": "user", "parts": [{"text": "Add error handling"}]}, + {"role": "model", "parts": [{"text": "func hello() error { return nil }"}]}, + {"role": "user", "parts": [{"text": "Now add tests"}]} + ] + }`) + + hashes := make([]string, 3) + for i, body := range [][]byte{round1Body, round2Body, round3Body} { + parsed, err := ParseGatewayRequest(body, "gemini") + require.NoError(t, err) + parsed.SessionContext = ctx + hashes[i] = svc.GenerateSessionHash(parsed) + require.NotEmpty(t, hashes[i], "round %d hash should not be empty", i+1) + } + + // 每轮 hash 都不同——这是预期行为 + require.NotEqual(t, hashes[0], hashes[1], "round 1 vs 2 hash should differ (contents grow)") + require.NotEqual(t, hashes[1], hashes[2], "round 2 vs 3 hash should differ (contents grow)") + require.NotEqual(t, hashes[0], hashes[2], "round 1 vs 3 hash should differ") + + // 同一轮重试应产生相同 hash + parsed1Again, err := ParseGatewayRequest(round2Body, "gemini") + require.NoError(t, err) + parsed1Again.SessionContext = ctx + h2Again := svc.GenerateSessionHash(parsed1Again) + require.Equal(t, hashes[1], h2Again, "retry of same round should produce same hash") +} + +func TestGenerateSessionHash_GeminiEndToEnd(t *testing.T) { + svc := &GatewayService{} + + // 端到端测试:模拟 ParseGatewayRequest + GenerateSessionHash 完整流程 + body := []byte(`{ + "model": "gemini-2.5-pro", + "systemInstruction": { + "parts": [{"text": "You are a coding assistant."}] + }, + "contents": [ + {"role": "user", "parts": [{"text": "Write a Go function"}]}, + {"role": "model", "parts": 
[{"text": "Here is a function..."}]}, + {"role": "user", "parts": [{"text": "Now add error handling"}]} + ] + }`) + + parsed, err := ParseGatewayRequest(body, "gemini") + require.NoError(t, err) + parsed.SessionContext = &SessionContext{ + ClientIP: "10.0.0.1", + UserAgent: "gemini-cli/1.0", + APIKeyID: 42, + } + + h := svc.GenerateSessionHash(parsed) + require.NotEmpty(t, h, "end-to-end Gemini flow should produce a hash") + + // 同一请求再次解析应产生相同 hash + parsed2, err := ParseGatewayRequest(body, "gemini") + require.NoError(t, err) + parsed2.SessionContext = &SessionContext{ + ClientIP: "10.0.0.1", + UserAgent: "gemini-cli/1.0", + APIKeyID: 42, + } + + h2 := svc.GenerateSessionHash(parsed2) + require.Equal(t, h, h2, "same request should produce same hash") + + // 不同用户发送相同请求应产生不同 hash + parsed3, err := ParseGatewayRequest(body, "gemini") + require.NoError(t, err) + parsed3.SessionContext = &SessionContext{ + ClientIP: "10.0.0.2", + UserAgent: "gemini-cli/1.0", + APIKeyID: 99, + } + + h3 := svc.GenerateSessionHash(parsed3) + require.NotEqual(t, h, h3, "different user with same Gemini request should get different hash") +} diff --git a/backend/internal/service/model_rate_limit_test.go b/backend/internal/service/model_rate_limit_test.go index a51e6909..b79b9688 100644 --- a/backend/internal/service/model_rate_limit_test.go +++ b/backend/internal/service/model_rate_limit_test.go @@ -318,110 +318,6 @@ func TestGetModelRateLimitRemainingTime(t *testing.T) { } } -func TestGetQuotaScopeRateLimitRemainingTime(t *testing.T) { - now := time.Now() - future10m := now.Add(10 * time.Minute).Format(time.RFC3339) - past := now.Add(-10 * time.Minute).Format(time.RFC3339) - - tests := []struct { - name string - account *Account - requestedModel string - minExpected time.Duration - maxExpected time.Duration - }{ - { - name: "nil account", - account: nil, - requestedModel: "claude-sonnet-4-5", - minExpected: 0, - maxExpected: 0, - }, - { - name: "non-antigravity platform", - account: 
&Account{ - Platform: PlatformAnthropic, - Extra: map[string]any{ - antigravityQuotaScopesKey: map[string]any{ - "claude": map[string]any{ - "rate_limit_reset_at": future10m, - }, - }, - }, - }, - requestedModel: "claude-sonnet-4-5", - minExpected: 0, - maxExpected: 0, - }, - { - name: "claude scope rate limited", - account: &Account{ - Platform: PlatformAntigravity, - Extra: map[string]any{ - antigravityQuotaScopesKey: map[string]any{ - "claude": map[string]any{ - "rate_limit_reset_at": future10m, - }, - }, - }, - }, - requestedModel: "claude-sonnet-4-5", - minExpected: 9 * time.Minute, - maxExpected: 11 * time.Minute, - }, - { - name: "gemini_text scope rate limited", - account: &Account{ - Platform: PlatformAntigravity, - Extra: map[string]any{ - antigravityQuotaScopesKey: map[string]any{ - "gemini_text": map[string]any{ - "rate_limit_reset_at": future10m, - }, - }, - }, - }, - requestedModel: "gemini-3-flash", - minExpected: 9 * time.Minute, - maxExpected: 11 * time.Minute, - }, - { - name: "expired scope rate limit", - account: &Account{ - Platform: PlatformAntigravity, - Extra: map[string]any{ - antigravityQuotaScopesKey: map[string]any{ - "claude": map[string]any{ - "rate_limit_reset_at": past, - }, - }, - }, - }, - requestedModel: "claude-sonnet-4-5", - minExpected: 0, - maxExpected: 0, - }, - { - name: "unsupported model", - account: &Account{ - Platform: PlatformAntigravity, - }, - requestedModel: "gpt-4", - minExpected: 0, - maxExpected: 0, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := tt.account.GetQuotaScopeRateLimitRemainingTime(tt.requestedModel) - if result < tt.minExpected || result > tt.maxExpected { - t.Errorf("GetQuotaScopeRateLimitRemainingTime() = %v, want between %v and %v", result, tt.minExpected, tt.maxExpected) - } - }) - } -} - func TestGetRateLimitRemainingTime(t *testing.T) { now := time.Now() future15m := now.Add(15 * time.Minute).Format(time.RFC3339) @@ -442,45 +338,19 @@ func 
TestGetRateLimitRemainingTime(t *testing.T) { maxExpected: 0, }, { - name: "model remaining > scope remaining - returns model", + name: "model rate limited - 15 minutes", account: &Account{ Platform: PlatformAntigravity, Extra: map[string]any{ modelRateLimitsKey: map[string]any{ "claude-sonnet-4-5": map[string]any{ - "rate_limit_reset_at": future15m, // 15 分钟 - }, - }, - antigravityQuotaScopesKey: map[string]any{ - "claude": map[string]any{ - "rate_limit_reset_at": future5m, // 5 分钟 + "rate_limit_reset_at": future15m, }, }, }, }, requestedModel: "claude-sonnet-4-5", - minExpected: 14 * time.Minute, // 应返回较大的 15 分钟 - maxExpected: 16 * time.Minute, - }, - { - name: "scope remaining > model remaining - returns scope", - account: &Account{ - Platform: PlatformAntigravity, - Extra: map[string]any{ - modelRateLimitsKey: map[string]any{ - "claude-sonnet-4-5": map[string]any{ - "rate_limit_reset_at": future5m, // 5 分钟 - }, - }, - antigravityQuotaScopesKey: map[string]any{ - "claude": map[string]any{ - "rate_limit_reset_at": future15m, // 15 分钟 - }, - }, - }, - }, - requestedModel: "claude-sonnet-4-5", - minExpected: 14 * time.Minute, // 应返回较大的 15 分钟 + minExpected: 14 * time.Minute, maxExpected: 16 * time.Minute, }, { @@ -499,22 +369,6 @@ func TestGetRateLimitRemainingTime(t *testing.T) { minExpected: 4 * time.Minute, maxExpected: 6 * time.Minute, }, - { - name: "only scope rate limited", - account: &Account{ - Platform: PlatformAntigravity, - Extra: map[string]any{ - antigravityQuotaScopesKey: map[string]any{ - "claude": map[string]any{ - "rate_limit_reset_at": future5m, - }, - }, - }, - }, - requestedModel: "claude-sonnet-4-5", - minExpected: 4 * time.Minute, - maxExpected: 6 * time.Minute, - }, { name: "neither rate limited", account: &Account{ diff --git a/backend/internal/service/openai_gateway_service.go b/backend/internal/service/openai_gateway_service.go index fbe81cb4..6c4fe256 100644 --- a/backend/internal/service/openai_gateway_service.go +++ 
b/backend/internal/service/openai_gateway_service.go @@ -580,10 +580,6 @@ func (s *OpenAIGatewayService) SelectAccountWithLoadAwareness(ctx context.Contex } } } else { - type accountWithLoad struct { - account *Account - loadInfo *AccountLoadInfo - } var available []accountWithLoad for _, acc := range candidates { loadInfo := loadMap[acc.ID] @@ -618,6 +614,7 @@ func (s *OpenAIGatewayService) SelectAccountWithLoadAwareness(ctx context.Contex return a.account.LastUsedAt.Before(*b.account.LastUsedAt) } }) + shuffleWithinSortGroups(available) for _, item := range available { result, err := s.tryAcquireAccountSlot(ctx, item.account.ID, item.account.Concurrency) diff --git a/backend/internal/service/openai_gateway_service_test.go b/backend/internal/service/openai_gateway_service_test.go index 159b0afb..ae69a986 100644 --- a/backend/internal/service/openai_gateway_service_test.go +++ b/backend/internal/service/openai_gateway_service_test.go @@ -204,30 +204,6 @@ func (c *stubGatewayCache) DeleteSessionAccountID(ctx context.Context, groupID i return nil } -func (c *stubGatewayCache) IncrModelCallCount(ctx context.Context, accountID int64, model string) (int64, error) { - return 0, nil -} - -func (c *stubGatewayCache) GetModelLoadBatch(ctx context.Context, accountIDs []int64, model string) (map[int64]*ModelLoadInfo, error) { - return nil, nil -} - -func (c *stubGatewayCache) FindGeminiSession(ctx context.Context, groupID int64, prefixHash, digestChain string) (uuid string, accountID int64, found bool) { - return "", 0, false -} - -func (c *stubGatewayCache) SaveGeminiSession(ctx context.Context, groupID int64, prefixHash, digestChain, uuid string, accountID int64) error { - return nil -} - -func (c *stubGatewayCache) FindAnthropicSession(ctx context.Context, groupID int64, prefixHash, digestChain string) (uuid string, accountID int64, found bool) { - return "", 0, false -} - -func (c *stubGatewayCache) SaveAnthropicSession(ctx context.Context, groupID int64, prefixHash, 
digestChain, uuid string, accountID int64) error { - return nil -} - func TestOpenAISelectAccountWithLoadAwareness_FiltersUnschedulable(t *testing.T) { now := time.Now() resetAt := now.Add(10 * time.Minute) diff --git a/backend/internal/service/ops_account_availability.go b/backend/internal/service/ops_account_availability.go index a649e7b5..da66ec4d 100644 --- a/backend/internal/service/ops_account_availability.go +++ b/backend/internal/service/ops_account_availability.go @@ -66,7 +66,6 @@ func (s *OpsService) GetAccountAvailabilityStats(ctx context.Context, platformFi } isAvailable := acc.Status == StatusActive && acc.Schedulable && !isRateLimited && !isOverloaded && !isTempUnsched - scopeRateLimits := acc.GetAntigravityScopeRateLimits() if acc.Platform != "" { if _, ok := platform[acc.Platform]; !ok { @@ -85,14 +84,6 @@ func (s *OpsService) GetAccountAvailabilityStats(ctx context.Context, platformFi if hasError { p.ErrorCount++ } - if len(scopeRateLimits) > 0 { - if p.ScopeRateLimitCount == nil { - p.ScopeRateLimitCount = make(map[string]int64) - } - for scope := range scopeRateLimits { - p.ScopeRateLimitCount[scope]++ - } - } } for _, grp := range acc.Groups { @@ -117,14 +108,6 @@ func (s *OpsService) GetAccountAvailabilityStats(ctx context.Context, platformFi if hasError { g.ErrorCount++ } - if len(scopeRateLimits) > 0 { - if g.ScopeRateLimitCount == nil { - g.ScopeRateLimitCount = make(map[string]int64) - } - for scope := range scopeRateLimits { - g.ScopeRateLimitCount[scope]++ - } - } } displayGroupID := int64(0) @@ -157,9 +140,6 @@ func (s *OpsService) GetAccountAvailabilityStats(ctx context.Context, platformFi item.RateLimitRemainingSec = &remainingSec } } - if len(scopeRateLimits) > 0 { - item.ScopeRateLimits = scopeRateLimits - } if isOverloaded && acc.OverloadUntil != nil { item.OverloadUntil = acc.OverloadUntil remainingSec := int64(time.Until(*acc.OverloadUntil).Seconds()) diff --git a/backend/internal/service/ops_realtime_models.go 
b/backend/internal/service/ops_realtime_models.go index 33029f59..a19ab355 100644 --- a/backend/internal/service/ops_realtime_models.go +++ b/backend/internal/service/ops_realtime_models.go @@ -50,24 +50,22 @@ type UserConcurrencyInfo struct { // PlatformAvailability aggregates account availability by platform. type PlatformAvailability struct { - Platform string `json:"platform"` - TotalAccounts int64 `json:"total_accounts"` - AvailableCount int64 `json:"available_count"` - RateLimitCount int64 `json:"rate_limit_count"` - ScopeRateLimitCount map[string]int64 `json:"scope_rate_limit_count,omitempty"` - ErrorCount int64 `json:"error_count"` + Platform string `json:"platform"` + TotalAccounts int64 `json:"total_accounts"` + AvailableCount int64 `json:"available_count"` + RateLimitCount int64 `json:"rate_limit_count"` + ErrorCount int64 `json:"error_count"` } // GroupAvailability aggregates account availability by group. type GroupAvailability struct { - GroupID int64 `json:"group_id"` - GroupName string `json:"group_name"` - Platform string `json:"platform"` - TotalAccounts int64 `json:"total_accounts"` - AvailableCount int64 `json:"available_count"` - RateLimitCount int64 `json:"rate_limit_count"` - ScopeRateLimitCount map[string]int64 `json:"scope_rate_limit_count,omitempty"` - ErrorCount int64 `json:"error_count"` + GroupID int64 `json:"group_id"` + GroupName string `json:"group_name"` + Platform string `json:"platform"` + TotalAccounts int64 `json:"total_accounts"` + AvailableCount int64 `json:"available_count"` + RateLimitCount int64 `json:"rate_limit_count"` + ErrorCount int64 `json:"error_count"` } // AccountAvailability represents current availability for a single account. 
@@ -85,11 +83,10 @@ type AccountAvailability struct { IsOverloaded bool `json:"is_overloaded"` HasError bool `json:"has_error"` - RateLimitResetAt *time.Time `json:"rate_limit_reset_at"` - RateLimitRemainingSec *int64 `json:"rate_limit_remaining_sec"` - ScopeRateLimits map[string]int64 `json:"scope_rate_limits,omitempty"` - OverloadUntil *time.Time `json:"overload_until"` - OverloadRemainingSec *int64 `json:"overload_remaining_sec"` - ErrorMessage string `json:"error_message"` - TempUnschedulableUntil *time.Time `json:"temp_unschedulable_until,omitempty"` + RateLimitResetAt *time.Time `json:"rate_limit_reset_at"` + RateLimitRemainingSec *int64 `json:"rate_limit_remaining_sec"` + OverloadUntil *time.Time `json:"overload_until"` + OverloadRemainingSec *int64 `json:"overload_remaining_sec"` + ErrorMessage string `json:"error_message"` + TempUnschedulableUntil *time.Time `json:"temp_unschedulable_until,omitempty"` } diff --git a/backend/internal/service/ops_retry.go b/backend/internal/service/ops_retry.go index fbc800f2..23a524ad 100644 --- a/backend/internal/service/ops_retry.go +++ b/backend/internal/service/ops_retry.go @@ -12,6 +12,7 @@ import ( "strings" "time" + "github.com/Wei-Shaw/sub2api/internal/domain" "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" "github.com/gin-gonic/gin" @@ -528,7 +529,7 @@ func (s *OpsService) selectAccountForRetry(ctx context.Context, reqType opsRetry func extractRetryModelAndStream(reqType opsRetryRequestType, errorLog *OpsErrorLogDetail, body []byte) (model string, stream bool, err error) { switch reqType { case opsRetryTypeMessages: - parsed, parseErr := ParseGatewayRequest(body) + parsed, parseErr := ParseGatewayRequest(body, domain.PlatformAnthropic) if parseErr != nil { return "", false, fmt.Errorf("failed to parse messages request body: %w", parseErr) } @@ -596,7 +597,7 @@ func (s *OpsService) executeWithAccount(ctx context.Context, reqType opsRetryReq if 
s.gatewayService == nil { return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "gateway service not available"} } - parsedReq, parseErr := ParseGatewayRequest(body) + parsedReq, parseErr := ParseGatewayRequest(body, domain.PlatformAnthropic) if parseErr != nil { return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "failed to parse request body"} } diff --git a/backend/internal/service/ratelimit_service.go b/backend/internal/service/ratelimit_service.go index 47286deb..63732dee 100644 --- a/backend/internal/service/ratelimit_service.go +++ b/backend/internal/service/ratelimit_service.go @@ -62,6 +62,32 @@ func (s *RateLimitService) SetTokenCacheInvalidator(invalidator TokenCacheInvali s.tokenCacheInvalidator = invalidator } +// ErrorPolicyResult 表示错误策略检查的结果 +type ErrorPolicyResult int + +const ( + ErrorPolicyNone ErrorPolicyResult = iota // 未命中任何策略,继续默认逻辑 + ErrorPolicySkipped // 自定义错误码开启但未命中,跳过处理 + ErrorPolicyMatched // 自定义错误码命中,应停止调度 + ErrorPolicyTempUnscheduled // 临时不可调度规则命中 +) + +// CheckErrorPolicy 检查自定义错误码和临时不可调度规则。 +// 自定义错误码开启时覆盖后续所有逻辑(包括临时不可调度)。 +func (s *RateLimitService) CheckErrorPolicy(ctx context.Context, account *Account, statusCode int, responseBody []byte) ErrorPolicyResult { + if account.IsCustomErrorCodesEnabled() { + if account.ShouldHandleErrorCode(statusCode) { + return ErrorPolicyMatched + } + slog.Info("account_error_code_skipped", "account_id", account.ID, "status_code", statusCode) + return ErrorPolicySkipped + } + if s.tryTempUnschedulable(ctx, account, statusCode, responseBody) { + return ErrorPolicyTempUnscheduled + } + return ErrorPolicyNone +} + // HandleUpstreamError 处理上游错误响应,标记账号状态 // 返回是否应该停止该账号的调度 func (s *RateLimitService) HandleUpstreamError(ctx context.Context, account *Account, statusCode int, headers http.Header, responseBody []byte) (shouldDisable bool) { diff --git a/backend/internal/service/scheduler_shuffle_test.go b/backend/internal/service/scheduler_shuffle_test.go new file mode 100644 
index 00000000..78ac5f57 --- /dev/null +++ b/backend/internal/service/scheduler_shuffle_test.go @@ -0,0 +1,318 @@ +//go:build unit + +package service + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +// ============ shuffleWithinSortGroups 测试 ============ + +func TestShuffleWithinSortGroups_Empty(t *testing.T) { + shuffleWithinSortGroups(nil) + shuffleWithinSortGroups([]accountWithLoad{}) +} + +func TestShuffleWithinSortGroups_SingleElement(t *testing.T) { + accounts := []accountWithLoad{ + {account: &Account{ID: 1, Priority: 1}, loadInfo: &AccountLoadInfo{LoadRate: 10}}, + } + shuffleWithinSortGroups(accounts) + require.Equal(t, int64(1), accounts[0].account.ID) +} + +func TestShuffleWithinSortGroups_DifferentGroups_OrderPreserved(t *testing.T) { + now := time.Now() + earlier := now.Add(-1 * time.Hour) + + accounts := []accountWithLoad{ + {account: &Account{ID: 1, Priority: 1, LastUsedAt: &earlier}, loadInfo: &AccountLoadInfo{LoadRate: 10}}, + {account: &Account{ID: 2, Priority: 1, LastUsedAt: &now}, loadInfo: &AccountLoadInfo{LoadRate: 20}}, + {account: &Account{ID: 3, Priority: 2, LastUsedAt: &earlier}, loadInfo: &AccountLoadInfo{LoadRate: 10}}, + } + + // 每个元素都属于不同组(Priority 或 LoadRate 或 LastUsedAt 不同),顺序不变 + for i := 0; i < 20; i++ { + cpy := make([]accountWithLoad, len(accounts)) + copy(cpy, accounts) + shuffleWithinSortGroups(cpy) + require.Equal(t, int64(1), cpy[0].account.ID) + require.Equal(t, int64(2), cpy[1].account.ID) + require.Equal(t, int64(3), cpy[2].account.ID) + } +} + +func TestShuffleWithinSortGroups_SameGroup_Shuffled(t *testing.T) { + now := time.Now() + // 同一秒的时间戳视为同一组 + sameSecond := time.Unix(now.Unix(), 0) + sameSecond2 := time.Unix(now.Unix(), 500_000_000) // 同一秒但不同纳秒 + + accounts := []accountWithLoad{ + {account: &Account{ID: 1, Priority: 1, LastUsedAt: &sameSecond}, loadInfo: &AccountLoadInfo{LoadRate: 10}}, + {account: &Account{ID: 2, Priority: 1, LastUsedAt: &sameSecond2}, loadInfo: 
&AccountLoadInfo{LoadRate: 10}}, + {account: &Account{ID: 3, Priority: 1, LastUsedAt: &sameSecond}, loadInfo: &AccountLoadInfo{LoadRate: 10}}, + } + + // 多次执行,验证所有 ID 都出现在第一个位置(说明确实被打乱了) + seen := map[int64]bool{} + for i := 0; i < 100; i++ { + cpy := make([]accountWithLoad, len(accounts)) + copy(cpy, accounts) + shuffleWithinSortGroups(cpy) + seen[cpy[0].account.ID] = true + // 无论怎么打乱,所有 ID 都应在候选中 + ids := map[int64]bool{} + for _, a := range cpy { + ids[a.account.ID] = true + } + require.True(t, ids[1] && ids[2] && ids[3]) + } + // 至少 2 个不同的 ID 出现在首位(随机性验证) + require.GreaterOrEqual(t, len(seen), 2, "shuffle should produce different orderings") +} + +func TestShuffleWithinSortGroups_NilLastUsedAt_SameGroup(t *testing.T) { + accounts := []accountWithLoad{ + {account: &Account{ID: 1, Priority: 1, LastUsedAt: nil}, loadInfo: &AccountLoadInfo{LoadRate: 0}}, + {account: &Account{ID: 2, Priority: 1, LastUsedAt: nil}, loadInfo: &AccountLoadInfo{LoadRate: 0}}, + {account: &Account{ID: 3, Priority: 1, LastUsedAt: nil}, loadInfo: &AccountLoadInfo{LoadRate: 0}}, + } + + seen := map[int64]bool{} + for i := 0; i < 100; i++ { + cpy := make([]accountWithLoad, len(accounts)) + copy(cpy, accounts) + shuffleWithinSortGroups(cpy) + seen[cpy[0].account.ID] = true + } + require.GreaterOrEqual(t, len(seen), 2, "nil LastUsedAt accounts should be shuffled") +} + +func TestShuffleWithinSortGroups_MixedGroups(t *testing.T) { + now := time.Now() + earlier := now.Add(-1 * time.Hour) + sameAsNow := time.Unix(now.Unix(), 0) + + // 组1: Priority=1, LoadRate=10, LastUsedAt=earlier (ID 1) — 单元素组 + // 组2: Priority=1, LoadRate=20, LastUsedAt=now (ID 2, 3) — 双元素组 + // 组3: Priority=2, LoadRate=10, LastUsedAt=earlier (ID 4) — 单元素组 + accounts := []accountWithLoad{ + {account: &Account{ID: 1, Priority: 1, LastUsedAt: &earlier}, loadInfo: &AccountLoadInfo{LoadRate: 10}}, + {account: &Account{ID: 2, Priority: 1, LastUsedAt: &now}, loadInfo: &AccountLoadInfo{LoadRate: 20}}, + {account: &Account{ID: 3, 
Priority: 1, LastUsedAt: &sameAsNow}, loadInfo: &AccountLoadInfo{LoadRate: 20}}, + {account: &Account{ID: 4, Priority: 2, LastUsedAt: &earlier}, loadInfo: &AccountLoadInfo{LoadRate: 10}}, + } + + for i := 0; i < 20; i++ { + cpy := make([]accountWithLoad, len(accounts)) + copy(cpy, accounts) + shuffleWithinSortGroups(cpy) + + // 组间顺序不变 + require.Equal(t, int64(1), cpy[0].account.ID, "group 1 position fixed") + require.Equal(t, int64(4), cpy[3].account.ID, "group 3 position fixed") + + // 组2 内部可以打乱,但仍在位置 1 和 2 + mid := map[int64]bool{cpy[1].account.ID: true, cpy[2].account.ID: true} + require.True(t, mid[2] && mid[3], "group 2 elements should stay in positions 1-2") + } +} + +// ============ shuffleWithinPriorityAndLastUsed 测试 ============ + +func TestShuffleWithinPriorityAndLastUsed_Empty(t *testing.T) { + shuffleWithinPriorityAndLastUsed(nil) + shuffleWithinPriorityAndLastUsed([]*Account{}) +} + +func TestShuffleWithinPriorityAndLastUsed_SingleElement(t *testing.T) { + accounts := []*Account{{ID: 1, Priority: 1}} + shuffleWithinPriorityAndLastUsed(accounts) + require.Equal(t, int64(1), accounts[0].ID) +} + +func TestShuffleWithinPriorityAndLastUsed_SameGroup_Shuffled(t *testing.T) { + accounts := []*Account{ + {ID: 1, Priority: 1, LastUsedAt: nil}, + {ID: 2, Priority: 1, LastUsedAt: nil}, + {ID: 3, Priority: 1, LastUsedAt: nil}, + } + + seen := map[int64]bool{} + for i := 0; i < 100; i++ { + cpy := make([]*Account, len(accounts)) + copy(cpy, accounts) + shuffleWithinPriorityAndLastUsed(cpy) + seen[cpy[0].ID] = true + } + require.GreaterOrEqual(t, len(seen), 2, "same group should be shuffled") +} + +func TestShuffleWithinPriorityAndLastUsed_DifferentPriority_OrderPreserved(t *testing.T) { + accounts := []*Account{ + {ID: 1, Priority: 1, LastUsedAt: nil}, + {ID: 2, Priority: 2, LastUsedAt: nil}, + {ID: 3, Priority: 3, LastUsedAt: nil}, + } + + for i := 0; i < 20; i++ { + cpy := make([]*Account, len(accounts)) + copy(cpy, accounts) + 
shuffleWithinPriorityAndLastUsed(cpy) + require.Equal(t, int64(1), cpy[0].ID) + require.Equal(t, int64(2), cpy[1].ID) + require.Equal(t, int64(3), cpy[2].ID) + } +} + +func TestShuffleWithinPriorityAndLastUsed_DifferentLastUsedAt_OrderPreserved(t *testing.T) { + now := time.Now() + earlier := now.Add(-1 * time.Hour) + + accounts := []*Account{ + {ID: 1, Priority: 1, LastUsedAt: nil}, + {ID: 2, Priority: 1, LastUsedAt: &earlier}, + {ID: 3, Priority: 1, LastUsedAt: &now}, + } + + for i := 0; i < 20; i++ { + cpy := make([]*Account, len(accounts)) + copy(cpy, accounts) + shuffleWithinPriorityAndLastUsed(cpy) + require.Equal(t, int64(1), cpy[0].ID) + require.Equal(t, int64(2), cpy[1].ID) + require.Equal(t, int64(3), cpy[2].ID) + } +} + +// ============ sameLastUsedAt 测试 ============ + +func TestSameLastUsedAt(t *testing.T) { + now := time.Now() + sameSecond := time.Unix(now.Unix(), 0) + sameSecondDiffNano := time.Unix(now.Unix(), 999_999_999) + differentSecond := now.Add(1 * time.Second) + + t.Run("both nil", func(t *testing.T) { + require.True(t, sameLastUsedAt(nil, nil)) + }) + + t.Run("one nil one not", func(t *testing.T) { + require.False(t, sameLastUsedAt(nil, &now)) + require.False(t, sameLastUsedAt(&now, nil)) + }) + + t.Run("same second different nanoseconds", func(t *testing.T) { + require.True(t, sameLastUsedAt(&sameSecond, &sameSecondDiffNano)) + }) + + t.Run("different seconds", func(t *testing.T) { + require.False(t, sameLastUsedAt(&now, &differentSecond)) + }) + + t.Run("exact same time", func(t *testing.T) { + require.True(t, sameLastUsedAt(&now, &now)) + }) +} + +// ============ sameAccountWithLoadGroup 测试 ============ + +func TestSameAccountWithLoadGroup(t *testing.T) { + now := time.Now() + sameSecond := time.Unix(now.Unix(), 0) + + t.Run("same group", func(t *testing.T) { + a := accountWithLoad{account: &Account{Priority: 1, LastUsedAt: &now}, loadInfo: &AccountLoadInfo{LoadRate: 10}} + b := accountWithLoad{account: &Account{Priority: 1, LastUsedAt: 
&sameSecond}, loadInfo: &AccountLoadInfo{LoadRate: 10}} + require.True(t, sameAccountWithLoadGroup(a, b)) + }) + + t.Run("different priority", func(t *testing.T) { + a := accountWithLoad{account: &Account{Priority: 1, LastUsedAt: &now}, loadInfo: &AccountLoadInfo{LoadRate: 10}} + b := accountWithLoad{account: &Account{Priority: 2, LastUsedAt: &now}, loadInfo: &AccountLoadInfo{LoadRate: 10}} + require.False(t, sameAccountWithLoadGroup(a, b)) + }) + + t.Run("different load rate", func(t *testing.T) { + a := accountWithLoad{account: &Account{Priority: 1, LastUsedAt: &now}, loadInfo: &AccountLoadInfo{LoadRate: 10}} + b := accountWithLoad{account: &Account{Priority: 1, LastUsedAt: &now}, loadInfo: &AccountLoadInfo{LoadRate: 20}} + require.False(t, sameAccountWithLoadGroup(a, b)) + }) + + t.Run("different last used at", func(t *testing.T) { + later := now.Add(1 * time.Second) + a := accountWithLoad{account: &Account{Priority: 1, LastUsedAt: &now}, loadInfo: &AccountLoadInfo{LoadRate: 10}} + b := accountWithLoad{account: &Account{Priority: 1, LastUsedAt: &later}, loadInfo: &AccountLoadInfo{LoadRate: 10}} + require.False(t, sameAccountWithLoadGroup(a, b)) + }) + + t.Run("both nil LastUsedAt", func(t *testing.T) { + a := accountWithLoad{account: &Account{Priority: 1, LastUsedAt: nil}, loadInfo: &AccountLoadInfo{LoadRate: 0}} + b := accountWithLoad{account: &Account{Priority: 1, LastUsedAt: nil}, loadInfo: &AccountLoadInfo{LoadRate: 0}} + require.True(t, sameAccountWithLoadGroup(a, b)) + }) +} + +// ============ sameAccountGroup 测试 ============ + +func TestSameAccountGroup(t *testing.T) { + now := time.Now() + + t.Run("same group", func(t *testing.T) { + a := &Account{Priority: 1, LastUsedAt: nil} + b := &Account{Priority: 1, LastUsedAt: nil} + require.True(t, sameAccountGroup(a, b)) + }) + + t.Run("different priority", func(t *testing.T) { + a := &Account{Priority: 1, LastUsedAt: nil} + b := &Account{Priority: 2, LastUsedAt: nil} + require.False(t, sameAccountGroup(a, b)) + 
}) + + t.Run("different LastUsedAt", func(t *testing.T) { + later := now.Add(1 * time.Second) + a := &Account{Priority: 1, LastUsedAt: &now} + b := &Account{Priority: 1, LastUsedAt: &later} + require.False(t, sameAccountGroup(a, b)) + }) +} + +// ============ sortAccountsByPriorityAndLastUsed 集成随机化测试 ============ + +func TestSortAccountsByPriorityAndLastUsed_WithShuffle(t *testing.T) { + t.Run("same priority and nil LastUsedAt are shuffled", func(t *testing.T) { + accounts := []*Account{ + {ID: 1, Priority: 1, LastUsedAt: nil}, + {ID: 2, Priority: 1, LastUsedAt: nil}, + {ID: 3, Priority: 1, LastUsedAt: nil}, + } + + seen := map[int64]bool{} + for i := 0; i < 100; i++ { + cpy := make([]*Account, len(accounts)) + copy(cpy, accounts) + sortAccountsByPriorityAndLastUsed(cpy, false) + seen[cpy[0].ID] = true + } + require.GreaterOrEqual(t, len(seen), 2, "identical sort keys should produce different orderings after shuffle") + }) + + t.Run("different priorities still sorted correctly", func(t *testing.T) { + now := time.Now() + accounts := []*Account{ + {ID: 3, Priority: 3, LastUsedAt: &now}, + {ID: 1, Priority: 1, LastUsedAt: &now}, + {ID: 2, Priority: 2, LastUsedAt: &now}, + } + + sortAccountsByPriorityAndLastUsed(accounts, false) + require.Equal(t, int64(1), accounts[0].ID) + require.Equal(t, int64(2), accounts[1].ID) + require.Equal(t, int64(3), accounts[2].ID) + }) +} diff --git a/backend/internal/service/wire.go b/backend/internal/service/wire.go index 05371022..87ca7897 100644 --- a/backend/internal/service/wire.go +++ b/backend/internal/service/wire.go @@ -275,4 +275,5 @@ var ProviderSet = wire.NewSet( NewUsageCache, NewTotpService, NewErrorPassthroughService, + NewDigestSessionStore, ) diff --git a/deploy/docker-compose.yml b/deploy/docker-compose.yml index 033731ac..f1d19f84 100644 --- a/deploy/docker-compose.yml +++ b/deploy/docker-compose.yml @@ -47,13 +47,15 @@ services: # ======================================================================= # Database 
Configuration (PostgreSQL) + # Default: uses local postgres container + # External DB: set DATABASE_HOST and DATABASE_SSLMODE in .env # ======================================================================= - - DATABASE_HOST=postgres - - DATABASE_PORT=5432 + - DATABASE_HOST=${DATABASE_HOST:-postgres} + - DATABASE_PORT=${DATABASE_PORT:-5432} - DATABASE_USER=${POSTGRES_USER:-sub2api} - DATABASE_PASSWORD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD is required} - DATABASE_DBNAME=${POSTGRES_DB:-sub2api} - - DATABASE_SSLMODE=disable + - DATABASE_SSLMODE=${DATABASE_SSLMODE:-disable} # ======================================================================= # Redis Configuration @@ -128,8 +130,6 @@ services: # Examples: http://host:port, socks5://host:port - UPDATE_PROXY_URL=${UPDATE_PROXY_URL:-} depends_on: - postgres: - condition: service_healthy redis: condition: service_healthy networks: @@ -141,35 +141,6 @@ services: retries: 3 start_period: 30s - # =========================================================================== - # PostgreSQL Database - # =========================================================================== - postgres: - image: postgres:18-alpine - container_name: sub2api-postgres - restart: unless-stopped - ulimits: - nofile: - soft: 100000 - hard: 100000 - volumes: - - postgres_data:/var/lib/postgresql/data - environment: - - POSTGRES_USER=${POSTGRES_USER:-sub2api} - - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD is required} - - POSTGRES_DB=${POSTGRES_DB:-sub2api} - - TZ=${TZ:-Asia/Shanghai} - networks: - - sub2api-network - healthcheck: - test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-sub2api} -d ${POSTGRES_DB:-sub2api}"] - interval: 10s - timeout: 5s - retries: 5 - start_period: 10s - # 注意:不暴露端口到宿主机,应用通过内部网络连接 - # 如需调试,可临时添加:ports: ["127.0.0.1:5433:5432"] - # =========================================================================== # Redis Cache # =========================================================================== @@ 
-209,8 +180,6 @@ services: volumes: sub2api_data: driver: local - postgres_data: - driver: local redis_data: driver: local diff --git a/frontend/public/wechat-qr.jpg b/frontend/public/wechat-qr.jpg new file mode 100644 index 0000000000000000000000000000000000000000..659068d835e86ccd58f5a429f270cce802590ec9 GIT binary patch literal 151392 zcmeFZcT`jDx-U8*Gy#e9PLLu9B7&gwfJzr>(n}Cfnt*_GFbIOu1q1{!AVpA`^xk{# zz4zWbgyc-W^{utnUT2&!?!I^bao0Xy7%yWcg!#6o{GQ+Qyc0W$odvEb$tlVKI5+@+ z1O5Qm3E&|>gik<7fKNn7KzN0S=n5$%DJcmFDIGZl86^`PGczL{BLj^67ANdF&vgdI z8$w(>xA_GG1z0%miQT&+cI%G7o!>XXA-Zyfl$ey3l$7=kD4x58!D- zaM%D`avTUb4z>}1f#bx-`Qrur`NF}4;DG}sxexc|ZfQtix;Nn5>@$vA$Z+nA( z2k^-8DXt615KyW=BV==+5_}u+jfnk0ULCbY-!8{pQ%Bz`#5A;Y^bDLgxVUff2npZ2 zFCr@T@R6*Xyn>>V=2If_VC^G6(bWH5$ zxcKCh)U@yE89#pJ7Zes1mz0*3*EcjaHMg|3wf7GU4h@ftj*ZXFFDx!CudJ@E@9iHP z9vz>Yo}K@m7Y+dV+qA&nf1B99%!?eH7cL$i1ds6dyl`+`!3H76!@n*-Kp~?}_{@Ql zP4F!d)q{v{d39IV?`rH)n>zLp({KpQaqj(|+8;Cfe>O4S|5r2n$He|)UgH2M1P9za z2sr=;PR_ZLeTe^Rf33k^GVqrS{3Qc_$-rMS@RtnyB?JHSW#B3m18@NQFwH)g!xg36 zcUym0sJ(;3f5r0x#e1Yhxs&-VguLwEl zGPj}7U2QkFE$FPBbwM-+Ev~qh^(IFwAD5e-KZ&E#QN3txbb$8Dtq%(3n%QN>{T2`XDzY88y z)Z|GP7ARuuS;X?!YKg`t&TScJnNs8+Wgm8g_%Pu;b&8{Gc< ztX#7+zHD-4w8@=dwM|@3)@SMpfek9VhE94)H*#qX#w|k+OVj{#Zb2D8(e0;TqYJ@fjA_cxtH46{`B2 zYd5QgKA?1f+Li9O^@@Fc$@R1K^bfa&*Xhg-O>NSlj9u(vUsztnTeq(#FETgDPvzjb*}PGJTh8sm$u3#rg?5u$H;BJ$PD$R{0>?}VnzHY#^SWJw#)Q$szGW5`&5Wui4VNF@?zbEU7+yhmy@|@aR~(g_a=x3R?)keFw_ajrbW`rr*>-nllXM0$5q z;4x<+X$!MBxwgF>yYk0r)109uMYEr>mz(^Lhcz;+lV5AcK)<^EEROfD zU?PpQmXzC9Xl8%dCDQuHsaQd!ayd;)?VA~)tAkqYT*{jK{_xhSA^S1rtBC80m@ zsfc-dQ^U0#-X`WUE-SYFcn#EHWz+QcNmT)3&sLLnGD8dY1Tcg{YA>W2P`BcHt(YBN32A0VKBT(VID)i z*uYejC1g94;Ra6!ijGYF8@0|c0tseFJlIWVT0051=2JL3A0 zmD-iumEut+P&)ThGTfP65I1;ZL(9yv_nXVC%h8V!y^<0_NpIdOcb|%>NKq3L6WA>d z#MBSDa}&;K!Am60aFZ!3dY^t6p>^DRV5&YitP%I^4RujqpfmxG@|O&BHPpBzTNBeP zdh0e_+0J-QT{;{2^3_f8c;#z>4!jdow}VSa5*DbxIkCADnsh7xrs=+e_Bc9#=hJ+H zT|qI;^VX`k_OCqWd&h`kv)R1~skombZx~TmI^-!8yJ=sDo(PW<#*U-q0@= 
zf}JTjP^Go=#)V9-vGlSWpdu}pokB&xbjMDo5F=t1ze{nRwv8` zek;oN-o8rXpaZ*G{DBoXc%oY ziv{RMHyn)G9FkYa>rPAwVM2~LgAtK*2W$Nm9f{0V*}LZpP5NFHNec8s20uG`9=J6vs?HZI)auCTVi-eM%CWZsbA_(Xx8h0uSc|BXi8Z#>|V)-d|v~}w{#O{4BNB2{d}LS zM5SIxna#lG3}R@kNTqZi$sTIe{!qMvE-<7P&>5W;>b|#7Uy|+^oTk;&hD!Au95%j2 zp3Jy>5ERv-v=Nn?EsC+<(GhPfw*F>DBY6)`^!3^MvnCbhRQ2zx^SC+gkNa(3v>Fb? z<30!w*Uoc#SwW@29_iws`e-U4jAuXNfy|=j8#O4C)(!D%jagM<8U+zUZq)2dTN5qW zGC~<&O76Fd4RAbc?Rxlg9saEU_V;khNN(L6ZwIa0!kntnWOK$g3#!+ql-{hKecv_Cwg`tJss$As{$17*VbpnBV#0AiPK~^$owq|` zYpufhHG_6#+cVDa^JWd=mHuBbEP@_1>q6nFO`vKH{HdC#YWA9AEYR}x?vsl)yqI72 z^S2Ke56z5sa;#m(r6OE2ri(}UOs$wMaC$vazLl`q=+Ks+YSwiWXANOTaObD;NCBbL z*_*SwX?tr;fHM}L!*D;GTjlC|E&3?FGBaP1YR?*qIAswdn^HA#yGl7nkoNPGV8!rj zTDayX_hXk9jJ}+xctQ6vGo18jp>o%?E?APcQAZ3_^h%g};d52Ole-avLS+|?EK?#q zUQpcy07sgI|3b>Gb4Isq&PdL7VB&|3;H92;%c(QV*kM-g*rzl7;|j4;s7jrMU`u_!L(veKdQkGVjgUQPAF*}+)OL@nF;GJAbI!BU zAlB+guf6*NB6!CuYtk@fS++KY%9mN8tvrrIM+B+4T$f+6b2RHpV(+%y4D!tXl_J3N zGs!$dQm`=$USa%8ObxeBmI_e+AK26^lX?lGEyb?pL{olvVicg)9713IzO_;%!?{#x7l)t{*D@II+ z<*A0!Kz_`mE6bRq4*-2;Q1}OS!_^U3fZ@0yo0>(1g9Q&7?-a-_!R${IndL`Bwv1#s z0gE*Tt(f3+nRKh+4SO5htx(!u8-x3b5|N(6{#5_N1@P;{)XgaS-SGB=0bI1J2z3Ni zRwSOnmv(bfCa1HZBD0my)2)JuFq_5aO5H8x>ECRZu7CRY1gd1-PZWmJa9`N?{B;0Z z_nfOMUw=N=9!o6K7vm+p*zivlRNlrKtNfOsV+o&?qe7TG1|6n0XWKq{Q0f++XQyUg z53*gK^nG%3!!hhpBE}Aj-R^+Yu23>e)Ih z^YTM$@yOA;HgVN8WmR!Yr4ezT52Bi$=IWzZ?#Sb^c+r5-R=NzodT4se-x z5Knf-535%aFk3H%sF|?v-Vjy#HD74PW|Yoa+cta^|!VXLMP-?F4PkP?yxS7K%EWOsiSu zIO&Qd7>04D2pbP?ll>zsUS7_xH z6P%Dx%>GcrjP0`y)shenIh{v^`3210I)|43C{wM)FYHLrEJ%z5|4{=z{ZY2d^T=Bv zFUl&cP(nnGHgb&LiFELSmaD&CLi7AVSX4Pje3gpj6An{O#5Z?3p0KPI+>GgU?Hk=j zC9e{iM;rb6b5!u0r2CXAqD$Ed?1Usm`O?gb8KbXsgHC7Ihkg!u27$R+zpoGbXeC(K z{WIC@K%n*_Q}guciY#nL__>>tLcsCVa{1(iIq%YT?JAk72QWQwWZ?ZkQDeq^9}9d4 zfgEOCK2&ux+U6QsDG8J3AbVA{SC?Jkmh5}yV{&uKFS@L7dSojfd?N!3^r`@Ak>kT! 
z6I>tYKOJ*mfxV}JHnw!csOu&vNqFE}EeR*Lm!x+Lu|VnJ0e?Viy?@U|xb)Sc0hQsM zf{iG}lAOEENQN~}9nm>B|CU&66eibW6AQTVg;fvr9VZbjg0gd}r5>p&&5NI%T=L~Y zQ#tCy;7X(jIMQ>olEW>7?QaV_v(ysTym=|a)srVxU)^Bq^=X#Mj&!)Rgj_WqHi$DH zmTfI{WF+?i@Ngy9A6v4NB%W^R<_+^{KCLSKq(t-V-F5etl_vv7pI?5DFkaTnHuyvq zXQKLY+2_`&%T+PuxEsF?8XE6-)eCfcOGSUn(i>kVVvUHlFnO#pg9V24SYU&5Mh`Y# zef0jRrnz(?i4+?eI{Z|PtJ@R%%5TXYA&qSIxhu@QJsNf%HK}cY^1;BsVzVT z%&I|Ff7`Nu=O(0P30bn?c5OBEQGZks_un1-{6<~tTe_-fC!rAjFE708y^ zGjicSrg|%^HK?u|EDQ4|sNsr@uu~s)I929N#P@k?vK~n5Mj@iQeM0udwCzy)#pGIF z)-*bJr(4-VAn|L3D%VYkI`2%a8wjVXey%$hRVf<60^~;T17;(6En?K3wvC<>4+L>C zzR_A3Pk;Ra#@*7ZeOpJ|OsQF}hClXu=d{nrr=J!(&bi8aZ!Hghyh#{IFhF;m?imRk z%5x$=*pGbv28QY4rS)r;1`C-$#$+6uo8cFi4i>{KqPxgn!D_ngI zH(y%742R77vu(Fh!^ZLD%z5bc-C5G(fy_C?84gG-#>dUpoDnEv0i$q?Z;`!BK}^S! zYliq!r6@AFs}=@pVod>uc@^*5x!~~WKWu|_Q-=LeS{`UthFERS&XJ+_^~#Eh3i(|b zlrKj|AyKDTK+I2Jz>Y6xrM&W!@~s9KfBK>%K5TUDM(T);U|g)I!bB42N1&?RKG&7q zZuBmd#;TSCAvjwn=nh5LLxNre+CWxqt{3>aS9{sX#IU071{M9R`}w3hCbF5;dEC`` z#cw=o?cR&_dU4n5agK^x3Clh=S9M(aWBpAG45G1s9z%xVLJbx$D6$19r>%Qi~}II7#U4h#GqO!6Xh zR3fV>mHnG!jS|q%=gdeKxv^zu=AMAM7D<=~nH(&Cl5YEAKwh{vehfK86S95&IwyRy z7z-TK^It4#OLi5fK7U=}+xkdxw243(%(V>S=rqvAd|zKuxZK>Q@M8{8H6F^%F(%== zr!)25K0+!TO(@jG_}EBll=t0TT-{QHaqUslU9AC*n#vLvhhuY5%O1qOhZ@22;SGM$ zI-FZC%IbIr7jg(!Z=Rfx8@g%0xg(a&tUIO@vTwdVyD{9qZb3lz$=zBUcF?6Wh!$mT zkcjDbzl8f4G(w@0X5sFJG^^Geo3=zo(LzS_d{ja~^j-E7jo*F| z-iV+iB*->koreKm%W_-nO-ER5N`$}bMf-dpSr8VAcG0Dm^YJ78arMD$g zVdm9Tw{_+=w|J1S)zEF+)2==w6`oTz+6S@BrFK3Q+`#I4Bncn$dBVH`TDShjVS=E z@;GIyPQHvQs`#p0=So8h;r*_%)VvtkWWeW#YDfYJmW@J?f_t%1o{f5&Hn|@Esh&~U zvZKA_5Hg7Zj)WH--}LK?iE8!Qe|URy+Xl?*QKgz6@`d?HUg)?k)G(}}i26b?8B1~dzYKmbEg!-& z-iZ%;Dbqq}PnuWMbVZF%&AxUa|H(2I=o3^U?8O2U)!b`RkeKQ5WqN*luCwYM5}-A4 z`sHEUtjEEcAp$Lg3J0^omnWAPrFo<^^o)BCIV#0}d1a$51F{8uZw&88yQix-q}%&? 
zx=aWcW|6ccmQOkpj*PGuIZ53)UHNuu(@4wk*hPmSc!D{yjU+=W>NE|;TwqU&9(&NsciYHA8#O|Z32aaVt5psGu>7Rv2bjf1kXu#X?3*7F zaz~GH&aUjRyJ|Ck$PKVrM~=P6C}v`T(Fcf2d~V3_-w6xe(9`feO66VVPmVJIWo2LU zsIwkJO1CEu)i*YpT(k~cx1M1EcA7veV4RO!eT~zHY*1W-9rDj0XBKk?=1LkbxmqF( z>#V3^TwNBBW6LIb0~Z@%Ni5KGmZH5WkDbjTEWk;#fvJlGH@Gzevd;7WA$NYasLQzL zLI3R{6^&AvaZQly;)W8wYP*d@#F55X2ClS(`DAqfOpF7LW>wO z?6r)$sf*zQYP%#!Q%872UEOg`PPOLcm!mJi&_J4Fa;Ay(R~=LtinZebvL<2{H$za3 zBSVG%T{%>oT}GIaEm#$0WEZ&G)6B5tJ7X2ku|JVlUy(0gF-d5maIz*}FL`6c=rk~8 zZQQqDqJ#HthR9SNeXUeuMwfRim3kfVLtMN^bXw{@_<$E!j!A4WPiRk8gTZS6E4=#t zcn`fK$w$sUhNF$V-&NhTge>%&YQnD}SB;{u04}H?^Pm#?-`yj`_>aMNZ@@6Os~eJi zq$K|3!|;Fkr;SZ9?*F(I5`6_MF9}Y;Q1vvz&S}j-TIe(KwETiB2sl0Xmk;4{kI^z< zU7CyP-~K80*J=_p{P(9I#qkZy)&DVwKl+3J#~}acZ}w70sk(0`?bi?=M^SlTkdPxO zli$RkXBgD_8$$WbAOmOcsd10K!2B^|bO{*|V_zKyi)iz-q@c}mJixmK9fMg$H(WE(a zkg!)uvnYpz*~8p8*lbI9jGX%1@k=DdWP=|T2p<`IbAON2m}v!adVKCS&xf;85a8iH0(h zukf}@CWUI!0xFfvW~^HLltg{6I?(c5BH}Xli955&8;RLka{>v-{>kil(18)*vHu>f z_3?^fLBSVQzQF!*wn*C}4Fh`SS00D08^*bN;qzB^S%U43hriFCpOzbJV*zb{hZwt+ zj9KR+Dx8BI^v&E4pjo_+!K(wMX2f+DF$WJU3}-Od&_BwzDn5Vp;*o_Oi_2^C+HnQI zf+K1b5an5;;19_Q%J%hg;R)$a}NtRpsQCFRL_>u zkh9{*z1KKv$O6#CY#G$Wl>2X26_aFMekcxlNH;{{*m;FsboK7rFvnLC2lMyqYG~RP zuF4(A6&O9~KX)xx)OZ^5!-XS-_s$~apgFl;T`NoauJXKOOZdy2xxp=M-V2KFifnX2 zvZgMxlpoz#!@Qlk2}!_8vas>tFY}pG;tj_!edBrx!<{1vMvD=;@9pS-vRN`W`UMey z35LEI{sDe?y9R^*@b_Bx%@BDpJI{s_evkEbU^=0AJVHTNHy%vNephW)qlNZDj74@kJqoqEUe3z}JT)aEOQI|c%j}^CF4PEh{>XSb!YC_W zRO9VdtbZTdUWwH5r8@zt%?McYFxymzZL5+kpu{#ja2vq5uOUv!X3$1}x2mzs-o;#Z z-uzMCU?E=#rQVMZ;}q0ibypnoSRGa!THXwGc}bpbWm`+}KM!FbQwyw_8I~&CI@!gsoI58#KHsfbSXkYhf z+ju+h{fVHoR9tekK{zP-J`tq9tn7S3Yo%*{9d@su^VVTl{hC-9@`tlcBk6Llm8X85 zOclU;V}hZ=Df(k^K)GsG{h&I_OhH>%e6X_<7dcsTUV1#6XXog#ONdZ67WhM!;Aabe zm!%^~>NzYxBz1Qkb#qVOQvWiGafdv#5EO5Ao*0w5&x_QAc&}MWnHD|$|0RoS=-Q-8 z4~4kttEk&{b?z(h%{~d@cy3C9%*RC~pir}ryyR+KED+$omySN@ui0e=3w76|n7v*P z7pc0DAPw8dtz}TRs%u+>3UT&b-(!eQ z`*(DTgVhI`zt~jL+nL9R;&Ly}X$5lizPdXYRioc)W@BI;`LxN@stw!+c2EeslLP9=&Q6MD4Qp79{4se 
z)GlYlVKvql1+lr7M9{`Hiqpovs>F?h&*!$%j0irNh7P6l9V+W~&1%J19O4E-Sm?GHf2?wU@#kVkDhKao%!j?jM6rU6YOE=7^u9Ps`6v~AdBW#n5v6pw7jD? zH$*#VPZVi&mEQari@!G;uC%={!A^Y~Xc)LsK=e?9!~#eSAIa*VJ?N>GBn;z!fdwwk zKYI1$q5!XIpT&)tma+=bw3IO zYO4q^-&FI%2+`0JT+S7G#GWogI()xLZy~@G`k9c+ius9XOA=QR;K2#<#Vo29YRI&Ux_cab@7(*05J`ekAgXKB5f9!BBln@ZJD1L-7)7m0_=UK# zZI(e-j`c4iUAc?204j4U!e^-3lbx`Hk#w}$E_Gt~>qkW>Ip6&Wr}0iPb|wq%N|)+%yG1q&o|+RnWqh`X>?3G)I!{!>@P*k z)3chqW<{k8t`xn3?RLe3MYB zaBfC5h1dP}wG7Vn9S+CYa+?^;bB-ImPnxtO1tJwUB0Wt@XIRj9Vk?0;&MK6eLG8RP zf?cHRJ%PHBO$wW;B9d1`7I1x}9CLrvynvqF*hh|_YtD&v$gu$FSQfJPO$+8y($nIt z7w5CtDqKmf$AW?Zf(rZh9*0TeSCx|ya6?i+n-6Kl0@jE#F729QO7Mh+YzN}@$OQaa zCHwOQ1es#$Okv2TDXLwjFWA0|QBY zvVa?W%K{6qVgWKNFro+EO{YgPb2B#xgP~_3FsgGMf?JxYNKnaaBgQ7q&O{LTnT7UBO}R z_=HRSVOx|n{NV;@+)?GQfSP?Zv@jNUrfh%7P_#C)+f`n3Nw|sJ85ofPYl3I*x_LGZ zmlv1(X!2zKa^+hPzWL=U>km^;NVoXMyUnt{74`p|?SgS*`e*i^-Tq zBy;=?rBbjpSE3Qz4fRSByXS`Qcv3gD2*2ZS4aP|3g&q44pUEGCa5VB8{L-Ti9qMg_ z00&UB{}kNTTwQoX5|m%kGU*76As2U5(ShT%mwsEINz@o_&9VoJe_FqO134zpKFadF z{9Sue8H~70sucfj?KrNdXSl9NNp-hz{b!@|1S+NXBnfJiTq%!XP?pC6N%+5Ms^63t z@^6%w8exgLF{q_~T4Zm~B1eY*X^eL+(4TO&26aCCon`ui^hx=^w<$DqvzSraTU(SnKUKp=YmyN*8*)(U|e`gK90mFNg$4)??D(ES&8zIioT3Tn`PS^kE3Ff84ghrGBuoC1yX%78pqwE*HK@j3e(A^OWm5PT8@0sRirETXOpJG4eq z$`1U2Mdt2?6}Mc&8GN6-(Za)L@i8HGjtxf}J$I#UcZ=0_32g9>*;l+4F%}(&=Sgl@ zw{@5zVd^8bvD<|%&kZ=Nl3P-LQCS!?FC8~d-zxy&NO013_MhcHBlm)10h12K5K)kMog-S5PV$39h>u+C7Ue?jwKdEw73kx^7X#U>fj6eC*kw6FYA=@w zMVNhRkJ=zi^0m-2UJu$~k1% z-aEf91VH_sLlnLbEt;Y@?~v`=F|H^1fZrHd+r(+g4GvN;pd!-~8zG^vK zvnvG(VinhM7DzC4fHB4|BxcZKF9^yJ&81#w7tBeu1%^MoYN6r|DUrh&g{xc;wJx#anOS_~X`+!Y%jO9l_XmSWdtUS#LYvbIu@~i>3qJR+A2hGz)fuob4%2 zdEA9F{hayUEl=Hec2NFK<9^~iyGW1b3|IV_Ty^wibj_k7XeBn#pr=etg3=DZ;c%gVhx*;V7-M!MbJu7j z-;^P&a(b;sI1XBB;f@*#H<;3}Fb-W=`G+`(JRb$qvX<(*Mv|{)An?UcmawB{)k}t> z$v;gceF|7moV!I0%7hh$M}V^M`_9(~d< zM>j6aqb(9l9e8l*{HBe$_I+RE;*;K*O&Dkjl4FW3A&_~d{D-sfYaJxz5+2Vxc(2!K zu$dAG?*X|bZ#cj(RAP_+(ihB*XzDb2uL6H_qW@spFCNa3=Bw-uNJr{exD*){Fhx3g 
zjx-$|+t=luabiG(nyZfP0$hZtTh8O)Q}Fh*j2!r28)glxdEXTV(E~FW8(e~3&-Eb| zC<(vhim9&2{Rp?%IpZydT~;F3B#MqM|C90kXG8p7z4n5D28GadmlC7Y?T-QB?G2?1 z;%dp$E8qYu9)R!@aY@PP?Jte*7QTf5gA@#Cs1X{-RqCywd3X`%C(dD)-in}~P$TUA z=f0_Hb}umUJ6FM6kj4lo@!2NYpo8BzgH_)D4XzNE423wym;b#H5(vDUwiXa1?D-{u zuT5m?2r#$dd+RNy6V8(9^;;eWdMJ8{i~&a`=4>8bXd{JGK($yBwCt9b)*P5x;S;)a zRjDZR+6p2mU395!tM@}C47aW!@U2FiMrZ1rE$-Gy+-4Cq6EQb&BoaQ!{$mFmQR2G< z?-xE58(E7&3 zxcrh_zd&#BZG7Bz91_FqtydK_R|ln2O*^A zI7XLLI7_hIo;M^Qog1Hv!2Bk(AJvYPdrx@@fVBTk#tj zmA>6m3IuwKDIf2=TE-bAc^nC^cyJUbH9Xy{9qSh4?0!erQU?)PYrG?1aCZw z9-cNJj8v#Q(~IuC$v$ZJ(pv@a7UNEO{r5-5boTCrWQqA9Y2-1InpP<8USZzoqJg5Rd&hJ8+Pt2ni;h6s+0&^0 zY6dhb2Mv zB@Hw3y0dHVE5x|bF+;R|%{rEmMwElrVUmt2^3^@>snMMg`{80QcQ9y?*(;y8T|)YH zmLs)SM7~*S`<2_ysZKYnV5qC9q{)bf`^D?9HLf6Y&9`hHjC`b09DksBH+}-(kSuEO zu|SWC4oG%^?L*E?-5ZL2FZxvEi`6`lkg|xyXg@`EmQ88;+F6HQ9T{1`eBGG6Pw2~A z`%D`#?{}Rj(-x=K5qTO4t)%kwcoyTC$a?!Z_&LLC1ey*+piMfhH57I^{1TqIZO(VQ z*k-!uEJUwswP%2g2h@6b6|$3NQhQB?@T0s(W7*#Ofb84POUm|WqDc1yVkAPX2>O6n6`6ZiP)pA1GHSY<4%rg(-3g;ha#% z23Owqc4jV${UDV2C6J7QJJ;0JY?bPsinWT6n)HDRcMx}-s|zbn20cequW2>0BEx+h zIi7aMR2Erp`n}kvRUorSp44ggU2r;B;MGOhCn3@nhn*N8(E~Vwo~od^z#vvG6r|I^ zrQg%Cc)UjznrSE`_9X8alIQku*!U8sNg0IWri6Kz&0NTc>TdD6r|O4l7EdeV8=j}8 zrlG#-3A{ItStAaUpg-TRyV*AwU+=qjns1q%{mevrZCN+u5$f&4TU5fxm+$~M;fEeH zX9RL~eg@48Jk{3h$z7BM^jBKFSZ-mVm7tCFSDfFTxt)9^HkR~<}q!9VnrT3(& zhGAE|L=h@)I!2tjW{n<*rn$41R6P&WU1ZzTkVY#C;cx{@F`)&@_o$be3$mNO8+T&N zi#IZ4$%a{(HlIZ#tn6i7@CP7>FcUy#cI=ib z%A>!dECdzV6Jksq6;Cprew4xU6~%K&f>v$W&KLQL@Ga;jhdxN=u2mB!jxVk8bB(2) zAw3iiZku+Gr@^0G3IppIyaC(_C4h&qBGZ%6qO__sXqu?mSTBJsr8?Ix5D57hHy&d#3?IX=e81B@ygB?OD?%ri7(h=*$^z92=0uH^3;p`)3wk8TlV1 ze&tcJ-*6Cc5nBh}=dwf|LqR3(Q#KF|olQKWEQDVWf*5}_Eju1q`B&5xeLT~cc*cd| z1o6624<^X^G}4FSpPvj=-E|7GI2<(%VBhQTxZIG;%^5$$zA_q&rHoU~;(<+TkYyWG z24R;LpDg@55D6kNA1Q+0gk8;LV$EXCD)M*_3#9Ikj2!I~{wpB+Z+i?8)aUmh#cd2O zvQ4kIW+oO3JZeKc(`l*s&x}C^kV}4<3D)HxdO@hau6l`ERbDB747&daBnT68f55?< z_*wlwuRsX~hHSl@tGOTsf%fW97V*p}NW8G01c31JrVSQI(e{y=`90k>ke>wEO>nZK z5BM+fH(H8Zb 
zpNC@s>E9eWc()IyQ#O(_^;<5yy2e%#r8Cx~3AZvj9X4^`{$BP72>fjQO>pN&O0S|eqjr_a6`c5@NZ`xbWm=&3xxg>0@=!G z#)?i(PDCtVKth(6x1Jiq^mc@PVTf zVw52*S_t-!&O3g0o?^u?{UysveLdcJ(n(LDqOq(NQtdPVYT51&Z8@G=UlG>+p#mbN7%@i)78X-gTN@-G*=dw2Odjc~=gMC^&r-5Ij2?wC7$ znSMaf5tY`6(sZ$HSA?l|0Un9I*-J&cZ-VWe7-fa--I3>bc}M-7HIdZpaFZad9=lGc zA3JI0%~sZv^j=(4G>E#itQ?&sX?DR>{&QVp_S;fMO9drTrS!SUHW8(CSM&t1+}aKQkZm`+>z}3X=Bu)=pPeuLM4It97mvwI(ru*( zaHT40aGRJ952&QgkbU;EO(OQz7j%VN!{-*;F?RY*$xH}_fwm5D(4z(B+@AlgivcUm zvXI25QIGdtgwv#_OuGM45M`!^IzJm7E~QJt&!Jt2&N@Mo2)Ayrx~E=YjFyQidLd_a zNPGpZw4IRz_2KFMY9o1cNK^c(@}P@VVL>M5g(N_I+_tOZPak|g;I*k@+l~6$$i4B% zmIem0xYxG}r=ThYrE1bKBnk_!poq_aH5u zC@QSdPk*E-liuE&xYhr-U65UqB!jEh?l~;f+sW8zx21U+e?*uu6Gp5npJqfwA%rkf zb`lH;2~G5*-?`itAMOcz!?wyov`?uUqaE+G-+=qx3+c8Gz(N7%LY=6@amqRijNW?9 zDOnBL$OT2R7Zbi9d243XskM!fD)Oe>7(0B6^n}Ux3VNR#vY7bcxP%5JF_Kc@KlgE_ zbI#Bt+TvFF>Qi6I%!J&sA{g0fd{b@d(4mnO#^f7b?7bhn_;??2Ocdxj7N$WZtS6u2 zKc_B4pn+g45SRoNIkiYbB#s?u7F=-5SXLHR-jV&a{Yk>&Mc<5M=5nldXe&FO$HX_Nz<>K`oB3BG@sT+Eikn^gviKK%=$iHjc|vNQUUOLb z=-UVlw{-3PLrLQ83j=c+_2x%E+Zj9Y%uF;-0@uR3eWd7fQs!(6PmtsilFWswyqsvk zUor`7r7yTfxj8O$et|RWJq4Z1gHMQ4F3^uLtiFM{fjEBn$2|V8-QifWDC8@D_38%Z znlCdZ&$k^CSjZ$Dq?u^944ylHu?k0~$ zo@vGPW^fH|z4X57l*p3J$UJ`srYneQqO6nV-g*+z_Pxt~XJPW1O<|XXV%FN8WmlG>E5+Id`RAUBr7_S` z3~!)#yODEAXzFpvr(i}PA4Ks_an{drNiMVaZgUQJ!JV0&^giTROO{Ggg+-sg6kPgw4<6rpLYab=d49U!H${arQ41)i|~cg*kr>VbW5X-O-8cN zhUuMG&Qq^i*vp{(_zTK;WN;;H?$kEB(#W1BWm#%C6JuEBPj6^LeL=H zpFO!GR{PT+;nr+^u@RG>XG{+qzT)*xk?2e>wsR8acoHslGQL#JfSsY>i_*89ZRMn6Giq>6eTH3_AS{`l*-O1$sR(sVJuPhEtISwWSL~kPL?6EN60#4 zXT~xHW8UXE>iyZi-*cUFuJgUl`ThRrGFKNf^LoAR?fJML_v4y8Tj4Y^gKKj&?Lyd;h8QrIKNO9fFpk%o;2k=t~q$a|F1Zs&` z&uPTop`W1Od5;ber23nGO!+DF?*$<7>wlDp>5Q%K()O>{gO-u*z}>$fxrzJzq+FUw z+y50P5iCgWe~0M&j^g1X^<>q37`ZNsLXsTK?;=YQ&uq7*vWLoF%tsh%JPwsKf2Vy} zUm$$f3P@QfsAaPB-kKC)Z4zo7=NM$mxNu`wqlp(AKzDeX1x`B6BMd-U1gI4oj?@#A zAdM|-g=@rXa-frRt*#6dQJ_U4j{3qh4I~S<&?LxPN)%_{x)VAM4l#T!^_xOV2qKjv zkZDu@Z{Ew1qyQe_UK5IN61-P$K6q4z)x%Kp`T&2Yb|jyx?hdS{N2+*EcO1K`4+#0+ 
z6tz>HuovBKUY%FD@Zn>7hlK!JgWMd%&qtGjSH}HE*NSE9zjU^g4*ds*0y%-u@NK8G z)9li=w&0+h#xwD!nL^EhiyG-BE0Y2ATGS7Y4QLPa+5LAUzI0S9s4CP>is#XVRRd}zsRuoQffLcnygY89y8=@%(pyZZ;!+Br zuf&Do5#ZT@s^S`@gHCyN5}T-|C>lEsb$D~dLH>Qy!a4R8#LMr3$7=g zrO%W|(=zD(bhKlt&~RKp&`4G?<_MHaG=R5u9Rwh=OCAs_+v%$KZ>WeMrgdg`u+?cCs3UGgoHv1pPJHAy~e? z(p)OLIoJq>ti}x^*YX1kUYg$pfXRqXqt|%trL=q-)+isAtj9-A`)hqmun#^Ag(=yQHRTT?oKVW5e7)h!{p~KDyrsqXDhFAO}Z~48gQIfLbR?PyuE8Ld}U)gvR{NUAK0>{&O{Y!g6$OTR{AC@MzzAJ9(>ch-x@!bsy=MpiA zRfUo9Zn-127kMO0f_@3!XSwnD_Kj4kxxmyTjhHl$?qLV6P)|AqD*WaiXyqT{LTzoP z?H=;>@kA_dOvxL68E32Xv4xei=YB20&|7szT*J<>O{QsQ*1=d7T_L4^x_5vuVFL&g zYyad+a19NlmAXGKL!Gc@JtZSiG4l47Cy&92bNYHmMMD(`SK5{ql6IEJ$0=PjH0Ywy zPLOUvTooaoi4_?>R=~-a^`cfp0E{n#*xh`w!_iBmpVVPi##0aJ4~XX%%5>i{BD-0WarB#98Z)%-O@5C_0@;=We08n*9dUqknec(WaWO)cv24q2$iM6v z@i_8Ks)}v?lN#s_{gIln>f#|9o;Pcec5`$%#W3Z$CXQ}0M2lizbo}t8OfbHWlG%k4 zKoE^`Jcg=wA-zKj0k>&FoU*N|Elir1Bs^AUPg)%8Q6p*5rAK5eKG0| zDUGMf_X(p-$6X9Kj8Wr73&epPv3KYgA;pkMqP-ja3@ z+C?Y5-)q&L1wRC8U!spkk_{r zM+6$`P7lc4OIl6L(a9a@W>zWx&SA*(FGkfJ|0lbY{lle-$nA=F{4`?Q0hL^~$?btz z?wvYj*F@kb$|y9fjkTpa8_fPHYob~t;St}3BTo)&>csLUxz`~g7ZPv_5$^WTvuajLHWQdJ18uDIU}Gw5C0Y#)vlTJjYBiN4LP z!SB6+;9Z(vKt9M^)HQ5RpD+y)cr@r#if^lI;LVllHsOLlP~vi90pKx6e2Vi1An_r9 zcOGO*N;}j7M6^a^J@8@kUgHVebB_{oPz3OWgLrn3c)NF5L3j9fw4popf9suwhU@9- zXA+4ha;^G?#5y?y*Wa?+nx5cnElKG7`R2)_wZ)z2BNrQ7yPQOQsL(|%)7`R4*(Sp#R(e``x3{r416?WR3{ zK~}JTK{RL4==p!8J^#OXO*6?sDa7jkofvu_zt*6$BP4Tb4}|^sH$ux7AVz-^0{-Kt z+^0ez6v3NaEFqBUbvn%wK&N>C~UV zq+5N>inb@=2i3(+{pR&oR0Nj)j#}oq){++D?nAZWLs^+AB1r$S9-Z2@A|a{b!UPzLfeu^1lpa(_ocK?L!?)B=sk*bOy-$%ST5z+3(c(^_w9Y!QedL`&gQu3LT{Vi z`S*|2V$|^#SDEj>}MTg5+SG_cG0ErOLSqCV+OEd8d?>*&TxHLIz0L7kn; z4oIQYkYriGeJ^&PU$$Ib{z|1J%7A_6tA^C|hj@bRH zlkJeQ0SBG6DWw|@{L7XWL%bH;Phs}@n5IH+3}sFePCf&^pdN+p3fbjyedpizTNetj z8zZJWbPjpgni_2x-K}m9;61W&mN#D4oAC%gf#-A4>!DcRfwXmP2h{S}Q{=D&KdE{^ z!LtB-X!ss!tA_~1w&P(Th2kl8H$%H*TaPzgyx6@rt#qH4AP`RAw+c_%sToqiO^|QE z{}n+V1DNl>x-|{F#cL%kxRm~zg|5|}#Jgbgz>Au^f^a=vUx_IJLd^HZ9annpx(#_R 
zcML#}`MuWqVEclRb;{|mbUE3*>+S9Z#-mF~_gE7|uMVH{A08HKkdjyP!X|Bx5P3$) z3gF^B^e1R)A(tr0`X=ng)hJ~xRS4ICJN|o^|4nxUd`kgf`u|3EoIRjB!vCf_ezQ0r zguwnLggo{;dJ{%@*MB86P+qf~{0p)OS`My%K`tcxeOLsnLcq%V|C&{J_sxxz!u5}n zfw$7>M=s@iW-ca-M5c+UxY$o$cYVmH{zR7NxjUDB4;j9nQ7X{S=Xl)Ew9!k-**@Sp z(&86wO!%%#l+jL6te?Vr5SO0gtVkQ*Q8;!KMSMO(j{Pr)+!lgF2bfWHhJYXXzv1u5 z(@)PecSP}+W|x&ZT4fnYU;bk9%c4hP(mC0c3o8&8YuA0=I-T}EyBO$X%JiY~4Whd- zzd2_XW*@J_Gj?!HbU_>^sY(VRIrHoidN6W>G3bO?=948dxdL31tX)!FS7HY19yx}S z6|64GDRrcN8Mz1}J@Zp&Kr0g4arheVcgoy_!vVX^H3^^${N85FuD2?~z|`L}6l{&) zBw1O0812qoq|Y#I6Cc5Xh5rZR~!H`tpnD4_em@X~bhN@cv5h2Y1ZWaWvDcXsnRw zwGPu!XO{=XgUgV@GnMv2f{;fRxvgqaZS7lM0XMj$Ml+8at~B6wcIM=QDBzyRWj_UPOWfaYdyE|nCit{ z%t)phLr*)cvWXY-NiLtm>9C?)z|4c$B1U$(gL7s*uMtLM{drbKC3EeqXh2=17X)GX z1?N;?HzEGwH2!vJ<>7z>G$E$b`Q%G&Z4Gt?nZUM{$gPDfP>;ZU&n%LeSUH{r=#G1r ziE)-wy?atl%Mc%gXI2&HL_(fFvKXwh9#XVQJC4aWxfJ4NrI1$h+H6`k z)SmwW8+HvN>3s5M#t>M?2*)zxuxf4%o53Q?*YZfg878!nq-d;8-l`WM z-f%FHyM0AO1rxAZ*Dp$5P6@fQ6p<;h0x-*xxT!ZdrJA(10fnjVOaI3%y@&PR#jT&CiP)3cxd$?jr0sX4=-u0*)97x7 z%ec@jcSL2NPtxV`(C-~D9_tnuL|>+kYgp8fW^1g(Cr%yO4wtYlOJQ8S^6YEztz#Vy zw?0T&@}5YuNS@2Z$ZaC)NMmcuxe+QN4@?@=FST8>1AP0eNv+Xb9NM)k++9&~jCLUq|kQXb3ak>r7RFCr8-%XJS1zG3*L& zKxWbJ04}B=O01Q$$TPCu&b(P#{ANW@5UVM9@?my|Xk4r}1LU%_v;aVJ{&9sIlq*#D z^6v9m5RdR@d|IG`o872uz+{OkZ}_a?tI3btB@12hb{|2*OLjUZ@I@q2_*_B--H6A` zspE?GMK64$s(oqEA2alWAkj#&!3RwQkT^zy-!eG5%S{goMB|w(E$ZRTYU9wG>t5!5 z9yuhjAu!Nd6c4B@$G{0xUlX@9saKZmQQmYHe`vPg*&uV_*TVcnPkyyS_huk;o&u?_ zvC>>n(j`X#Wc4%&kZDvXe|87gkA;O@lCyblu%w@X zO&v-L@sT(Pvn(6vR>v^kbA3ol=S=HU^zW5(rr5u;s=ax0|C2j41TqPc*A(D`z*X-c zK&AF&jxxZgLXVmhhm%yj*BaJ1YpC|Cc%HFnJglXfOLk6G;ufUo zwWjdCE+bv0ejtsf>{ggP&1kEYXmitFS(<-!@{0H>DjSI{L@uMO>e=Q(HZ#Ex9OdGN zdf-eXb;;)>_PnKpp0mw^FW-qjy|(Ipj~1IuJ&a**6_pw4%)v@V{fL!Q<^{w}N+f_R zlH~W~m*xyA$YV7n0gcA4g$}1Uz%FSNw*d)y9vgt&s&YUbg_Q(K$yxPH9c8mIY`<&mc(<0*#2 zw`Fmf*qEN3G;sUDJnt1v0Q-S+82%(UH?Yi1K;(S#Jvwv-9NfOe{aje#q)(?V4Lgmb zyG=dguy`dVQdHCjls*5YfI9En_qA!-wX?%-@Z;nz4uJGgl=GE*#gt;}CXO7Pq!{|J 
zva-BfS9tZ|&di;dmp@LlaZS$oL0V&Y|48WU>&}14kAh#fdtwKS~`j*VW*_gJ` zaUXb$8}OK1P=5r2`ePl`AKak+*m)fKzsE1-M-yZ4zl3#e#NEQZI(3}$xsdQrOh@eH zrn*9QyEYf}!kxGf+wHZb%DDVyBVX*Z?G3el$AwU#sZ|})bnI(bY}fO|0DBXe-X0N`oH3zvTVA8v9lP!k=$F`+kBO`CuzMn6l6mP!#}dN z7lS&`CEBYt?JFp&y)dUeP&ha}ID1v&xf^>MNqL3`lZ4;iPZqz{^5Uh)y%YT%+JZE5 zQgy`L(7p8<@20;XkQ({-i-he&nV{C}lfpHlH$$0KMxV*}d18EnYl9C)0Tm4QbLwZZ zS^#KHtltzs3v{y(PBo_m?W#xdS0pWJUPSZm`1T(S=!S0}mSaj)&zZIh2fk_F3or{s zA7G`iepmj2JnANbr~==2sUSbq@4;q2Rb;H8$@Vy<72B_KOm0KraaA;A-JFz;O{e*| zyCpIBWcwrp+xJlc#KgA1X~jJ{Q`G$X!tN`Ib@eIgE{=!B3I>PnTV zG_MLlGO79qx$j9^yO@5v$kDP=qdk+Q%n~4C**=3EOu!IadkWXgljihls)D%0zVv(neCl(Zyi%*`^0H1AZ|xNZ!f46eum!O$)yPA%6MO zdWZm#%e)=59^8geO_K8yo0rD{gpgGJ`+@YnANE z4F$3v5w*epnnqyVz$N^Yd1&>B=xe zTsne}H~0edDWli`L*%bowgY(xXj7d$7GriA0Q2$PmkND9cl#H&fly1#=lPrQoxYQM;Fnwnmx&VYqaS)%xy&Q2}q0 zz=g4Cr^EUP=OhTx!>#DW6q@u(2PF1C(+S=Pl2~vEs3cg{_V;f5#OX8&(|p+_$rRV7 z-Ax$%6)fF*t#yaZS0-ju9wx5}@hYT1aY$!;H~B_+JrDu20OqENuoy}rsSxGgd35NR z-pYy7uD)=+`)J@CmQ#zSRf(tix&C%H)HY__};Ax72ty-zy}|j1L1TF zMoZMXg?sXX@lCeC*G$(>X|-b5L*148zzljq&%ecd=&eQ72+}-17zB?p-Bku53X6_f zTK(pDfbC*U|GH$$1)7LwiAS9-S54-Tk~FfdAk*g3y}8Y)n18%m5r zJV0s?_1_T}t)D#MUUtl!{P~5-<^t$qPU9=Jn6`bH!8@ z39C#%k6A!X;>;v(!J7y(C~jTUkgf6P-RL@ zgmV-U!f;r_`hEdByIkybP5re?3oM$@;zlWX^*i5?_RD>;vR4gKT0&-$_Gqvl_Xq}WNBZXE+h^)i> zNgmh`!?D+g?z3NX>CvB{$W8p-zZ8#mY?0RkI^%dk*|}`j4WFb{W9tgnNdB1k`|3*2 z3SF=;$8JT27&Xn{2#+KweG)oP?)xmo$HmSTxC5HiGIYN2TRF?yg|OPW1c@0D#SE_ZOnkfbd5EomlhbfjW!pUUUbe9uh>0um4Q)a+y; z!*Kh?T2!DZ!5aY(Tizi2^>wE8u6 z11DwfrDf2?9LfGwhTB#jkdO}FcDX+OC<|iD*l;3}NL?q4F0MO*;YZPz3TEl486Zvv z!$cLN(isoZ=6_pTwF_G~U4p zcj^y`ny5c`YF;t^)FS*$boy39H&38HLWE=2m|zsCiqp>R3};yj(N(AQwCl&G!&2R9 zjrNSbUy|U~U05wDLLg)7b$y`efSd_&lKSBEQ_E52mLs3)MSE^;Ms_x?0GNe#f@Gw6 zr0OGAe$kJOv~Fy<)-;Rs-7Iz}T$fcd{E2hEL{+9Kgl5BukKZC=#A~Zt-V^=WFzkwS zyFj7ODNo1C`|&mJgYU;J&$nFV@+YLbMBQ}R{8jff{g@)-;?M|Bi5&{KblCvo(JJpP zAnOzf>Q;o?P$TP=njvR(X7R{X2*soO>bC#K`1;iUO0oZc@S0)8&Y*vj`>LtBepO`O 
z$mv9PIUfiY5NK;J(jlKx1Qq-)Tijm|D&RBILXrFV!--8*ywJdBJ3RGgkrPUAdk;N!L&x)nDBg&5Z`z!oUYx z-fS5kAo7NxxJV~o)K4KRbX91QV-MAE=xxI|_hZ?T_r}aRy2$MGW4jhlek`wSKWErE`ssYIT-Y58f@x`q3dfSjc@n7Rc@x#Te?K~ zY)x%aZ9{`p><7QjK#x-%%$$uuZ8aKL^8(+r}7v zlSzz}k6gaT^(aVJsr#C9GMB3aII%A`EGzKcc>C7QKr7}4sokI+&i;0~uAD7lq{n|* z?iFZe0qU;{h4hvYsz{;(+=-V(71zIS?mONL_zQBQDspwu^3d6vlVoVLPt=*ZYNnLnth=U6oU`hdZQ zHR!hG*6~KUMAMnSX7thSgtgWwJ|3S<39)Dwh`l8V7!JAXA`a3_CZi?-C0Q?<)_DVo zLnkMoa*!Q?rsA1YRNp!H9w^1$3*$1cJ=ByvOPgksRkU%E3S&LNAjkR6_V%L8<3I-e zXDe0b8DbTh!XME+6qV(29&j;@_@hBU0qx9;@w}w73TK?LI-iaicbF14wjIdadGHDSk8eJ7mDoyR18{X4|R6gO3OGU_-k!0?_ zR2+HM6s23+g+;xx-hFz?lr(|kkB>PkF2(^1?Wj$ab*7%%M>=*4zs z_sK``IdHeBN)fuvoO=c(0krpmQe5Vcsk^R$F0jQV3G)R=T`Ugp_6%MJiO%Fo{Gse`>Ntl94pfir$hKc zsLwD!=pZa-g%85a{NdIR(af_ zVgvW;1o6RvQyNoo%`&f^3CXgAi9QmJ+90h2ttI$5|By6!DlOrHLl;0;!TsOw=}2QK<_h3>4p5h*n`t z&w~VR(S-9n*KaydoG^5(@CbSO%=6hC50EC><@h`9e;N#M2snL8{0puE)#6TIM22#v3c&N{RLT=08DzJ9HPDNN!al;;3Ev#O+CV6N4ac5NR>_+(J*jnV){CE5iM(r^%uW0q*=%a830*_G-^gWh<+5*NS zAT(5SdOrxDR zd20-VX4xo_l}&hjHl$r3PyCCE65R9apvA3cuNnzVr#g)t<7XY_}`wN9P_E2QzdMXQu9* z@c+5FuqXs2pOW-boPVJ6J4mCC?Ps$`HMhTtD|2`NOh#fBWDLm{a;m>={+J)s%_t}4+cvzmbK(J?%ZIn`D&y= zmkMlN{ThrH=b2xIMSUu%_7J6+>nsB$i>hTtU|M^-a=(z2JL zzqyGpwf462Y|PUO((W?8uM^F4Fv#w_`8*#=%hU)?Qa?vlE8i1Qx8iOOitv?`MFp&% zXqSi3#QON1tJEQyOuzt2`GE4?Ig|>@Z@-{JxFWdWA&o?rI+{tp8>mf81d*Jlx>;|@ zDZsyc;nBs z?v4zqm-Sb)yPRh;Xi}T6*^Tw!_7F&y*h*n(K;9;1zxSXjbeOGhxVDQ*9#t2-7PGNY z+_>mL!?m%X#J$fl3Gj-(LDc3&6V#+R<+k+JaUiev%X#P2lsoSytTH2))SNm4<*qu3 z!iofGe!;_J6f&|%%7cC6?^AoXepXyzmhwmiv+v#kihR+Ybd~ZE`yhHFhXxd+h;!LQ ztL766SCcFjgGXl)?j@foNNWxZOmUjpuY9R`|Pvs_MWn829ocgiZ|A4RR~f2=5?t;x9Gg{U7r?rKDp)@He9|UMVhCX z%a^6kU&<{k!kWk#oT|`f0*%>P#ZjdEP9f3N;WEw~g1)#pJUjm3}#B zj=J!)&xh5=%~xMe8Mb=bPfo&2+@@>R$Q(xCK7+~^cad_R{r4`-j|38nmJC@}#fc87 zV>xmnuUih^S#%P0?hoH$d9inGfg>of*?V*;>~ZAK9j0~%!P!Uj975*10n*%FAmddb zFB5a#6Sr0!Vt+1dn(|>(zHqGCAy$%G{;>REzG!vY zrx%iMYl#&&tF5kH``{njysJL}COf!sFEs0V$lTT=zIimpW37>9=UVT&hBt7zO7Zbk 
zsF=q8OWCRF1U~`RCe+b@X-nbJ+qG#I9Wq~?j#ON`Lh*rmh4>~^lNMm!^%N_T=?>Td z({-1Kd6|nc>QQAcw>b6vevWmx&@;tce6sxYtNLw}3b66Ibg;96MjO8xclLWS%CgG8 zyxY1DYTdY3n9L9$i_PULdzvaqT*~(NZT#Mg??-13j6ZOVcSqa#=k5&94|si5aT_*^ zFd%nVoy*Sfpfdkn$kS&WmiSrY=Yw+5Ty}1CmBwF{K#$Z|ahKH!999}BE#{9N`psVM zp{@pQx6VDd$n2!%Ydu=^aak2&-wT#+$YBT}y=BK-rsi**8`R1Ze$qF6PuqZ3oz{O~ z^5ep2YCzqc^-)E)`U`ugR-SYHq|@)d<=cL8@IQQCc~D*cq&I$t+FL&+ZA7oTBKX{& zkGu_iMLOH-b__917hdI45C`Bb&NnaFj&NucbT)6&z(X3iV$4$Z?ph~qGcR~GUo9eB z?G--(mI)8P6^a?4S?ZT_D@ttm3&QP+K0ypPc5Y4EN4UsW!_u=UzkOJS;Y5%3q&wPJ z8ZbtV6oVC_s0#$tu%kx3BMZXz*Ko^oy+Zr`n4k^A=pddP8aY?2ysFRnBrs}j&r z;Kr#pjhJr5mwa=`sVQ_?YL74wq>cQHjT@=67n~S#{8~3a48r=zHte1s5cvz@EfgDV z{9>eElAR{7p<94UKM}yiH-`VkK&2dqN7V;J6wVqVMn|eg7CQKTcB-*n?ybrNk)I|t zCtV_E@8*0`xS1G7(@6V%nlpnFMv7f`k?|fjV-uVa`fAmuDq6WkT6de1j18|QdX^v$ zZ$M9A&-zJm`FF{Nr+#1X1Cn#J>oW%)EU;XhY0H|hTbD0i(qj3HFOuDW>X#_%nE!x( zveo;iGJ$4zRmfRkPk~4UnFTWrJj3q&)TG@^V^ZwbL@Jg?w6oB!E|GI!#Ry0)O<*j(L&|4N(>(zugMO@jg$L0TqWbw?b3BzsHF{N zB~R1m&W}Dzw(18Z`u*9zAf;wer~;${pLT_JLTQz8UcA@K?_6RQd973Z74nD7ZPH4E z6J1aYl2s=O&(<&Nr>#&CV5Ze-cKyh^)B7S#sq8*pzbq4eyes8oLuIkD-@iM-mzE<= zg}>Aaef5B-{{R=JUjNhXtT4-vzDZpNjE5%6k$vlDk&^Pt3^QSNg~^-H!DE^LLc&y||oHGgB$^X2*mn z=G_d;n~lOeLs1j!wejx*S@YhPPfRAUDvFC*#4Gk}gZim8X*D8uzN)PC?FS=GJNGA} zk0M^E4DoLAMk;eLwM~Cewv*_@H`40xf4Y2K)g%GPFK~bJ;VvTgjA*A7j2SMBT(63# zffYYVm^srPnXT5}>Jpf$E}=U?i0&Vs7Ju7R!KBtj(^hesaLH3@L@eqC7>_F&29+j!TXQbEoQaa#S>p(BP1KD zq-qCBF80Ni6TbeidASs&Br56*^jO_o^`C=>Ny!3b2U*};Ref+hn`=Gi;#&6GbNwPm z*Zj@Q$bR>+_YnhsYruvBiQJ0r#|tQo%QTt3muSrTD=)ui2C4k_WS98lHefwaPJ8%oEl7! zY0Y!Cx zrYVw{c~KbUf`EmPugnJxF+@OlW_RcT1f}AL+_e;*g^y zvoXtEx>hg=+$s7Yr_O7xU!crWpCn$GxKA$$C$dsP^|qH#JCYvXaRYBw&luyJqmU?! 
z>7b30a*Np$7-JKt$ivpUrZM~SE%;KGh7?7QjdyqIf!Zihu2@Qn+xw|UI^#$v4)yOT zR^JWRG#&XC3mO;7$Epnw#-!E}O_jtb|5`8%sC^j2<<$cXMEdr+(Tjx0M9h9Kh#8%T zPZ&3`d~`cB2Rgviwt~q;%dSuCfq;MQ`v(MU^Vo2jE}b z=HynA!pf`5AMn*1;clO*yACN#EN#ylFzIYPtOxE-4wmIVb9?;^0|T0=88zerq?Y3RU@1bQKm`T1 z!zNL`yQQ$|^M6P|B!BJ43fINST(@TD9pAK9^jkTYtxt62rV zc1qK3vD2F4+qi~EVCM+f#Q}DXCQrt0@9o>H6{m%9S^B+H7M%4q3QqA<4lZSK27Rbzu0^Fr|ordQ!!DFw!GCA%@- z(;n1oMnpr)sluxZ9CJ4#Vdt|<^QA+sny){bahXV!pQoAenjoTpw$R!O#Xr(Q6=0bH zQW?G>qWF8k(U2BybaF229X|D~ccI}i(XKBZt0RW|n)I*C8&3-HXmV>q*yWO`soV)4 z0fQ&-`k!H?rDie2ZKN;JrNHBhtgyQBsC-M(>!jX~g=b2vz_OD^ltk##h_5H!RZs*_ zyC*mXodc^f{H_Awd)Ur-KsorKfY?7GwCIIf^Q&jrnqXK;G#amG9%iC9R<$=Yo%wk` zEH=xJ4#KTD_nywD)sOYRu{~IVq74{T`RB&9rTOltdWJI48L=y`ns||MYBa;x#^Fz`w^LIT3<-*DOOuCit(KTn-<1nNPS}Kf-+-I!W?_Fa21qq;2h7KIREY^hj z^5*w90ZX;uU~!ID+CTR!XaWYKenn(FK%!`nuv`zJCKpV;7JOta<*~O8I-|3WPRR=Obh$X7e}1j+6zdYlnxjmFt0Q+CInkAP`yLlS5&%_~1;> zcf-fDy9icQhPnpW3#MTwEgwU9w?SmVJ%vjAUO2W-kER&o!DCrO9Yf8Gx=rj7KxpYX zhMYy1d5ATf0s;XYX-6gy>T}~wGFlg{Z#4H69frhIKk+G9F+1Xp)4_Z+X+P;D`1$(7 zcLu9xjuKZTFH0#*!fhcNi@T?=lB8(ZQJ|U24eyK_`{b@4EY|KF(E&`b z!X^C~<`$i0!x^-=DaG zI}Ght$^OIG>i7@FR@VfF0yFXLeSAu5N%|Bs9It^S$U{1MHU z2%qzN;5gY9mk-p0cPMNxvs;vV3M_I{1Q^r7?xeK|TGA<>$@;_Xb{|R~t`iWYnn$^I zq;FHl^&~Sow|bobwY{4Bd7-|ov;ZFQkKZo|+q+KX8T0urE=pS{pCQN^`a=hvNjLJv z>A?c(liP@uwX5cR*vJPW(H zE((Jwo-PtfFMJ`{KIvLQPgYFrlFre<8s3hOfuhUONdJ|C}JccwrzEzEttnR-V zL3q4_oSbqg3u)kURB7zK6RD)2TLcUiSZXa$w*VJ;{S`=rij+hc_hj7N5iH!Y40x&;$+u4TST9inK^~7tfPoHbOR>V(p+f3mYe*GbUx#x4lbF@b`g0njmXS*g&aXxXmif6NGv#cvX}-t01sT0vKK83H$=W65KPhQ1747+S9^*8tS<%w%4MWH&kz#wj-FBMxY0vgj$Qq_Mf$2U_l|krdozRkRU`Gw1=@l?O=@x zhoP+TTd5Uyp%Ut^v+cS{a4-!P*X8UV4RZht6>8Zu;xj+mgOuQYPxwW<<0iHD4*m3{-jSV4G zz_@(sqwsmN+m9h_+gBV+Qgaxz75vg{x5xAwPDvu5^;7hbyQiNDXGT0>XZ$k1#7paT z^u|gzKckb^ZD<>dM8z(#ejMIa?SiwIhDZS>kWY8tFHjXml`cX(*VzJNUnC_^0&q}6bc)+w_(W$4N zR=gMdl)Kb4+gfGkq8^|VC@WsiU-`Y_b)E{0WFl?ANTw^A1a$;k_7Icx%}Y_-DlS%e zBLzl5=^NbBY!N-l4Af5#FMuU?BXznfW@CGTF4NqOmwcOiQ^Q#s*@7#_ATJ*~%O&U`8t9x>G_wclOnMsh4JwS(%Jg!)nuN3I7zPLFQw 
zV?X9fkq|N{HlK5T`LsCkSW;9xpZ3ZejW@Nj#r$PdVy)ZvnF&DLCB<3SA1p=Mr1}MB zM^eSezkn_N-leCC+rt)>M61u;CNrT#l^zG-fnk3_$dl+gf~6jn5+8(S zU`rsqRi|KKmnN>gnlY;E(k^26IHOk@N+Ntt18Q{ zZkI=%`Aomf{j$w+j+f~N=;PR7t9B&`cY&9>m;oz(1@3?#P14|B5Np(N%@5kUXH$}8 ze3!2!UP+9zUTwb2F7G5`#V&9A3RMaZYYy#G&i+J`aEFFk{XLl7jnlmi{mF9}PpfbN z*AiLj=*WNM+4Gk(ysyk#3?8|$L!td27mU9*l4t=vTMN{6m6GJh*lNl&X6V5`{6qwT z)se*hVRV7xZ0Rl6-ok@rjEA4H+*gK5v-59m|K7z2#VYR1{n?+l_V+%HFi^30tv?x{ z+DdDLhe-0smDIktTV_y3FI>nU9e#bjd8L6rTJ11KUM$r~N(WBz91;g3K4uGKcxh>9 z-&eJA3pe&>o=h(vPX*Hf*isphWmtMVlau0rRd(ao>>UF@ajBHy$CB~jIbqmdV0aSs^DE*OV+RkNRg zytge{TUhk=BttWXQ?0A5a+39@s1Huzl>xbjd?9k3wWh59aXAA+4CVx%=r&at7aYm4 zPNNF(3B#tnj3&A?=#8HX=rZ=tZN$2d?p$;u;@` z-+)pS&6hL+sL8UvA>?mEtBEw&a*CB=ZVjynKPs<}7rtM3J5!zS@=?r1Wp=Q2k~IX$JKjT=p0TR})6<$(e1HP!zS%QDN@GeEU9 zzC#UVf+nXcRRlnm=>NGu(`^A%kU|KLQQhjkvTB0_f>cE3UE`wTtw#g0HM^8~1H_V3 zU(d}hZcPk3l5YqP?FWK{*c)>jI&crnw4X-5*_s&PY-U~@g1s9aR?b;%KH|dUc{Orb zg(_}{Cg*1IdNv2&IKy8kqeGPwY87YCo|x!tM*SM=NT_Pn^F+OoAF zs$@c5xjGr!BrWejv%!LeQ@kHcj>+hdX+Uv$%Jjv7IMM_RH3Y&S*d=w6YSZxxEWD~B z6lus4gf<#YsL{LHH!C0HXO*+MuNL^5b=iSv1>a#a{ny! 
zmEe{{eGH*$u4%Z&P&lgxW5e*tt|=h|;qEll_=Tjw+GhiS;{j;7=aNg1uX%RGT`j zQK9-gbaoE1TBU|=oVlc_i%nPFJ>g5276_b&ntACbpnh*fwLuUWCP=2%zTeZ~(rqzed0M~a}7h&1WFiF5=BJwbX&kP;wd zeGj_!+IxR<&dmAEocZSa!&$lpP2Tsp+jHO7eO-#z1`Ip1V|=p1@DIcy7LJ9T?7nx4iofoE*ZdNuKeoz|p`8iIOxLUroN$T8-VlnBCTOXHL~d&N_k{s0%y?t- zP@GvrBc<#Eqnj)TKgXF}k1Ho@E{3@TU|P8Gv4n7Q%F|mz&7WJtUGmGnqYjW-%?I}a z;GI}^q#rM?|ASX{r`9`M@Hp{7P)x&kk)ef9#ku*!*D4CJ;We^6vb0ryG|6k?p8k6J-`pXmXls%-f{5RXaRpga>aH2_$V@=z%Pj5uD@)fP_;*Ht)gY^O z<=P;4)&CU$gZ|mW0ReJY%TzW8THZvD+rTZim3}Za;qa|D*b%$w-{q9ErHkWTCNI?W z$>yF=wsxe`{xEQKu^_1t6xq|F9nTh#GXK2qtol=uxp6*lAH@h4CuE4DP zfQu(QS=c_><@q*5{zQ%VPm2Cms{X(7UxW~#EYvRs2*UmNl?j32?Iml=RGIjPVoou5 z{-aob3PeWpf+SY&gc|VNkoSSA9@H25OE4w^{gl`K&fMLaCb@uj^slniRWCGH@YB?C z27B?1Eh|{`1rX!bJMR0bRE})BYo{OU+!DWCkFI~m425!`R z)KA+!*Cu7>hcQ2ui}55X?%s=*RmFuj~}uWdwU1{oMB>M$J+{(eZ(dehlhepABk`}NmqCVYBVIf z62kN1hJm#}r1#r~JlDq`TwFAqGtlpygH#xB=y`r_UaaVBY5p2o&*by-G`6SWguMun zmMv0Z?`|2{ZdMX<0^9pvgfMGm4E3oscjND3!KkmS!M&uDxx)9^nar_jr*<3pjvzjl zoYznh=L9|1E(pYB4wL3pNC-0n)=UB_Tz1G2U>s0l9Bp8G_dYQ{mHI|>?9TN^(}7gJ zZk)8jxm;(w9oyqL)=qWBG&w-Cps0T;F$R0;Wk3KrrN#z@PDKf`lkHCHtn#XIWW1JHz(c@8 zhrMV3UDGrup2U#=7SW&>`Z*Qs#j@h!7jup#>I`bs&aCA^ukS_l_K;d}TA~#!d0w1W z)2BVshB%aCs|lRVE9Cfj_kB70Z(q+0d3`J9gAjjW8PfOCF@&anVkVj6^LwsZtZeD%@0M0Rv9cFKQU9 zk0ySUWY(yWeqv+4sxdS3#gb%H0sTQRt&nA7!iis1I>N$oaDaX&Bo=qEj=;1l#YmL2 z;7{Avo<{RN{yjN!^+$c_>LpqJ1if1RBOikT(LYN)r*u6_aA5W(xE)~j!MGnU=q^Pp zT8i^^Nd(pKkutP$7s{eJ1@RI(%O}+PAiAGgDu8`L76KUYGL<<0LQ|n>j{ch)aZ2bj z4?iZB>$|9sC@m&nf`|D zi}EM~4f%JL_dj|c|9BAVnvIZJRNc32>{b$};n`pLEfB`&XIo+er>LB9)%7YxRH@a_ zuH_$^TdMtduSd>DBHMDX)Z_~8m!qAqT?tzr91j3nFsX23=N$t*j>seI-OWr4*@GuP zrLjJJ)p`7#H}6_+4|#^C%pe>22V5y;WE6EKpV}cNE6;wX)4B5VhG3Fq&)`HzwgXTo zXThsr6%nzLZ=a8gZI^C4<6frQu z@!GYgr%P8wDL6M$RfXIb<8Qm~66SA=GUG8}_PBq2Wj>jNPaTTcnieNo&2k<~?Dwtr zN8*~za8am!3;o*W6tm`PiR;?}<4c;k(QaoyazKc>T}~C_AU-Yu{hr)Uy|XUHBr0Du zxk;-SiymAsM=$g<54P8dmL7kJwT{)w(nxVg@Ia&=xQhF>%L(kCef zzldlo!%X}n#KPXaG1iGw=DhDOs3 zGbWHI=V`2NI+=9XUENmuzdHV6k*R{xxwK611%lyBh 
ziDtlNXe4f8o|Aq1)mD|6umQWDr`_{pwhQ8f-2~GQ(^n~%$$w`XD1q%G$TnVoXPYkk zozrZy)!_QEn;YT#jHaL?xm?Z036rBa9q0rVanceL6dZ!c!f|)-4?wyUs{lP_z;Otu z$oJ;L@pSptSu~E+Hxsmi+(A*C0s3p^S1|%9C>XiMLalVGqunb}HcA0zHDu@_zJ(+hdT+bm{R<<2 z7J<|rX8W(KqKwyf^kSG7QQ`(!rS#)k`(0)`fiPCowSYZc{fKE7qGe9Ype%7{B^Wk& zV;?N|r^Jm5$5lzi7CJPUy3W@z4&45{=#zDW9CM0v{QdrH;dl~dyqP}~+#+k*vO}x0 zi0f*b(+&}PY;QU~3%wnY`NDsuck#8j*2cA1*A%S8d}ec0)vpqpqtwwY2l6A$1PP&@ z7D$OKs|>5+7gmytqfM}3-~C_CSv*eQ^kN@undLSUM;c~narA#ljaBHSfV@@?ef$`z zo1bM{0_ZNt$D6G#6@C}HI3m?q@!4aBlfK2$x|P5+6{eynF(8-V{qc1`zpM9fdN#fK zQ)=l~n?ONvnY0{2AZutg`R#HKzl>d+Kx{^D$J3Oyce2w4=|2pF0N7c6VxoR2-Wk>B zotLIe|6(fdabMkzI;ZQ`WZn~sR-okMCkxbodK4KGiik`_9BY6)IC$=aukrbLEkon%dOOvQ2HNO7!dc@-O1M zyz{=;)^)Q2uY*lK)QnZTk9w+A?hJ?CrobB=NpJd>I&aU;wP#Mp50a3&zNWPX0U>)= z95jam!$DbARAA@@;R|1DTq+B0t8cMU@_*=Q-MF!61=xY) z|F8q?T`FtHtn@xWKL|*q?dk1e%9M!EHQB~2LkbTSMvkTOj#J& zyz=aCjnAf!xGN}ojm|96>M?f@EwRuzD$U}WZ&N{EVpA2dL7OUEou@OR-@gn)14hgx zCnQa#L>aUid|4fMS?GI77?WPP5O0(7`qiRIkI=s%Dht~<9R>YU2L*S*-wvt|xTj&! zdnXS%*ux?|zF>eEryJB8eDO%ooO$$*a45BfATQUIj=|t$f(VvmUC!gZotT(1`lscq zub;0%glW}h$)FBdhd-Ma(h%^K=iu%J83ggjsF~Bx86}C`+e)=6HvM6|`qLspe{tgNI;`T&gS8s^Rd(d$q~>HK+hcLb$6&0w+0M(Iy3D1e0EEg&I*kh> z6d2^!>7!EJR%7b!OwO(`UY&LQMVA5gb6i~X29G8>^q6hgO9$SGHNx$z>}PdK`#L!- za`f+d{jEI;@d@RrFm1CYjG}H?tJ176-uW5|DGa=E-)fzHvdEryB9rI}2rl*7ZebUU<*kV`j=a0Saa)Ik z)QO_i^tG1)rw3aIT@vX8htc>t3d6==ky{jCvcRukNCKHp11TP`4q zN=O25c0C@ERi5Rg%_JWQRJ!rI%fEfv<-h#9%m1l(eVY}30n;4i9{$~(b?aSSyuPI^ zP@?Q6dWH1vBJ+oo!2Vz2fItl3N+@i{(OeMpMLbVivck(?mwx6*H|bfvO}q`#zwUEr5ACC&Au zBICKXn4gu&BPEd>5VVly00k;he?l^o<169E*akRfd7zw9MlXs3$|=!kzF#>s?7}2K z0GSd9Am;)pZ1Cl(?Zt8$t7Df&te1oJX*lCQJ$vjMEH^|t_PbePdh9jP*0WKHGGjgG zbfY;c4(}5vcUG9ayVIO!tPD`^Ot?+erwshasgKpLqt3V_yV`FT`{oag!9yuC3*8Yu zttyjaZHmDk@x|F}&oeU(sZ%2KOa?F7~mIMa$t)p%?XGw`<+eeaIVk7E# zlHk?l;|wN)6T`|BV5l;ya1ZpKw!t7d0}!>em7hxGsM^7B(luc0xo^**^BG72*h#MN zMGo*!vGbyNCM1b4fR7`&7}K|)u^%(^1xw8=L7D!ohCTn%dApr-&7ST%+-mY_I9p7n zyRX4#EVc9mvHs5HKp|qIRZ15#8#esBo#K8wH^qtK(P}u#gT9N$+eb8r~eQXFY0KXO2 
z9x3n2?Lxx?NO9hIxKJ3*LO=XrbC|&SfGMWdSDeK2mwFV?N_!JtQ>}G+F>VE)#`9d+ zW#ai?y>wIR2#Fpd?|AwzDj`o`j}?@WMkU z{!?90g#&S*H#E_bS-fp)vru0XiyD8JM)^=&i-XMYK@ZrTM<`4*QQ%Zf#>L!a$M_l#)K|JpCS2_Lm7Tsn5e;V;nqcyj-yLYfHE(*%~m`_Iz~#;=>D{K z+w|0FsI^OqoZXV;3=|Kz>pBv6-MasVr>CQZ-2qDtok=TeU!QQOKKCLz(R)?erAkX2^)?cpt@&wV zLPCORp@4i*5ESiyqqcri8nq#c1J2_Z7Pg8}B>pISV=f_r*Vt$O>Pyr)^dzs_`LWTv zXk=|O&VYv;1R{qhuzw3e+ky($m5pIAJ!<1QI!-@~++#1yTis7HNO!a-yj5=8%Bq>< zbA}{xpbiTMnx_|ompgoJN!;q>mc8=SK%MKxna>ovIW$ON+QH6Q&c-@z+bic*r6qct z;?;aP>Uo}#3seAtf{3 zLS|asIsR~nEa_Sk+MnhrkS{+Kc9n$Tq2Zt$EcFEjRt*fyGL22eE#W$KS>fv&oaX>z z553O=ThKpe@sq>BTv76qdod0);zx$R1ghCXuZBJ)8t7tU8-R0;`^U8_hlU+ZtD=m= z@_(^dk{bkt7AlBAdI#^&RuWC&L3T^H_qP>JxZa(*!Bj%m>d4;7_=1T}ojB-+!8L62 zOO_CnKcl}R3Y=;YNk9X`scyo(h3tc}<~RBqK30)#G`Ig)U*i|2+ntV{oUtlw3YP^7 zBsv3FNBthM++KRQmkmP7BEnj^VmSnmWEO+! z4}7JL3GI31umEn1&e%cPEl+yWJHg7=f3|-{m{Jo(W3zK|Xa`QaXH8t#ZG2n+F934< zHlyRnhT~4KldEtIRr*}V;KbowU6RV{Da~`ToDRGp12Ti3a-grfZy(j{zW|R7bNmYe zdjOY1fL`k1e=1Bft=8Gcl%-nGWQJZo({PrKP94H^24cc>sYE`G)$94VH}7xt+~I3l z^rz~%L9L|BJ$$p-R;W1|m}$5DGZdG=5=u2RQuxQrk7LNBBvFN$s>1NpRKuv|g~+Rq zW?r5BNT$V)fiT7WXF5Dht%1_TOK@ZjK?4ZAwxYqLPtl*Mz$;2^T)@jM)4YwKIpJDR z^;Pq$MD*+Yc?0PV7!A&~gPnPhsgJ3uQnc0~OaE1e*2ZSbpe_)DMD7hvS`cnu+&&5Z z2^#O^hL1pw=$;3lLYm<}O1DEztiPpfN4H?D2Uq91odqt|YKLYl5`|7TKqNAJPrx$c zU@UA$e{cI}lZOy_8>QnPhem{Pb7Bj2rRyqPyIt8?za?*-`?6WjyUdAR2Y45WxnMAW zEBo;GL4uCW2FOhpu7is=tQ5cjlA9c)4m83lIiNHv)kg+N=OU3aPBK5^JS?UYd!?4L zuHalTZR;JP^ze3^}j=?1PGz|S!XOj z2L3O1&7oNnme(5JEw#Mu7Na|Om(IewD)Zu023lUL%Ly(tME1V~#{O;u|1SWu|0n;S zgz>QtsB8qt3s@N|?~?Jv7A8-$`fTivG)Slwnku-QZ&jzeM#|msw29}r)l+SHtzN-0 z+bteW6CKa_eh=g%`=jO2Ext%8HnA=N2-L-P;UFgWbzX2uAvM%Og@TIDfs<}1@<V+v%RG^XrzPJEq8{8=IsmVzymCI=d9vX; ziNypt8-nUUYOA^GS<>j5vD8}Y`Knl!i1$d3aM9Ni>yem`5a0~~TV8l?_ZJCu?NW&q zU6jbqM;;0C@riLCO7!9m7<8}?3}bD~Zw0DyK6#rN^;6iMJ4B?LJAZt8qxG4qVC5iU0U01(N-slAs@PtYQMhgztaPPh-0fyQs#0rw_<~f7c@aGMyj52Rj&4&Au*uz`V9VOnT8MKoM6aDVHg&P%C?v4xgy55_Mu^j11)|nH; zDlDXN=3Y5ao7^BZh|?UW*!yanNtu?t$m`yp7q+5bdsG&ch|4qa^H{*R7!pA(VO#N~ 
z0cTdGYRk}nC52;Ct+%Zwx|_R&VHXiAKXj&;Bcfz$zP2)HA&bQ@R`W-(Fi9(^o76$6 zAItm6L4W@i(%nOj(S*x-e5Q~KziNll^WtVsZIOd?L} zBMjaaIw5vuk>H+h7(A=f?RMiab-kV)x5hg}+WM+qDAFLOnaI7*pyI8mS+|!x>XPSe z?sF{G+lehX&~z=a&bJ9#%d<&lc}_i@Mu2?jHkYyVvu8pr^C4ZAht)?u=U#r=4IxU< z0~Cb00pe>c&6r$2<;%61`Hjp^7Y;JMQd4)s%Zy0&0KHPrE+St^eATmiUN;C*l|E3; z{2Zki4!)1jRJh$W;wF1WqEWnL-Ioen(88gprB`MZ#f6KMq{W&{DX|^&6k9a@II+^Bcd0SA!D8R&b8QW#snA)0UW2LsYo%^q5HD4*20{u51XZ=+ zU2DHo;tWgstmNCzYbN?xhE%;*dlLWS1Me~WR&f{(ZR_lbsW*)rpcH(QuTxH@`#qH{ zR`Cr3xD|BYT3p=GF&f<}6LEY}SD8AW#RBDwFE=pYqTnkqq)6q;vQ&}P3dZto`^G?S{W}fThcJt3Gnyu=!BM3q^4_ZkY42mTa<)UAWb*xb; ze*Tt+Exm$#+O@{>>iL!tzlqIeo>9S*o!Wc9ApR99+z-qTjxU+%j@XXBx4Mok5f@R5 z9ojHr?!NOn#|qg%-}r0)?&n(d2}g5EW~4qa_qe1~?F6FjXrPv=kb)BZc3`eB@v}Dm zed6XXfo1;8k3Sz;?n5AoFJ0>!U%EBZmu%Vjc_O+!soJ*T3OO=ei#Alk(5DnqU_d@H zM9Z2hzE{wyDlS)oRt0O3R)n~XO0-ye|<)u zSM}3{zKn0c{++jzAsGjV%-o2jB#Q{PnE~RWUa4JJmyS5jk7`Ji&?}v8DboMdPCl24 zMA*`!m6siI*o*x38pR@Q9E~)bZnSKPHE)yF~mIp zI|4mC-1ZK@5nk*J0!L^Ejvz)FRj#g4)N>(F=IoKx{*$^i7|Dxk3T3@C>-@L7FI>H6 zLNc15H{N8`?=N+H_0jZ=dEY~;m*`j&HtPZi^c(s;VCU#AdB3M2gW0sUtGn=^!Z&j* ze*145x?22uQnW(8QzlUyAM70#ynZhXMvH%1qbLdhw5&4_fWCS;TNnU;HdkG>2qctj zEu+6$%70CN9qOKKkGG4Kb$3Zx}tsNNg|L`J|xvU|FVvx2%W|w_@$6P4(IF0hTOpOMT6uXK$Bt zm<}@k07w1?kT3a1MF*sM&5=s7j*nYh zOh-z!ZOG>mlixS;IDAX8WxI3)SyN&*9kju+v=7=#bPN{V;9%sZFIh0+1cif|kKm@h zw=<}xxs#qhPuxHa_V3hirGJZ-wwNGcLnr{_arMPQ+gEL=z7Ef|3X_sxE_*j91ePvi4&?o(%F!svkeS>pag$;4c=&Dr9 z=*!(j({rf^E`;St(x#vz`5B2&TQBW_Eaz z%G~j&Z@;o42+7raRX^42G~S+q(_8k6jm^Gy#^yP?T1mb(Rl88>qTcyPQ}G7VgOM^D zW>4v(q{gZ@^rm`zf$?1hsj`9@tNCHE|O=CgRiKYHDng*%8$YZ7olw@UwzgDWl2U>syO<;Q;g z=)`qPtVOzL`eQ&2`w^;1<0Q*oOiW40##NQ7!Jt>-+uEr_cMku4-&LLmFw;{Op#M8K zAHZcCu{%gFi<){6O?Xe$INLTs!iXYNgx2Hh+<*?>&Ei#~VdMl~s_%6IxBZ|<90bLCMQ!W7Q z+FLkzg1=PuF_z~q2xr+`3qg9g!Tx>KR+LCK-~-O(W=pWi z#OgkHv1@yvsC@$9!ApTa!?~sncEqT>lW2cB_tL-|qppC3zYeV@3o{ob(^t_#vE%n{ zEbTy(*(haY2B75JUl2*hB0GH!S3HBW5J&{X9le$TXoP{q5uNI<5ksR1GDnJ)(NaniSElx zyvU6WBpNBXf0uX|aO)aycuBMkbY{2vs9sx4v|!dF$eX;mOQx6-Cds9=>R>0SbA4n% 
zqS{)NPs22t^C~1(rZn=l@o#_1`(R@nP<$@@L3wp!go&5-D+4u}Xe(-{jFc+j+#nTv zs45RWPYuKCINVcVDzOulT(_(q-F#dXBvF0If+8w05pZFA*U?ZJuF@_@e>h@yr9l4& zH)@jqajhWalY)y^>22V+ZJ>85+}6A+Y7P90hy|>A8gKq=r4!NTQ_!hyvn(kA4qu58Z_mmBYsO)st#WW5mfBICl=8QVyENr=?Bs+0LFZn+m?JLtT7INV6u)_wOe<)lcUp z0w2f&KJ}A*4bZ`!$)WC-P#NB>McPYfekzYou~lsRtlr_?^^jJkRPOKBHTYf9OxW-m zpntciPFWJ!IKD$EHjS2olI(<*&+>}YXt>bTv_G_1J z#Y&z1NUj1|5i&Nu1j&EQdQOEGb7LDwqdid$@e}*@2rr#4!R<1B&jr4nmet;FOq=Oc zW56zuY2#E&CBj^u7roaGp|QPxmy{(r`CuPm?@j93%G=&4L}tJp%1!eO-p2A@pa0bu z@~&W|&LDmX{>-M@L&-s5utEiNw4F0^tk`4<#&(Noj4wt#JUw|1pMnnI3n8&z9(yk# z{up2u)2RV!{B3|^<)14@OYe`jAvsLk27bI#iL8~CfjoA3F<>K;L*1y?essN9Zt$ZG zGs~#Z2EFMNe+dgeS_H}_80Qm0IM)mesR9_%>1%gz)>fhT$OwTzoFJP<6Wk1#pJO9> z&I>1EHu+CS6q21QB)Ibv!~?3T)F2kdUMB71o|m1ia#lTlb-5&TTCso5eoCfhGf3@8 zC;8w)FGp}mv^XnwPGpK?CDBXJ^s{=|yka~Iu+wB$2m&Rmb+mB>x+AMTYA>Us>)AFp z6Rz{gf)f>&Gbmuqf&8HWmceDBn@LvWVyeO;-4?%Z>9gNpt%{wW3W=@nnaXhE&e$K! zo9O;6ZCnhbjmMQw0mh)~im6fp(Q+tD8S~(G5Guh*m<;1p7pAo5pyE^)f|xI=gmF^a z3CWPUwV+3uxD{KawROR@0KU7+DE zULU6oM9d+xKq18de_91k`voXtZKOt5c!5h{Cwjm&Z#%R}09-e*G^I^Rxf=O1-~7I8 znYXd32u3^J)Ev~)Gv0-=dDYy;#3mqPD=uqRRMB5ciEm_tM-Vp&R0HPuSKY@JhZ9YN z@eC6$Yj~AAJ~xLpguu?%+Wc$vbVRNI$Gg-i6XPFX4r1IX?cAEHZn(002Mgyv6u;u~ z<(I1mBRI8xBE>Naa_-DkL?gzeY=4 z;Fw`0Am^NYiWDURJyCSh&$-HG{F_7g`pBs)?h45!g4C-|6I=ae;z%UHKIW?yb&E)R z)+8jpXYrigFQx%pJ(}kwUbs{{f6o8&+7d)z?sHdJc{zdpQTR7)Qfu zecscZ@_)CLAxA6(cEQ#;@SO19kJ%`FXIw-|Q9$v2*V<)~{J3un4s9>HTwF)F_b|x- zz#P`RV~;#I@qNK_M3h51Mi9aa*&ZiKhZ%rq7ByX#>d*z0Bc;HDgzO{HA5 z`7%4)P&eI+Zl*e&m}0Q(=f&`O;Uz%vyT!w>o~9eKupXmfoGelJ3Nv{Zb$9vw zJ8T98CEQRv;~l<{2inxni^05>1hlC~8q*nx!l9uGQjF#O(D2G4;EM((mqLIqo&&zP z8Pq9uDTi7ar>Y@LfSDo^c9}eK?4mBPcdV5l#ko zBAa|WA^xtI_HHGLiERfvh0BHSlUyq`u^gK75|;+~p75d-PrSCxBjpTxlq;quV+Frt+UN7lxuHIHjkH|6Bf%GWl?msibifX#!;CGeqUi@zZ7 ztia2D6d0a}^Vq%j2zY^+&3Kwieb0eYrCML!g^y&A@?}9zh^#CxS2j|;t!t47#JuQDJ&xc6@*dn_BloquXTeoF7BP9*`qa+FV8`b z@8)OO$CQwTFsB>K(K{4^$^Y?9h^~P8$$Sny7e{^*D>a*vyBcq<_I!!$S!Sw)Py*{E zt{}AF7sYZTWz2|i^9e)*zWr;69F9S^95uyZ!tZ1DbI*tU1-T!Kev#7is_)!g*4X*I 
zF56|_Ifz&Fz>bNHpLDVOtxjzNc#h=2L-aNuniYI23Q@>CV((&i*~==i_$; z6$z5$GjAc}wNkqPu0^_bl-+tFuky*L?8@$!(!N}2`a|cW$Du)q(^hQUcteIK$C~CR zpXuQ5OiIhJOK+cDY&$^Gk_A+)XNh8GQx~?$R8jXo58ArKH-7$j?wPpQ;((8HFQCUu z1$DrwEVv=Ssu9Bo0(Eg=%Fu>;&ge`5+Zgu!w%M;s!oBYsYP=ruJLCr%xpe1{tT_?! z75#m5uumYcAWR6vxLqWu<2EHr9kn9|IJ(Ytick;SDp65s0V2M!;an9eRL5FgS4<{i z!Vq~##SH`Jx0YdNu1J5Iq&`O-zaAYw!P<*B2aZm#vw2te)QCX)CIV4YlmpFHw&EdK zM>Mcd=8Yd?o z8mH%^aT~H>jmX~HMv7^=Yroo66yk_t@-NqM+KA)b@`w#llt&|^ZeKyFKKRAbKUAvF zfS-D9hi;OX4OEKBt$mK&daVFLB4uPmy3S6+w=;*o>Yq>hY#+LgHaYiu^$|vVs`saI z9x)98(wN>xkQ?#4F~91k#ga}_@KN*N!Y%pSOZ6F#tz@|05beY) z2P&97^A>HGW&du5dK%jKeCIqb`uv-7?L)ygaz_^qq7Y*5YV>H<$}7vxNscF|hX}HX zv_KSl5qixhdD|mf&d=UWNj2?U5btUIx*Q$U74T*XioAJ4NV( zw}cA!_Ch+F8-jM#$894{G&BGDR&QoOJaK}=ROA?!(EC(xvVtd$@E4u)brH{FUhxUF zoOAMzG0r)Y8~Ib|JPxgcQHq{ODg2g-3p;Z>B8~G&gRHI7E_W%h)}$_Ecck4Ce~0Y7 zxO@AnKZR(gUmRMG$9DE=L$aCf&OL{LJ09`1bhjj0sD|d96MM^yPMf5|=)Vykf1pw+ z1ctaj2#`O2ZsN@iRGI@;i~fyv$;Mp;mzl`C|Ns8ff8xIW`SNpZuw6;(e}DP^FMp`m z*eb$J)Bw(RXS{N1GH@`((Va6UE;qq;;f1)r4#fXN%`X!(h$bsvkT2S-dh~v*GWqb1 zi_&s<;UzsnNQUb=?4C8UD!J&o9=&_W;yP2|%Ry91UIuhbS<&ckM zau^pqU3K{(@<4bS9cu1~X%B&i{Ekfd%~lAiwI)`KTVKgkx{YI*(#Y!?+Nkn}4qOpk z&SRdfsD@(#yPhGq7zXqa2#vnb_A4AnujO{_4>zbx`Wd_puhF$>1?rAYb1vQ`H&gRF zK?DpZ-1s=%nQL=(+9(fo+3LBW^YpwM>F##q2ghTrH82+5lskin|KgoxT3YYm-Q_iK ziR6cp&HLoMPMzlCCDSJZ(pigD?y43wD2GT0s4N zG8U{OYJ;1iIquG<&nBDNL_f8(jLcacBa;ea3wq?t=2vl^?XI?Smaatf<7^RJD&|)w^Q`puXCdm1&A~fM+F#yRoYoxj zS4oRxo(zQy281$f!zlQ&%^OEs;2xeGKNv*!QOWrYI z4#E1Dc6`!a$TX^Sxchcv(Vd^u=_|#P74;jNx}R{~XsY!ddkf6?2ad0;syZ*QIR_#w zm%u3_z@CC(=Qi0IvboJ(S#y+5%+W3Gf)?LRxDQ`Me6XcUfL);TYegsMKrx9S*hd`aDafs<#(?Ckj7%OlQNe%ZG4n>M1@cZWyuYV=;p`-ps5+@cuDZ55miVKt zo;vjm^yxJqv|WMD4)oI!?4at`?bEIs6P2PzfQT?^FBDl|bm^iV8{7Q;igU}UuQvl< zZ^pCqog%BI#_p1*1`L`rkKwP)_mY*Y*0PEt1C*R8~hTXi%MChtJMYejji>|7Vu-D_?&sZmy zdby3i=_7rdYNYh--@eDA3=lSSsKFzqDiEpCPiv?zm%t<=fXIuX!^w*)}zZ?I8WgwZz(e(}%2H%QT>oIrA$m*=Ws4FP<`oolx6 z-yrs9V5lJP)lGm)IvxYzioYNn2m_obrVUP{;qLjtod4)0^3ai!v*Q(%mESUJ5hL3r 
z(+fGQ05gHOS-#VL`2->MMGL|Yd(dKUrH?fu6z0GtrK)yIUHUvW;y7bFQ`Nuomg(M@ z{}qy93Fau6f!S-IUDMA2_^tRsyT*-Z!tXrbOz`oNkGhW8pwGQ-$ z7fmAGoRN*cetmcV!X=T0O&^?`=%jKNdh~+bw0aHb0c&xwrHmCl2-!^B%Ykwta+}29 zy$xO#gxVkQl@QbZ?2UDlXI;b<$K5YEBBTjweDy*H=Wj=6+r^do^27vi-$3z%@6WQ- z0Koz~iwOGyCk_G0AN4pkQui)da352W0b%!)^9JYohx5j^3ET-huyOd4l{p;U>01Lo z0P#CB?uKA8TJza|XIcGQw>z(*z9;(Zhv}R%J-rH9SFdFRLUX!rpK2Z8exWDuNu{1A zKqdB$`#zVKXJ>P8U3R3x!=t4j{}=Bu&`LFrP7m>qMMClHU zf}{ZA=QBe-lF(TA=p?ghacqfQ;N26k?E0%!W6=qlyN`?1HSWYVI?3sSk%pBcb7kbk zrV%nzohL~ zV@~SO(aUMr>nE8Fu!hja5jI~2$q?nRuaF%h!qJKCHuA7o6@32qcYC1YEyY1$NAldl zmQS)kL*{M|nckhOOeY`1k;;EG0u9@nl)!UucPQ`EpAG^_Va|qwSxPvnk{4N*n#LNG z{Vm69K1;QWPJLEzk*EK9{lgg-7btlKcPcM8B}^3$Y%S62)W*h?cQXvZiBWH(uN)o$ z|J%lA;~g3+TIZmp4T|#|2r4aa(KeWd`1GBe%ikiZs$-oXs~JP&-Hb^ahRPMqk@uI> zs+va@1Fz9FeAbNa%c3Nrg;wi`#%*k)JQ`lXUa&tj=B1m*NzYA6LPTdJIaK;qCRz+|rsJ?7gud?mC zX`Z&vJhY7>WFmP3?vkr$E}}mm$`m{$lL-_POF>9ki%|zNEQ$;5ULuT_yH1X8M<8vS-gWsht*GPfj`Cr!%TnT)M)Y>xH-{3a`-VThFEld|&b}}gz?#<0hWOjjY|(TW z+8J6DoxPs);I+z%2J4z@3;BRmslV`lll|G4O5-JJqMMhuPsdvk%D5R{y*6cH=^zEM zaTXSsb$wmp;Fn8RU;R`;HukWt`Ruihhb$@FTq!Jmt_wtbPdLrmE zPL!d_70&0VE;!OWvK~)p7&cbFXMR%}Y`6P*3RJKBORA9MClAt)aNBQgZymZL^>c4` zxeFF@kR|>dJ?^HB%WHUCu1zy6%snLY>=|iDR!&I99&79A5E;aL_~23uXaOz$j(yGl zO}OX2%yUMGQ*WMwV`8jq1OTe<3h^7kzpEFSSl?IS&+JGHT8$O=2NE0qp_W1dlO+6R zWl{da$^z3iO!l+EnJLHSSC6^X_sox#=It=c;vz@mn(W5mR`%Xf+M&Zz!jvV_&&lfI z6U+)L&fFB2vvgk%%78%GdJ&%qH1OW5V8VUk6SrjBvDpkl4%=(fxz%f3Mr6;D)z7>3 zl05F+@9FoaThL*?LhI$ztr2XPCNC5ERAX&+3BYgvFHQgdcPN5GI0}@OOd4n|-3g5* zIs43*c4Rl>3q*aN3spk<2i^=?NXs&t!USH7ha1G>Zsn6hkh3F7^j^VslyGr`wiMWr zR55X&QV;G1nyQ%{rzoX(lVYJZ_IXWWpCIkS%QLZ)gLl$4i@z#A=Q$J@1RZ8d_%0*D zkP%`?hYjzn$)dqn*2QMhD}GD7SjVEEZp2W}g^kVdIp&^iDBK5q#RNy5lh6y`v&{n) zR0%B5xaJj$bOOHfH&7lk2+SyQ5&J&frev1u{s_UX!fWyDOX^rgx!2^p8S~40e9?>E zP@fegz!i3~(0qIrRHiFGbD#kyIZEeW0Qv?l>048dQmE9l^{{7We;^s7U8l3AEgHSR z6GM66L%q&_K?0thmeMu9ArVgiBw_@h4H}dJm7;J&*em8(fm+vh>pM}KLscDyBE)s} zEtdl!d>Q+EHseZjAZynENVcza~ng1wD+?O`#K2Nl{Laf^JiJz~9iS={i$bT%@h7Rb6 
zLi`%DZZgSBjT|(QbA=eV87u~v*zy*m4 z-aWT_4V?I&_m^{WsvNHd2L5yaSYUo1DoL~l-8ev>m~@V(y$t(efI@ZK1+TxfAxUh# z3+ZWH^l0g3Q*y;}pvj|Hx{Q;`;`}80wAp6ATSPD&Y&@k(XxY)+3?w6$>76i zDCMJNNBkm!?;ld+-+K{|X&^n;j@~|ET>=fx(>cZN>53Ms8E&F=^}`x7w%wJp-%&7G zcS4A9i=^6hRB#=}VK@dLXMp|~jNK52z+Q8pM!A4!C}%e}WF+wF>2&P35dE1MAViM{ zU*CehQr_VO?$GK9JGxxFaP&Vy?Fhfafs?$*&<_8bq1JTIO_`>7G1tqH(3bq0gK!i1 zA2IPXKFVglBJicR1yGN<*ZaC>+1@gH)$vkftBA7uNxjT=w52=ZEwl^DbKMnnISjrU z3MSD0Q0xIgOQ3!?1c*oSA>xWiM)0!Oyep=+h_TY|EeS~dTlAOLq=goFR0kBH zxA(8aH7R^oV9U3`tbB}c{~#H%N$v84f<^veG`*+00=1hg`ezgjcNl)MfM1`3+dA%X zvxDPaJ_Pt@a0R6b`7Z-##PMMbJJM=mc!#Bqvo|j}e*bRi{?PgnT{fRJB$zQ6T0Y3C z#ALSJ!gDB`#xf2+p}Pq259$;>?f+cyQ^Pf*zGK4*ZS&?b#m2vSRvG2$`yaCgFMd); zTm@!emtADLFbBN+;FJ*%)Q>CUIW&VfQzcMWy^Dn(@3DZ+1x(dT8b#ApfvH~ZO!2t9 zA9YDE!N|g;Sol_c20iVN^LCuiAqms2XS?#@^?qMMBLd|7M>)`Ib+|rs?N&jmR9B!p zGo2c>?>UCd;v_+moL<4%%tAk_|A)2jjB0A_)(wJ06o@ndDFFdd5fG4G0v4)BQ9-(b zf`D|8P7tI@X9J2<=^_NA_uf>Bv_R;DUK46)+4sf0_jkYV+;PV^cii)X5&xK#wbp!R zedhDvyR+tW=~e~bM3?I`o2f_0QbQ00?z_z05N~U|TE~7Ov_*~IQ@vbyV^`y<#D4jV z?TfG~S@Ky(<8l|^oILZ}LZ=Ry7XP-;6*z0dT%el?v3e9}{fVG^21(YClyU-uFDA^E z<@#-#ds*4(GH(i4zvODx1W;!wX+E@fitb=jvN@9XQX2N+bFj=%(FT2qIj)*wZ+>OEj}PCbGG$h%S+HG;?pJ@v7vfBV=k z#9p8iyh&%xh!Lw3o{T1`iJ14*eIT572hs$!=gO8`O_6gvm7+w5I2g`murFAc0c*FfJVEoipH7{7OS}=948i~4ti<5$Fw)f!8 zmBSCU7$9C-KDXR<7UiydupoXo_4YccK|Z2{_Kv51-gf|o^db5UcSRfxDJh!r%Q?AY zu>5T~oCR7kCX7eegFYxY!0gBFU))5Li~SWJM=XNj@A^&0=M)vvx=h>~D;{R)z3jjw z{<6_=(N76=`R$$ z!qZ+{tmHUi#ryBv_wqkNAiE6v8rHGD7rIcn|724Czn>(fb|#y!o?7Hy%1{}VbaVWr za$Ri&s+~=?=63rR@>LiOLyw4{Ia9#n#DB&O1WHsdpK-5X`MKDxjvoueL-nZHFAw$6Enl#abZE|SB4B>Mq=g-lcrEZ#7&IGe0QbhwL_DWw2L$rFvPs zX!^XgP`&v|E^?oEI(miWzdV_$ncm?I!{#mKiGs9&ax??k=L6Q-Pk21-=d}CC5rJN^ z*#t9{AoYF@15dZ03DS7mwT?VqHg&5F_#`~}gkd>dD>2EhNttT=M4Ad8a~_ z6iDQ@{h2eU+bZU&E2b-Cr1x`g|6-Bbe0b=$TT2RTRNmL|2CY)1aMGRRhZmNg*A1qZ zGK$=PX91%zZx^cIcB)`?7zl-=gWH$_x6uYH)~CNhh7PB62iFnIF@)K((KhfZ6?zzY zNm8U`+J&p!gWcn;314iCiX?BHOT&2=gLSTsL+(VEmPWLA6y`@oX0!~DPnWu=G-s~_ zaNvjj0L*v7+oA7eZxVTB=-E9^xuRv>SP^SvF 
zy=I+j4{)jgO-6YSYA8JfxOa$`Y@W5ac)Z6u%nOlnyfvkjwy?)~uR@Cak`(ogBzQ?m z;J3V3+{SM&_d~`uE%q zwFc+)9-g~RvWLgDM}ppSjho$?5b`BcZ7PQ#i^t;0+VSXWiU&HbVr}MgJTH-!Wr%DJ z#xIdSh5KXG1lK(^)}i@_RlQEjxLN~b6-Z!IU@|JG!~)XSz<6XOxPVLcN&T%SZR znDsVaoQ_9*F`Vpj@XTp*F5gxQe7Q9*&>x4~B@0fu-lYw4JY!=O#6B z-P9E8Yhr&^Ru}tO8bEB#!-OPQsZ|O9OK<=lO1uZB0U<cZA(3ZD^bFAU zbs|%b(6v)q4J%h@mj@l{%|I_U6ydXc_0^9K&Y#Me@6cRkwODX!1g&cJ$RioI#1-xY z?b_nvu7gnWNb_~U@9b|H z+!tI&X|cOG;%j4>>(VYq6B8xpP3toY9xh{WfoEKMdM^<6u z8mZWS-LI^M|BEVB%>-bYdUfyO#{Qv+2PZp2$5(FRG~fNVKT?E#|7(|SvBd{N{4opt z^QtN=J6ySPt?UXbFj4I0k@!+Tgv?#H`bm9VcYK0Q{VzEdYw<(FO>5W7SGe=`|G1qS zrj(B9i0)jGM1_yUbH~P;$-~tNPSen!(gM@Ty-m$ek0Qt?`LpkzTP$C5f$S}LT;7o5 zAn>Z<>!bJhwa&}DC}lif<@R)!I|6RX!y^lK+vu4hG=-wtC}`kB=|{mwHwG+YRqzS*ZqhQxIr3Z78vHta9+)hCG9EhQ1w-5J5Aub$dm>U9lcOT7>HpJ7(3x!?OmmveUQ? zJ|F|-qouEN?339m$tvA(B-srRcl;CL=Hs%>xGs{vLPr?1q#ds+stA4LVJ+U3 zd$-6ev0~ksOS<`eEA_G$tPjZ1QiUPX zK#ZHWDn?D3>p~M9Yw-JnPkV=F9!|4-bHZJH!)qED{L9?_Gw?lS9?rWEMcn6gh)W9~o(`c8xcC#q3H!!en!B)EZH> zYC^?oUSC7FP+%ps*-~R&pueJ4^Da^O-Z$3b`&`VjD*G}i3%-)?dq;ckCN5ZV3=7)K zy55YwY1KrZ!pIvNsqgdYNNPz8-Pi5%#n|{xctSw*V~HJZN50XeG-_m<4{iUk(o9a@ z=6aw0*)wdR(9!Q^J9$V$6)!# z3<-WZZqSoVqs@?5kyuhj{c(tcVOQnhPuy6hBm8G-&GBE?jl%S# z;cq^i(R<2%uIZTi+#IQK1=E)#f}H5g%pTnf;e|E&yR1xR0fOq71V@a_Z}E2bl*^4~ z`(Ym;4d4D(&;I{&vPLp%Qs!(s$@=T#<2bdPoAS$B4kqHd<37R(3PCLi z7Hu|XmLFu9Z+A68i#oUu%~F9Fe3=)7INxw`SOd5WAlr7nS~bTyzf}2d-4zidx0gv^ zMufnVZHR+3)yH@J?u2o9p7VWe-zu^Q{{RHXjGS;Yg_~vsADgzWaH{4n-wQSA{A3y_ zm}SZ_i(t{V&J($o1;$!-*4k^P zspj`5B1CE=dJA5kOs-v};HKT4>&?(=f~QCNyA`9aCwWl~L% zt6?lU;Mp}&tkfaj?6`2`=zdG^F9|oZ;>pURhDwgb0chwB3nCzeqPRA21KqN>eETLF zbMgo|8}$VmqbKXhbthlrE26pkRI`l5m?=RNjoAHlEFQjq2lFl+QFAWsa#MPXw^w42 z5D^n?WOEvh@#F%4d++aQ5O0*qAf{m#2)2aDrNnz;2|ScOrX|G~hbVEnh~!+-+|R|w z_h;34TP|B$UEW>M!vvmV^QYBF)1*N7%9Z^@HZK#vT;whUB$;ZF#4EnihX+vXnr$kB z=;zMw-m26_cqz7Er))_Hz_Y^GW?2)j*fdq4J*85?@cAr zT2Bd*)zB9KyC-7O9lkUyBEnJB!oufzAMTLg8N4}7Q}(SkL@hn%7P5}o$b=Z0@d_PAY$Y+;!F zHhxSdEcCLx=b;1JqzHjh6WXg&+$GxT&>Xh0b_VQAdP7k4)}3C=PB>>mOt+Wt@caBr 
zS4O@W3U>Q93PY-PHdB&j>qHEOoll?-6SRgl`uqE75~BZvM6s#J`jVv#l4Jp0XIynX zJBTLD2plaa%YzG2&%155`0%HAB4Jk16TN3B*kvPGYeDlmc)c zKwxT^C?0e9nV+N{t1!lKT7ItI8sz-303nBZC2k;RGzd^)zzAYNm^s?{_Hyt;D%2C3wM!v)saBb+e{D+;XE8=k4p0U!IXh7v)4 zxgR;bv+qZ~|8Sf*${YsbT5JDu5g}d+BF%{I9aEs2r?r7Dh3tG(dTtve9v#{(w4=@7krW7 zG?vtsey;~C=St1Xak()*!1o02xh`>4hg~YK*4l19ssG}Mau|S;xd8Z1^X1`K*YuSN zDccO1&;`t07F4A~=PtvY`skCaW0t*>s|ecBD+j{}{!Zo}?aGj8(irI&`7`yle^Fik zlY##~WZnz4kFY2zH`Q4dZ{(BEe#^mXMVdLAjIgM*C|QxP?TA2MDSbZhqyh$T`@}#_ zaNNO!56^&pbqGLoISP4}x?1BHLxH9F$f%Zac7xJ%40&U=gBh>q?H!@0=}Da#4Dkz4j(JC^^H`7Fbw^t-SIGYcpD|AU+Ma?2dvJLYXM}EQ z$Frs3n#$B4$CS_a&&a)YCReDCOWqzx-03z`E=J#8c0SP=5?Fu9^ddI>Sc&5Zi2NzL zpzaVzSP*UD3~|iy2-y#BlQX)aUJ$u;`;fStW6)r{GY8*4Gyzn3fCPIAMtHK|O{yN+ zodZ$+{IwsL8fkGTxb}GM+TcPeh`F}w<+Mcs4E12 zmiyR-i#J2RPhrHNl(+0(n8L8b^_2v7M8G_54T*bq8>*)C>=J)tP>+GV-6w?cNC#8l zqgkGg=IB}4dQq|PFJD!eUVr=`C^0ebM{>9w_NEQum1(^KR-fu6xd<<9xfRvX!bRgk zm9)2%%A63E1d@|6yQ6J)BE!1AHAdw*S)$XO;V|Fkz9`Kcn@TJ_$!jh7&%QFTd!NIn zX4cd1nN#E`_B^(DwKdNyH+rSsh+$tNgr`5FZ_~i!R$<~5#48c+iR6>*gW!JRXm09=1HMcq7sK zVUb}8u!0KsT#~Eqns$0s{V^E-?WJkm-hn^aiDbK{c}?m?H>4V#J>|7a{qbm|wsKZ+<5guG#eUOq{%FHr{P3qEQPoyM9>Y_sY8_wr&YcK5Y1V zeLql^4o0<*ccyyEP_`!KWj#19_Y?$vXAQBx2W-bz0z7XYNyf(NV&|8%#>Lu7lrm+k z9@}kRllWSJU*EIXsE0u`MNfDbCtGHYG@0d$W!05clZD=1N_+DsWOWzCj}M<}%9u5n zA}F_gT6v3id}3jj1i9r^Tp?GvDA_BS2Kx~1{RtO9iKLQ|E6jSTU#P&kE81wepL#}z zIF5OL|ATT3WwenERf_Oa8&bJi54>^r;@Dk_J^!0pm<|y2MC{Myk~^jNnl(JoJo{G7 zkOH8m4xZ-W&x6e8VachVK;9*!PIn`M4w@r z8hyf>`-;~h9hr+8xs@X4tQV_~{5$B}JmA#hsf?nP{lV-OsY!HWOpC7Bk|^FI-MCPUS-V_3;Vs*?0%CcUPxPLACv9SX z)-#^s(C&~jwDdoiVI#_%z7d@6L#9Q?=8%nii!iH0*S+Llo4W{#5u>MLGWe%G{p0k^ zLg|LKTmyr9s1JLHeT{i}w#`PJmxSW<03O%djYp9@tJZrd=D^d2pWl$yX}?L`vfz<* zB>QTAe~R|C-THcRf%EjHlfbQ9cJcenvRz=)oor9E>PT~`0#7JJ>m5n6$$2_g=yh-? 
zXN5dkj&c-1L)oYxZdI$sQ%BcCkanxqu@!5-l8}x0t>vfJH$FJxf0Y_Z^F`~s{=B+Z(rMORLsk>7a!e{h6TrR@KQY&P(uBT+r7%S z5IO3pILM8EIb6h_^i;uiuUo`kru{~fQeIfNBexzMWws) z7f7z(eElw;G)jp~x6h>P{kP@nGk)@x=AFAUe9gzQ878aMb%md@##9_!?2pXu!$Y_M zwpeAF+Ik6rLEag3Q72WeEh@K68<^A+P~9@qxe&-T;2#SxZLd)Bg!5~u3_I#U%p#;4 zG6{tje%NiKey`)(a|c4w_1tX!3#Y1Dv!RNgC;rj zNSw(ch~VN^n-ViI?G>`6=x~})vUl+`{^6}LP#Lo@oVv&QZ%IsfwC`&V`Op%$9{mY1O2ae(n86PKdsvc4gN5g!GG#Q5q?UCBwg{eD zF^$n%3O7Dp^f(^vcIyp0CIKs6D#;ca&gJ}N0I%)cXEt^^CRpE#k+V`sGh&>>^|v zoa465hl$I_`77{Ilv2Zh7ljMYZjF3mKvT*T^6Y?V?rWf0y#m5S!bDh@nvl6!Oh;9= za{<4dVnbh-oH{39j92pc4ZS%xtsJoKOl`iBQdqJu5YKj#4 zCl3>=jU+t@Pu_JWI?fwSgQwqrG{u0c2)b=_*J$|l+U*)W0Ri5qvrkNZ#*UD5@xC>2 z?js2m0(@Otsp~fkh?iajKaJw&Y~1M-ogV_d{?eY1+%_!3a3Yv?muzrG(0_2O8D!1t zxVxNEDHk^=?ZH{(iZw?mXCWb=x+%ILy-}+cPopzv?`eoRh#QjdpiAs z>(1-ek18wN3IrbrHE}sC&IsH$m_d(#nr2&ptg7JXme!57 zr|^f9tuz1{e%(aZr+cFt*F*IuWPA4(8-YysyMY0?x-L5P47fQ3g1fbKzJX|2OBZ*u z_)(Jq*^4)F9e|kcw?riD%1Is*s-;H8BKhGh?~XWruUfv9O^k?A!>|uKY4(_Gi=Hw6 z;FqLmCKdW3HhiT8+c%H%2M_p=&Y(Z`%j>qeNw5zqrX%^`h{MuLL-g1ad`>ePqvntg z=c~Z>p@mBcQR6+0&&|SFMTBH4q3k&j7u2_0l2?s8nV)?b=>y~a^*wr%Y~4lvm&C=!?`;0<^sgz9Oqj zC+?uJW-LD5RNwU)XfoPfbRy{X^ciS4*he+d{kZv+hK5{CNCE=WKSb)`3X2hI0P@L@ zQJ)*{K=)eGROd;w@9t~T4sAEkk-a%s9@gc;0?ea{jFqNa{JXvKOD^`IteFCsHhTAy zx3o%`=ckWPpFg&TI%>#69bb`-HCQV>x1K4Zyi;O0=YePQ+BZ@08BAo)$Xz;onEujg z08opGV~iw)0x3$Pm$#Bq{PC!y^N4+etu(M9zvpdk^4{Z$pZs$P#QyAE6JzvuL`ily z(iMNBSF57@P02vK&Z(oHp!*zl7!N{p*AkVt;eSjx{(ai<|Mc&)U-u!7LmOhtJnMDC($h-N3j%(rbtn?kLcG5YBh~{4)yKEU1%zUc zB?(!%HvWcPvYcfAxtyjf4`8H0Uq4Kf;vah@V-_!;$-{?%_AsggPy%=FV zlVIH{e)&mu^409&DeivmW>FOq&e%A~4yV;?5UwpKp5xqW!{sRZ|@jM^U^d9}sh&5GU_=z!TA~d-k7< z`o?Ae1a|4;blpqm%wliVbw=lvm;=)@@;4}MI4Vo-G3}xrq9T1{0WrQF8aWjNsCSg) z*+DT(#cv62xfK9YFCz${y9kIIS{+{=SRIk1Dm7#AHbS3~xGcfEC%Zap1q4?ggGr+` zn3et|xO%3i=IEZ~WuF2E>l{nr6&<|TP?dr2d(I8{8*V$YE14#xaqqvs=MCJTp=c4u zG+|^NDiROSWdN~M?`7~gk^~hbeWulWE%i1Vh!c&YSGZ-+C=s42;IgN7wZ#ZkzF1PP~nh zKi=XiVc70nOZXGQdCHQ6mPRh}c*FjdxByZY&@6heS?d4ap52};xO%8As8ccsFmwi% 
zQV&x%`N-kl>(>SfFRe6F4Ea-V>5NOUh7zvUr-8L|{pyZ@Pxq=_j*LJ0z7I2}8(8%p z1cm(_!-OIB?UG20 z$YUZUFdlNkL%5ubRl@fYqhiu)%CkZd-z8QY9X)4ji2k7v<3DsS;2+J(qFRom^>|7= z$MV~sQ*!Q63W(E=$O=nzh{ob-W>0b=TvVL9N*4y~bO3k0`SvAExjkwUX#Fj>-rFQA z^jc5Kq>sFX+pWh&$#ig>u1+K8Atjm4@>u|eWI^s*wEP+WL2Ky-sJ>~p?!9K3{nv`vFD-%%^Tqz4oS#c)^{qAZ&SBa zoneRbyu@&ijcX9lh!Ko%o;qX6=b3ApXc=3hIIAi9>V@mk(eHRmIIiY1^@u5^TYw-cW^BZruV}H^;qiwvHK}M3~%DjUD zvafG(q(s%>-?ZTOQ~-r6o^Q53Zdxt(YYK(vg~b+`b7L~4T${8CtII?NaDH13dh3Sc`bX$ z6-c)`*9}v{eWWefS@2|sM%f<)vWHtUzkO{M)g$4IqGZKZ?P5U->6`8$Xu|#psric= z22hS0gI;4%GMX4}1=&j_SQ;P>FIj-lR(uM`E+Uwkang*mWAy;ct ziI;VHi2DuHZNaPEa|Ih0x~8>3ui z>)6MdgId2C3+`zKt1n*ToWo(Qy*?|og&jJu~aNEB0(U5GRF>c+2KmwI&k`%Cb!SWj1lDkY|0HO1v!!iCRhqg zP{lo$^!7kZ`QD0se{|Tkzpt?5KI%5Gxj#z&wmCom@%f|xi5+V}YywQ>wfsd4OPJT08)@` zE3``3t$&7+IdZuUCW1yW$y4}!6{ZCaSwnd zXW>|o&g%9eF4xn!#XiMnwO6DWP$^Q$^C^G67hW*>L>KA=Q6S}rw3lz>{Y+rP>uTYB za$GX4i@A)xn!8y9oGEPb{oaD+<=7tF>5yx$4TT&)Pt5{;$T$YfA`Qfb)k!(18>1%9 zBrqpZC;IHfQAp-n$*o(r=!+!GRwJOM9_Wl;^6;yxsia=k7I_*rIZv~5ESXP+2A!0c zqBZV3`Eb!&iG%%JgjJHsSuZ5l7dGe*%0@fuTzlCb*2GD4J;E*C$aKDxr=q-wKiDkg z;s(7#yyS`pHpzln^dUSlMO5!ewkI=<^3Bg9lo43jTvCiiOHC~96@kNwhNXe)RprPV zyKS#=vqF)r#et+ZC{m=~)Wu7R3TccT_zStWkm9IxP!l`{R^AMf=6B(-g1QhKN^JT$gnA*?~~ zC%U5i#8G7piqe)s4~uRy2Gt`75wgY;yF(RKMH3;|K#s`L{YNPoh0<77-GS26(@Zkc zz+TT}J$VgnT_d7 z?$qYJ&vP4m^P9EImR~NKyOF!w$({N$g^XG{{mb@kHaq_=M?bp6TgjyokzMYfB97% zz#-tJ;W`7S#jJ{vddiQuy?>?}6sYZ!jI+0QXnUsJ2OXD#;dK&bCoaosD5d@kX`Etl z;O3t&aR z^3+wRG~bdm`c5sm8fkp;ic|+kWkPg&>-l)wQ%mm$Ms0J)dzf7xQB8xOy6J<>&3Q+m(j{2nZ#$VuSyLIL1z8P~PDI!ZBd>wFg5!L5cO{iV2l8_AxeJXvkMz`=xE{(?c zt>IMbuRkG|R$Ei|C_zd7%SIkf`wOg<{vQ~~s+IrDcl)4YrKj+K?S^)ZcRp8#YAs#9 zE{QGYDda}2y}xp=PdL|*ZH+)cwMOo-&zr;Q>)vpD9T{x0BM3jf0CpEb8=ihC$n6NmqVoR#ssna7&Q z6rkt#k$r{j3&o0k>$GblFQ&ex{rY`~6Y&i0IOP&9BKh+VdjaEq;Q|h$w3xO^_N3Wr zl70XdMF|jgr`aC0V`Se|i?r|SJ1O5UIj(osW}4rN%dt>=Koxe!mxVj^8A%t{lGHWQW*_NeKJ>BD z?77`u*k_ZETF+y~JlgT=tYS}nThQsO_U>H31*x5)hSR-(4?2yy-u{3-0|DBy07IjL 
zdl{JFTKLXyQBx?lOo!U^F~8qW3d7`yzdTV-{S_}}ymS4n!NO7l#@z%Xk;8*TeOOrW?q|EX7@g#(UVZqcgM_^orw{=iaWqO;s=frFk@Bx;ceON?s=F!dJi3sdi<-#>ZwGKBM-kvHj^G?*3P*Yw!bWx}mxjA3!s zMCqyvtulm;L5D@;m^*NKr?aV15)K+F&7gV)`3uMd8!HhY9qU!AS65OiZ3toJst&lS zIU)a&QsiK$3CRCh5NvSPZRNz<1A@ChZuhV49AIk%wmLYYGRV)RGi@o)B|(JUW#HMb zMCakGKG%%gc-N}UBhMr&&XY|#`wA<53T~H<^ zB_S`@?K^Q>32pkwoz;Bd`iy4Yyx*6{5v`Y?GK;(tjh)b<)1g=wqlfU$_&HSUZsglZ z(c;04g3=R7QR!D>`?^}av=ofF(t2;9#cUDw%5x_cDiN>; z0cCe7&ph!-;q;5Dn5Xj?kx{J>-YxdbN0A-9tYY$sZNP?eo_DP3r(7nS%rfl;Q`fPI z$8)d8NL)FG9cN!03p?zCW_h{esB^I1SDMusu+{~VnO7z?5_(`25~lX&#`<=9wIaL_ zez47d=5^)&|xdW&Qimv7Gge{z$8noXmVntoS^{RE1X-0_BcO zTjIL?pWV2B;qU+H!2I_=p+Ur91kk=j{;hq9HF@Rr0LPXZ>C9>7A~H5E-)>-;kjmfL z!S^c&g}lQDm^@|@3qfD;U*e_mth|kg13nMNqXFbn2%p{W1>souiT^BU9H!I>&O`!1 zd2(=zV^V=xCmBxXqpi5^(zdr={Hgd5TRQ!QS%xmmn1I41qB|0FFz*;UGzQbP=sG)J zs9a%g3i~}kQ13kl zC4RX0iT^yf-agUlmoG0eUJJh{{z5paf1pIm4f*-0!CWOJ>E-S4o4-MJLGzsWY} zKw%EZHXa>cCpcMv0HKkrGY2%f;Zx%mwN(Ayd?|Dzm35coO?pO+Xa}!;eHZjXjI62^ z3rW>#R8q?^*8niKoo8e2!m#jFaEA_r|><9XTceOs>2^fOm<- z25{}fS7@l_=ZrSxJT(~A>-cuzRXo!lF>gNmLb~Rr&zE3)Z`Ff7?0<8YIp$o7J2sSC zV58}YlD4{cca8zZ&9!AXK3urXGRJ`Xj0y|n#x=tj?*K1_ER%6dspP_RGu{Lz>nq)K zaZu~LP6-+I`?iGMj!6!D_z#t@CX)V=YEOyEk75+*UASZ)YSckIEh2p6mNDv!BgdjP z>|ZozRVKc-B}LuAZaPD`{HR3koP6GSAHwYFHD&n^n(!b<x2hTbGTfrxq`_;KUns zPBR^E*N!jD>*lNn9O5> zi0?&X!7oC)f?p)tKqLXVFQai=nj5d;FBuLZv!CS4f%%Zx4=S7!0{9?qzgrsRLVybs zFU#*a1eEE)v9OW3muFXbV9oQdWAPB$pE>X+#C#&5s62g^MzrYJiwHi`u$Dd{v&iqh zvI$M8TUVC=d|Y!!M;B%L{8txg=dIbwCgn}TZWx`kwBLK_VWk5Od?}~#u zQ-2A<mei8WOZufEs~(*VC3mX|X5Y0M?wzfmg%;lO zK9>e_X5F%U1q!Ese!&0K=fUD$BtE+?n-GJ(+rK$qR2ng1t5=Ym5k2vqQiL=K?3=K; z_c9$`uGc%sMa*RY<9Y@tqr}swVMICvJqU7Lphfaqf!s54Z4AqAZ73BHoj)nMCRQ9W z8*k2kYkcH}VN&$C1-A$GOugeefK>4!JKGA8e~m7^*`+jJrS*|3g}N~BASqWc$=}+y zNAgY#yVdyv2EgyORn1bZB$9U=g%tLpnz<3ki~#e9wo`BPZtt@GAnpEe@Y_4{hI}! 
z6HGe(A`wB%h~RZkD62b&P_@%_ia{iqjC!71NfU+lSLK-T7W+M-X<`4&ohXBH_w~2y zCh&#&oZmz3hQEi}zOtrann1SIzdsbF3TYyMO!5jW7=&p?7Z9cs>*1)%--hNti}=UT zmi{C7{Jo1w`luywFMX!Ym`y{u@|8gPQNbY1IL$a*^)P=XvW=g&y2v zAsC@d54p_O!3PVbe)r`%DaeqGLH%{&Ek0L&)#^R7b(UVTyVAIQ(H_58!a3(Gs{}f& zO+F%C&l8M7u`NWF3GyTrwNJ6F2@w0oNEevT`oP;LFVk|F=h-S5ZaI$!UfQo)J8{4K z13I!d&Mzhi=}ID0^Q~XFy8Va=W#(p;oX`gf5=hFl)})T z$Zs0h;~K!g^@cP+ro!>zLyc_6dc%uU!U10JD1NF3;I9DL%JwYmh+-VLjo~yh;J^mj z5Ie%F2m)LXpx{g%dtJg`&6sUCGqxht!ku8M6p;CNtXANb@^Wf6w9L!ZM3>l&+e9&{fIaEq5ONK8 zyVCx;?B7WMrPh7WzHr-L=bD|cXpwrDVEK81MPvQy2ld=Deiq@h z4UYZQl>hM6dv=(yf6c<{z0zQ21X64&jXItjtz0)GAh=e>5dG4iN?$vR%T?#j!^iXX zo)_Jjo8ao`zf>G1E)XXXJ1P2yhyP<43E#wo!KlFxCwCcp1=@rd6{Z`XBQj(AbOeui z>p^QKJ^wyxJ)-qY`EGj?HVelkhL2 zR&o!gMAj-U2>e`RDUS`CCMs41;Z99N+OuW+MIZr8(lB@$S>w{Sisj7 z%e>Z)5aSxqHgz7(IILc4aVJACb^Rl;ffeC2?BMyalU&;zB)frrZDHU27h%!y(vM4i zOU{by(360fN^=8&AH-$0R3bG<5-#tcf)W{nZp%{9HD8^yX4Xqx7gGIpki*T|DYtj7 zqg=>@UboukySOVHQ{A1Oa@Uqqh0GvgqOHh)P2_DTWMB4CE9TOB`y#8up48`jg}V_8 z^IWD?TpYAuwea{x&N1&K?DVy-&{PO`nNa2lXm`xC2~Oe)D8@Rw>W%u8FFqwP$|;9&_1D|+WIFzA3z6hbig`u4Y{S(EKZhve_i4!=nH+b| zpC3zB=bbBJ)cTLt@MwH z8-FOKnB2PR5gpysuINosI0;tGPLpEnu_X@M;v-V)x2ad=kA#uB4X(m4e%K=9I8*yq4E1mvL!gprrvI8XHXdY)l{=e^ zZmy4N)zrm}2Lx@odq&b~RM?7y?Z2oj>$|#Y>KUk=TDn1N)?AF@`P_^=?w$Wd-Ft>L zwRL^JL6Cs-rt~17AXVu-pwdJH3q@%vT|fi`0t7-4PcdO4VRqmshU0~FPBg)-L$;f z&c$YP-HVAUB%f^2XT zwP}RX*`$muIGFr7lAZi^4!9SZg{IhfM2`Z@x9=e*{lK`nmWr#?Y|oVnq}W>Q8oc$#QIu#$=!Y*P_fsSH%yg1cke*_M_vMXl912` zX=0bM3EPqIIEhl*;z&#NxveN!+33Z69UiI6Q5g>`Z*KDXY#{wH?QYWFXNZVKMZszF zPf_T4b>2EdXbGx;jC84@@ZXoZI-(hx{5q%6F}t4ibbO(JPT(okZU3{W0+Q*b%n`h3st2GnCtB=&&kI@xG?-W(F@|QT@jGMctzz2K)p7JI| zPUZfp!g+|U2n8ZS6_vidWkN}f%tN)ELNMSPJ><_7s z=Gsl~aGB9|aanQH^)D5%T+`@vg>iQ&DQb!*8Xq$^YeUb!J{!_hsZy*{&%}Jl|D$ON zLN=FZ#cE;R?c*Wd3<5x3k#~I8yxnh`n($P}(a6>a&5Eu|;XB>T*TBsrD(}KnuxJ;2 z`Z9ujF3o8IwKTVBW<~jUT{3`U(ff=?yGjAgiLFa^e9C0)dk_;Rbzc8W0yi!o^i%!E zk-ME@dkgN=zqVQBt%H?f-cNj^O!yNp6fiH)TsW@^dl&FhC@a?aHOlrQF`rf!Iyd1P 
zOzB4K@-sOJ#$loLs6+PETAT+Ak7YW#ZfUN!$*@{%AfR!w#7?2Z*6{PeXJvbG*`6Ydu8ICls5w~f+ME17BszJ!hxB(4 z=YcycSne@?5D0k>Snl90^k=>Q*_x}Bwn>HQ)@<3&F4eo93XK+k2wVOzL^H)lH!{@Z$@@T9& zb%Ub+KXDoR5j4P=reUF$r*Y`qLUM0;AE!pVhBeSFN{vvyC~y*?{JYho=X zvZ@d&#G8fH2VKI2rrJ{o2IFsKRh!YBg&FQf35qsVW_GXK;uEx1?Qz}T+~K%DUtm?D)*sRHjC5`LTx;@>%KA+Ou_luOirK~Ug^ z>C2|gi}#ncydT35=!%UuL(PbV)e28H`C!R+sdoiqCh%F;e&}cl=d1IC>B4VYwkZA6 zdZA>9e(J(-ujjEi;l}4^RjvJ06Y9BHd7}mE%@rThYE~s=i^LzpYk)bqLFD3|8tAL2V;+o;D&VmZvU15lPa432VcCBM|bXy zjUmkRzJ@^)M4diJRYpBK`9vBpcJ=+g&1Ntz25|QXRc@Y*&L6+V&O`nhe-vZ@RU?=r zg0(n+2bG4)-o#rjAa#RmPY!Q&%`eO(r^ql#w>~SmoG91J z&E|q_^_d|vnFvLl;hVR|y%wFd9`=O>&^usINJd5PB-}h0&HyV~?d8`c) z?DteenZIkFY_y5}4wC%8E;anmrGnf|O$3L1A@dL^5kGQ;hI^Q5=zJtPn>CJXCB9Xy zB9kGBC^S4ajJ&)_;r*mU{2XDM@4+BX#|7SI9oY`-AdFFspP<%PeR)WU_%gL~t@?pr zyx8;`&33iB_Iz{p3f#1Zb0*+$Ljxpn$JF4keNt|cU<7eM3ig(s!!r~Nm%a&}KNS7q z!eK7+cAD(cu8K)#42GuoTDHZFQ)nvwnqR%8YeL37{YL$+4-*Z3nsrf}j3P^{I{M1v z(N~>jiZ8&GmMUkIAY5F3ZFBke^z*r0Anuul!EZ^c!xZ1ibmeg-+bMuiJ=wkeU>Bhx z@FsHf;fR=-G$@a_qiQrHOA$a}lC%hpi2-bM*Ctnq+!ks#`PV94q$rr=?Z%=^tp`r+ z`g=6pvkZS%Q}ofIl_%k&ZIbE5L;=3@YMZJ>>Rha)b5oS;1VBFL#v4XwbuKNY+Mz=24 zPC%Hc*03SiGAG2sOl=t9%ty@!xuu=W{U7!tdX0XN*nRb?;pXmcE$YgZqHQ3qy{qN| zzuC9@7#|A+DkhnD?PgA^$fwVD#W|<+SYlt(%QsDqKS0w#TzbJ+wn9eqP|BG}n%y?B z8Asbo0&6!jl5DlC@gj-9pmuWi8Icax$Xy#Q6K36h{<|314agp7GpA8{*V~U8_SQ7r zZpzPMWH`i{aH5M=BcB?UXRr1az73j~7;(JXC)^hMM8D#23Y+&6Z>`b4p1j<6wcq72 zi?UtVkc?+reIr?XE1O9xFMkg~{;j^-j3WP$SYS@vBKMfvcM+f%o&|740nF9o46#Cq z8sVjcdX{SAcX#bKG+o`s-2vEz;IFN&eoh=Pl7IExb z!Wq50&u+F};8woyQ2p>REZs-DDNi|#fp?|Xq5)TBr7ui-Dal(bXYo#mmA$Fy-KDZz zr<{BBj*nlDvNAo>Y;Uh4Cr{1!xX7uN>;7dqRX=p+dm2T`>2?9KgeNrQK?#UVhS0nv z*JmSY??J+U#;o{Au-36QVs%ujkxtm*@&~n?h@INMXo*AM$^WJ$V#JEf)cdZrvtGJj zXQyCvb0G1jA+-LMR&SBXt$g}Z=*+%mmQMaw<}=$r)S>7du;~g)l>r$z`4EtS z$D*Yu0U7w{b&%ppg7l3(H4&sJvA^u}1UDzL=Hc(H#P|l+ob&=_D6P($H`U>oPdNK- z)Z*#`G!45zg4}YxfT8xhsLjsD%-2HEHZPKT>ufz0jc@majNSQPOW4NM#6PDP;mR;M zW^w1Mi&Mv`mnvpO;J2-4(9~AGkZb=tO@seOCCy 
zwFf$K2c!$EH_e4&)Wi_3^{FqtI77xP%!`u|Yewcx%C7C~LZ9ajXQKkWpSB+5EIPHL zzz!zuv%hz+Xb1f2FEw(#wM%$*?#f&0Z4+0UfKoy)mOI}k~JsNFI<6|nw)n1YE1)34>Py#+rG8}nukrv9T+Kd|0=)n7P`587M0?^%kTkcqja zW+I{pIkKxp*EHha&Kf`wUdnz3Y-X*wDzV>OD5#Z%pbH{RWLU`VLX5Pz5JBAFS?wRN zCaB}zAv?4B-d@GcX+N5U#s~TQeOwf!?Sv`A2VgK3kGxh=QM5^{WgF9HDJ5(LHh%h& zd`m4e#LAeUun7-W(R+JVf$2{Yq1_cIeHbyRH7s*B{)zK7U7-_Mo(M2_L?IZ2uhnM7f?;F~cT1Sim*d?wv6 z2^iUve<8BBJLs~jI;FENUd>Mb8G6T13OM>?Qh&r;s65WB=5h59)1%B)0;-{2^)$iy zqmC3Pbc}pW@4@C#JvJrYtoV^!gE7r~_o-(F?pIi1Gr?$0i81`Sie5GXbtTahUxl_D*mQNzG7Rk9W=dV?d< zd5&e?6ULYQKb%alb9?g`%2D?Tg$u=-5JS^R0So8_K)~vT+a4*WxsqGzsYr5r%YV?h z8z%oz^A`9`1ks<5Xq+4hGGk~+EOf&coDzdZ}t6#=KQkR)dNdL=c=2=8p z(8I~7S!MMuDU?z#&!3{ke;7ypubLSDlkbr?!JQZm&OC)59)VYzIwoyp88af(;9L8G zAnkA{UQwCQV3#y~!^TrI_9cpGj~iWw?S|n3@vVS=1q!i{bHDGY)07Y)=^M@QQti&w zr1B#JP?rB&N@5Z~fgo@Cj39O!4?ImPkn6ueza=}S&lFcFby8hja2t#+cMFnZjx_OH zR)Z?KP6bO!$dTwTb!CNRMI{MZ-%WqT(N2ZbbGwB^MFk?xi;mXXfc_M_XYJuGi1Al2 z(%{z8w7_;mR#5mq_|nlMdAz6v?r2%5tkiDk(s;|~X4<;^Nl}}k%Tmw}K5DP(GZ`32 zogo1*&jVi-@&Hzxsh@S`zX{CPH)7bX%%)rVxp3|gNA~8;zS@@r{kjaSxCP0>IO?JO z0(`+fv3T`rP))yFRtnxR%wWUIkOAEz6)5jcAVDe0d;|CgzcJ|=&s*LI=Y_I)H&YXgPn!Y~8$A5O zBIl!44^(!m%e+=WkYvi~A?i0a`y8#j1ViZeXU6-D8wbL?K-Wr|sgJprNMwYug9dP3 z0cwi2mZ1L>=mi3}5uZ@pjx1viFCr$ggfkuB@?#fAW&^P`~ECsjg8MFod z@292CQ@#r1JLh-}j!1o#x91)p{ebwY6oaZ|AU=<&0v<5kbrXDj)w&gh%S05H6=W>shn4>YOrMO{L0NQ14cZYUFTo6++;iT)$kfx zN|{lMUIRkjva-Z5tW4A#cO};Jlh!-wg8s|$vAP=2t;RA~Ej}HYGgLtAlz1!hBTnsV z{6}`K)`z~cA2&jpA->sv9mJ@d8sJWn2T+7Jv9cTVVzd34eI(b|rs=GkIoU#6!pSQA zWwU+mgY9#_`V$z~G*yUzvtT6H&o^K*(G zZa(G1kHxLwkt>f^UyuBF&Ki#%GtP#vZF#p!LJX$iO!~R^v|zfmNoc9st>*4?3;^#o zQ&aSbvn$cMz;WC3QF6P7%f&{4KvX&ec~;X;J6a90V(k4ze_AWn@Qe3(Wv@8FqF`U< zrW24^#r7SH>Q}tqEY*2LMk8m1jlwyxaje=8(&ovew~JV;oj$P{dwi-AOC(wIHT9vu z9flpUdHw~Y%%t~?SVaBTpXmkL_)HXP(daL>Z=m9`C|KcOfsj_L)t~sHP2jV<1Xila zJy5czMuA8UtWU#+ou6vs|mcSNZJp+YDsJ4<^*1YL-j6$t9S z?4jK#8Fb-;rFJf&`R&(u#hCBX6<6GX7v8q2(8e6hd}~qY6CP3+5D~63ya>sJlGe6K zD~ty|mj`a-?g;#tHEB;3S-;sh4MMlg=GsK=gKUdO#+5NxtuLY3my(Qb6|&lrsDvT) 
z3JQ=dGOKIE??6Z;xD5U_5m2*CLjPlJE`SiNDdX>&Wh61Lysq)%O^E| zev+praSD~EfD@3g_d)?1b_GsLc#yU1Ehp4?7&;mElVxP2x=2SU%oq|(MbN%X*7gag z`x&aO-K4If*#{e|oKLQf-+_4vK>QMe)M185AUjLAnF-?M1^A8=_KA{-G;Z>86%HG; zQ^0@x-YL>FqRm>7L(GgrVd;&D{$I5oJ204~w+Fi23F-33zKvkw)WxrSjbRGTxU43^ zIiHlH|8ZnYD=w*2D?w*idy|g?vL_&brHDGP0;}{QX_cD&y-Ig8hDC{LD%ViMr9^jZ z&%GU|=Ip;97hObwz6CUkmQhgRxz^cH<>!T& zdM>xH3&{#WTi;m%zuy3~)i*~bZKZ4ff)oQ`G)ZU;$}K=4+`B_M@7?zWf3p1`Xl{Cz z2Up8VU_3aILY_a2LUD&HP#iunn6|hCY$E_tC=L9r2UN8?XEN!bM9wQss2$2a)Rqva zELlJsfgVo&-axQ&fUD1Ky^Pe%b9(R|0rj#Dj6_j6e9D z4&Y0UypK;tlNXmq3{g}LtOp?8$HBx6f$s|YA%xyzjbv3#2b(cCBww<63EVt?XB>^- z!(&0JvFJIYz6~s?kudycq??at&ZDx=BS58`&K@Y&&OgPQH65f@9mX5>&CsL+_Jx7) z@&!;CN)n$X?W+FPJF|k%ZH+ItD#dk&V`S5Qc?I{%U(`rwien8BQ^%YD(wQc%m`<49 zC|Y_Rti;P06Z|d!r=);u@`4@1c#!_;yxY?oBj<@Oc0;Ou*NYpL>xO=Lm} zD)CXX*}mb;aM&n<=89f&_}ZCr+k~O>P$xF>bI(_$RJ$MH=V7~%z~)P64G6CIHb8eA z3v?C?ONl$i~7mnZde1}inxZWdk1 zI6NeVY!;ox($BWOi`$Q_1!gb|$IrmfoRc9m8`Y?dPb3B&nh-%>dM0mNg?y$1&WZ36 zx2)&AXxb|FSWz3ota=!MkNHb~1QTfRL_}fKu-u(jh}qOBxf`5_>{AkOZ3-Qem0$=jz)u=$uK~5fIW^bAo$v$BtwYG+(KNKlZFcSa)L32eVN&ABuQ3;q z#rZc{Zh8D<)bfqJELzL9y|J1IWKg`|^yY;<=&;i0A-l8dgqJChS~0ACm^>7(vh|rF zNs}TmsC~-l`$7Qtxpw7ENibB2+PQ5A4WA(P9`;nEta7TQ@AtM=gsMd;r9AOQrA`OemtIHntjI|WE=(HGx0TYFL z`K~8XZRu}OKS^kK%>w5L$hB=$6rA;T-ILLgVKece9QSvcx*A)3Dzkzy74__t-vqIp zFp@aP*W@^)l^Ep+YN0i*vcpKZH6iv1oi!CE zPS`(vT@{Xe7}4sQfEXi{I4~qn=mIw&PYWQtVG_8 zT`>;_oAU+y@4fjI)R$3PMxH=Dyqa++F8Vx^;Y$DqerB?jmqY)S!ZZ1c86wMk^vvo~ zUWugjruxJOBaHnQ#365INW!|OWb}f}j{oO&cbMNtB0m{>`y8zeeQCv!II1lc2y(G2 zsKqQyn!!EDULI;h+2ZIr%CS#omVUangPC%4lEc})ujUzUugD3rnCx5d;{3$=C6UMM z3aZga0V_KsXwW5CO>Z3T!hZBXZ8cB)-<HVEWaQ=9w8wBV&a`HEEZIO41hnbmI4o8(;f8mWo&q4oY0 zO#QCnVbQA=9g#cCR6^|aUA|Es-wDk4ycvtzEpZN&yV$)qoPi<*{GViKpLfTXKr~vY z)S8%r=`kce-hSBbs0&MDKMl|4f*<7~V#mWL_eotlsz!E z_N|RH-oK8$$lmVEW+f=S!nT@5<%Q}8r zV8cv36BiZ49}Ic0sG+J>(^`-P?Bsy!8xkA@;HiIOGT-7+I4;vZq=|JdKA70<;%sa7 z(oEjYM^NkJopr+#3kg07?Cf3IJ**{U^I+zmLw~kS{P9jzR>-X{TuinKF;|5InJG1R 
zG#2`~=X|V4094e%#^0a$Fait`GlamXF)A#%=F5kJt5Oq{?EDdfky-)@f<=7Hy6ko2 zfT!Au>9tZux_}$6FWpkkc|O$rX`9T{+jKUI9D7aY^tvg?6Ue^cn?Q)#i+433(1z9n z+#iP!xU(eO-$urXY8TY3B;EDHVsh{ME3}Cr#9D!~5qkujx}VnQgM?#NC8s{)DNlX_{3WUFCtUWGes^S zaniXC;kr{dAl!*>qV6C0g9mGQX7gyqxN5r79ecIs+z}aL!H*OXa^>~0w-*~Bv^M*S zJeRtYQ4N#-b+Pl-{6<<^-QLs(V&bDC0Vh8Jb191|fmVSciT97suAtmul@N zVUH%nzYI8gemHaHG#9spji^qc*{M6UZXs=JK`~OujTv|g16xj1sj>_I(UIKHcl%PE zcbWjrm+K{#li+`m#1G}82F05-KqMyg*8T>9TW;e<>wEA5jhB{~%)j-Yk=7T0ki`Yk z_bAa*O6l%nUe!)N!54=VsKJc~m$?+3`?d>?t^wCS2WWUJA472Mz6Smk{U z@S>2lkEEzk$Tce;B~i^bHB{UGR#)P&keS1X)(pQ<48i;8o0QT*8AF#9?yCxjs#idn zX@6Ogcs@|A-fYM9RVnFan~;Jcg^k;rSalLI zbcL4*$mcHGmZ3MVcX~HUg*v;O(o$T%ftUsD-=7Yc82I!P1h{a;zb@RjeD!Ekz-m*j z$nPCK_<`@$^XsC?WEK~VuHC0)^gf|vjjos?A0w?m;SUuf+22kg6AO`#Hq(i@Zl1Ou z)0HN)jtBE~Vu~amRHTr2pdNuxRd$kYu_ccrhe;$bnN*B{g(^vBjM_(%x zp>j6INvcj9y)M9K0%bmClBWJe zTWTF(FF3KGkDYg1AO-p57bC-}kGd#l(AuMemimaQ+A4SUbIZKv<|{1La#yoL80|FngN`jXN9H_30(wpF27d z$HsF9%3<6gJO-9uc|^ZvHABQnRW`Oxq}Hb31T)SuyTGh2vD1lj)kMLfYdJ9EzS$oH zXG&}fj5oo6ASg|{$Jh)?Jw4E$gA27BYCO8LYy875k|#@#BbC*`@%7Hp-a1{N&e3}c zdv2UZYZ$_B)*vdsG}3XSlswl=^r4xf5u;Ad$FaOPe+GpeRnea{s)+5|IJ@Wvlu@}n zLs@+cB`k?l`V}Kz>xH~;*R@_jLxB2)W3&anee@nRc>rkBUX#G-by@B7<}Ihn-Qz0N zcjd$HzC5gd{W9;2*!#dN98_>YfL%@RZzLip8V3A+$<+MDc2?F#-|v+jP0YO#kbhd+ z$T&2fV@ocON}(S5b0Lb6K4Vs>ex;Vhnj3# z0E`|<6pw9DN-B&Fc-o&7mGI|0!0J~2D*KQ1OwGW3UAuBMyv3+G>qiPAe@)uEKoQ5XOG}k5+0STOL``q8inoh=f^<)b{LISEJW>5+7Tz#i z?Rl>3ZT3W~>qj%~x#tUQ5m(0n+9=CrkMEGabor?vw4)Z+H33Qo8J%22kmr)H)xE@1 zNTF(++FPGbD*#*VFJ?C)T=3#jzD?d~!9d+}>MamPW$tGHyB4otr?~>BS(9kVU9-qM9vvXTm3vtBf+_jjs?>K~wH)kcZbQl+Fo2!^ zu_#pAcZrw2}c7`@v+Mbp__-TFoWII!B)t zmebal)*4|w@>8+CAtd%K+YWv9>}Z-nNGB z2l5_aOiXMG1Fsxv=KUM{@m9&?)#rt-iv5^r$@k>dMfnI%;!$**2ACREY`qPT_9o@T_Q|b_D;c{>Fk-lcBtbeS z47I43OLPOt+5}P!M;|%mM18i;Xg<~Y_2X>5wpkV0pgP`=m7zeTS^vAT z%C9BFT;(WI^<3U}wO+0ZY2&spyr_H%a&WpP?)W8q&I$`$>fpyGa=?k!6}NCqDQndQ z2d=3C^=eFg1djI-=~Yj^An$wF`wjI? 
z9VQ+C>{xH{gE<#arxL)DZ9OjPNQdRbp1%dHo zvyv3L60nw#X)>(R4$><Q*t36s!yw=jRz!Idx!{Lk4pbIt!9&!~!xE1YF1G07xi8uWg z7GsbP^Rtdm4b}@GbwY_NLFnK>VN5zIr5`Zq9`vdwj@zOD-ZrZk6I^$PFbEkqC47DF^Dc26KH1v(@3El&?Rm#jo zqGze7r!_nDN`Bt)|2z1HEm3g&7Ojx z+m7stanBV;7oKk=y0kZiuU<75`N$5TuR8@P(!8iNyK&0myB}x{$SV~dy%5_dTU4EhX>VtY7jXl0C z216gVIbamjWyt}Jwl{*2Mjk-nB+aFEc1ChPd>b$o3Qz+Gr{g2cFSe&jusS%6qi^ZA zxpI@#h=$S7Z}pdB{2r5OX_sG_s50Qei|TCFyehj_?M!P^BK?6Po&`&>r`mcYXh|K$ zGXITy3JPj#G{0+W-^_>{wT&4&9D}GW@G54pfCttvE%H(iWZWw8*}%v-N0=34H89-z zN>1^cM@gN@puq~lT}(+R_Gkcdi1%)t1Vj1Z#0Hb!L-`=y%~#igjFX~Tdd9-b|8Akg z)1}7&j@D&1R2$l2!-QH_egEHlJ6V}W!Pmbuew z3^#D|N%XJZn~9fZk%#h_9wv^V7YZPcdO<#(>*DInH`SN^R3%w&%VR)vUON7j zNm21H$ZescXhPnU^Gpq4Syj`Aw-iUyFcOpF;mtNafle%&cBQQ6wy){Y;In`8lm{GZ zw)Xs27S~Wj{accUyQTkv^u1ahtXUR+ ztQZiI#{ODex*O}?og?%TvKr{Gu#HesgV&Cf5{u=F|0DVIKL@$LCD_RBBg04gTZqbj z9lvB^cku9IX0nEZC<{HIh@&vll!`fD{LzAHM8+s-@%a&bT~WmR1(LoHKXscf4Q_PxTWt;gG>ItNPuioSlOri_!_g* zkaokC4nj^|V?fU1v~VNc_t21{Eamp_xVDGAbiEr5G{&^u*qDb(AkJxR18Qmh$dako zwQ%xB1qo1W1$pcheK!V5p5SZJ=(!IKoJoL@FJA2CY8LbLtW;7soAm4&72g(>3>mQN z0Fw>d8*6%b%Z%OGj!YJnrsh}sJ5S*RZJj0j;#3(p777qt+}MhMA;ym5FYj*fqXf;W z?eudVuMM~O+Oti+BwMX5CKjMzk&ky?es#;~bjH#_jkfQ+3*!xkLU|BfPehkK9eC{B z4>R7}*~)+XP>+Lcj3G(arcS8riQ5hake@&3?lbgdTt|Hkcu9ssSL98u+{ zLkDV}bXW>w#bW?{Zdt!8tGi04sU@zq4jJ@@F$}$CG<^{gDotC$4 zQ?2LbeINgZB|{dYfhVU(BpCJnaQMN$o!-VVQgb^hLnDFGUni3RLn;f2&U8ok7VKeW z*B(bGoN zP3`dos{;4Cq}`m=gMvr*u#Iym#+3=nrV?4@+Ht?iL~{61KDEj?5 z?HglH&)3}gRjiB2MLGVoO`IO1V>RH5ycwY_B3 zPz!y=lzsd!8oXvVD-KV+HLmHhFa96QRSQN09TLtSyj1xZrLpQ&RL>prp0euRRE<)j z*yJhMfiU@`vWB0Uf>Zh&LcD<|3`!x$z;%nrs?%cby2~KE=1nf%E2zoN0$AoT`cq28 z%$O+>m6`I(vXg>sMP&@TfTL}EaFzp&G)oUe3Dn*v{v)9wB{#HiZY%R8H2u&njnG!1 zi8OQBHe}kr1q?N}5L-{|9^g$ZCf6mcf)H;}u$0vEn5x)$_nF z((JDw5HJVE6aguE;OCJu-s)|wlDyA!(cxEgL@~@z(;v1O(sZu`Me#K9rz*YM$Q)0O6R)CPU%O6q-eSt|1`K!d5^LG|20<}a2k zH2a7aXLJ=~h@f!%-Pj(8u4*X67qzBc@aWQV>OZjI@F2upm>=YZqQVFXHf+ffv4pZe5~vFJJ!sOUHSDldalP$vGlNn#F}#+`Uak|K5p4#Jp_Lq 
z0`{SAO|E~zBX7>I9QS>B6NZ|*=5s|vb+S70Km~3i2Rp=h-TUz~$#S$Y&+;yiLaB!y zc&lvd()O0z)@r`Xem@rsGWt+(1{y;9_XC6hTjoty?cP4;`%#hOEC?(uh(m?Gie4J4 zvY>+fq5ri=Frl^3X|ZEeIr}(St$WX>dirsU^-u4UM}`mGuv|XfIjW)yoREIpUatSU zmDA;i7LB78b<@e3YR6QI8jBiJF?9Ze>rPX7RM}s@1%t&V=)-2j$oxZh5wc?C_j{xY z`U6(d_2r}ftWlp}W!%ypu2shCpdtLedn=E@{TpswN~v3?cCAM4QQw}qa(z>&`CPsR zcOlzn#rlmXY!Vn@g%zU<{5?PtnK-7B>}AKn_o9O7jMU@NG4hhXu-sOxrA}*Rt%qZk z(T!s=XA(v=t2eoIRzC#-EccH&w65%OM)6ZANl+VP9|Nsyd-bCmeT_c^?LVjs2#lG; zBdtwNVwV89UiA?Br_5@1J|9ceG` zADT1{5WB1Vti64c``Pi59J=gpORF3CGVq6}$A+=aH0ZF%U_IfXojN zlfK*?kcB}kwEds%)8ve?ixax8-+XrKjH(WYLbpcU6q%LWic-N|=_m8aWs_SL7UpUP z=R6Wx56ODS>bU7e7h{J5INsxUa|Poov#?n$%{sIvgavsC!3yi5^9Oth6DDDZkqg!j z3u7*R*QlrPl7X0A-|H7F+fupJQXk(F_g(uk)g0%c-@VQAMX=Rn*{0egPx&v;xU%a| zsdu#aX2V7d1mb+4D%g&s9dwl=Xh70dg2^=dxa(zJtG6A;SZ_q#P8-^vC1&d#emRXsd)P#bZ`wTB5aF6udNI{q$3 zFL0f8?bcNU#aAW;g6UIJsT+g{%hjf4^(~M z#0$aIzK{%EFMr?Y`IJ$6KH*%N8L-A~-xWn*hQs z-hWel5fi1}IN4ICvH{L6y+%=v*UZ0O*7($GlyeX_c$1b;j4?pLqJrv?SowW;zoHJ) zT`WhDn;4hKQMOAo<;|7EkA*Ufo@?omK3*67DNA#tFV*EVetAaO$-xAf(Dvk$sxBr( zbTK1xw01f}MH;BQSEV>4?uRS367K>#8hYj7aq8Zh7&0N~Nt1asKjqNDPe;xq_UPRAS8mYS zT0j__*V$XKvG)!J5}J|~h0u=AfM{^{%<0~{G)c*ydHYW66r~s!lxBGP_AnM(JAt zeH>A_6TWo~Vnt%WuazGkRD(&gRpJ=~+`L)U96n%Z#iQu@w6T$U<{L)Axk3YlS%vOL z2lQ1Nfv7Fxr)s{{lZX7*AV-v>M60@WMi$$UwMnny>|$nQ`cl3_`r`G3s_L3QL|I=? 
zr0OO)QM;BA_4692DQU?9U&fId2+9jfJO&lGP(Q8sb5WJf4B0Z`HikauPn=={e0M8K z>;Q0_IYUUa2X)Q^)DA09T!AqVa%HPT{Yafkf0s-P6)ohpBA46|_@&q-OU1p6K-d@v zeYGa2X)|~!c+8RBk5oN{!--cCYRoek^3}M_2^U-$S#bu#CU+c6$%<|5ZqT@FQ&P{C zD|9YZIj(>G$w=>Q zA7x$msPl;xZA&;yuT|ZlmQ>Z1$@2@1dmA!c zHeL+;!A)~-C?NrCBex8pupEK}&N$tV?pH?m+ANxqKKk1zbkQ^fKeYcmgztIIWt4(tlV^2AtqEEVbD6YYk&}@Y`C%1U7C6>Y57a8*z@6!)iaZd>0e2?$60$wH+UaA_COOJhvtqrf zhgY24wLo476~Gm`5?MhksU-z%pxFaa8~eneE|b=k4qd>n83S7NPg!x1C5Zwdi{1{7sn{nVYH>3nFwZpoC67ep zkUXC*s7kiA9)Gz8S(nWSt6>-_D|P($&l@y30^1C0|WsBE!Us*XK(c?Ca_IzeP;vx2Aj2V zIFUBJCa14`w>qY;PmMMf{Vk9jxUy4Y!dz}hJQO&ZJ;2rwhn^j$G+J(W5Z-0dZ-dCx zxN*pC0vx(; z26$L;*_aey@w);G+lQT+G=GXyEVa;SK@3Y3Sso#~s1H4&_By@L=Qbr`r=8foVKKAC zL2vl%Pe#JjKT*R2XA6*ixF2B6H{url35q*bBSl3s$AaXoKb26U+8|_u_}-5L0v7P^ z{Q=7NAWWJoVUaHLUvb2DDY@hnFHhRo2-?`1Qr0g(*%cHPou&_g_38cD8Mei43kj=Z zE;fkoLeolY*yiNjoY>T#;}q6ZqM{17v^;)A1sdp2;5W=sBLR@ZKd5w!Bz_5~M6E1= zciHb9RLC2sHj7JJ+(LNO;cXg5be_>z(|fIPTcbTz&>nBLQ@1nNEs$Jl=W4*K{YC!d z9frt++$W6)$o*F>V>?}nT^?PJa#p{hWdgEx@nv;JPsSru2rAOU93R*jTHV|PrEb+uKTX5dYr%rPE3i>F2@);aiZI(=2( z1qbpB95zG+KJ8dPg*ryLL6EKdH{4fpskt4VeYvy*p_(Za(uvJx07B%72w* zv|Kjc`bhV#o+q;<-rWqr5tyJ^&CJTvW)T9i#j@eCdVj~0GB3%2%@|6#6vn9vYAb1ug6FtgKFxDAt8S zGu}H|E;h~@L>5;$~Jsijc&Ok0ZFn?lWR1vf z6oruNWEm=iEUD~UmXPfGI+n68A(YB!u_dOmOUAw=jV-b-BfHF42V;6p>U&++?|Gir z{d=DKzW=%ZD6d`>^Ep4~dq3XCF``r(_@GF%?iWptxoev~gWJ6=YC-6fj^E{eZ=g-9 z)4yU@SySm!k|4RkyTO~J%EfqVy0j`o?b>Ae({JA?j`MG^Y~01{sV&rmqCK4Q9Fq%1 zu8tVaty~rtD5QB$$c|P_@?W_1YCqRy_%;*K^y=k+JHc$>ONkL-GS~;3x%= zSaL$yj)fFlJA=Kf-q0GjUO$(ez7m<{v{70$e&q@hLFD$Cr$vJM{zALCBr{6yYSP42=k zQ!NTF?-EuyN&bDcveOEQ=h`$wZZ2c^ON)VhT_=4x*7w}~IPQ6ZM=HfSVh9XKvvmO1 zNY-xK7P3d08#cAuP-Ob~UFqoOMs*x5W4cQFA_FY4%L9q;1KSRT0*A(ESviGeTi^iE z1sfwOaNM3l@|&I9MHhDW3Dqu$x0KO?adNL=%GmXcI?fD7#xHHcmXsHA@LJ1q*gPH4 zb139i?~3KZifixeH5|K3xP>OP zuyu;)HC%UNPjO=Xiyp9-5uWpe?6fHib@GYP&e`E>1>ICw$qaUV%nfXBZ6=w+5;!^l z3mqW>9F#W@PN$Yq(l8{3zY9snv9|aWHpYib(zy|Rpk@XYYWfM%qD7tf?9qb}9O8&*3Sf6fc5PFNxu{98i 
zmE&Hw&}g2B8TJdi|1#(a$J*8;d3FeX$DSKlCTubwhaovQ7C9GBR{xy2W!|$%F`anD zSf(UHd|0~hX)8s_Q#L`Y!I2qUo_b*14`9L>z=T1%b?B#;s)$wugQm8%F*A+3!y)@s z>qkFb<6q~SbfpXLe$JRAAwL(!aFtsS2)iW{@^$c*RW;W+f)+CcKb6RQJ#!*0qUbqS zs=RU}w^y4eSalL-*r!fKPWm4n%&NDM*dzn{0$HxR??&JH$&yn+zVeP%-PyPj29F;` z02t|kVcRGg^P6fd&3AH_Zt0InN)a<0IwKG5pVEaH_?LW10*rf4&F_Y5-Xs=JC=2XLj;G$1IVP8dK<>abakw>o4S7>|`;FTUCrs~82-1U$pb=D2}TCJo7f?@K^-7ySLU zkf5I|4`2G1UK^;rpm@c_y~rAn6?}RgprCFnTeWJb7qoY6sLe;hln7|pzqco;*HT%i zPQ@+D?b_`-LdU!;ZTHk1w|B8ky;387)y@iUYSK?;Fus-Jrzegg=!1-;^Fw=OPL4Nm zuSTD%sPsLenYw&UKXB!TM@=t{ z+~c&bd`w$Zpk_GN&)B|_vM9WywKfrp;ERHYEBoQ4>d6I8*~hC;`jw8Sc9*bWat)4Kf^^AOl9Z}T{6Lx`w#x!Q z4+oS_oPZ511SbsVoIn27Soo^ykkkcFn>4PHpd^1qJ6~psKCn$e(Mq!Gb@RbQU#dgp z*1NjCx#}GoSkb}Q&XZ5=ekuDqp)Ez0L`Wm=NN>x=Y!-pgFlbtmFOcbc@7d_4z@aoK zLu>7AjFT}1ufOSKrO;7uB_RI!(cVhhrUNeC`!c;IzwWxTTuzxt)l&fzta^0NenFY2 zwn0x>{GPBw_K2QSU3a^L?dVA-zP_LGN(K1U6~??p)0WS9uU~2An|&SO03lK3!Nyr% zdddE|xUKgTtIvVi@m!x+2lm8gnYpLnnDub5IbKwqQ`59h&xB*z;9_BofEv$C#==5c zH^*)bK=H(HWp?|p)!waXDEGSKMSVFAb&%?5t%;qn+5rJ!y8~TZcaCJ* zM>_7s@~HGu`Z@`Fr1NJ?8W+=up1}Jyw4zrNz_e+DAAI>WXixm{B(__|J#NbzD{eQd zDpQ5E2ci@LxNN@)&ARgXLEf`(wl6z(C=1QkM5W+XoWxx0Vn!{reHi&b+_C|2D|tqh zTP;Ku-#JZ}oLpAzT5RpCUE~YMLzUOU(#oRh+j#@&Rk=R~h;jT0Yr=HR&sidv89B8FrBp)OW8N88_Ma5-(~K8kz@ zPvPH!WC3P>Cx68JEhLEnPI3)XY?|VUEW8I4yRF_2Yn~alyy7G2QlBK{{{>i79sPoHgoIrT;NO7w3FL-g0n7?)alzXH zkzpI@wRJa@nrDj3KUVY!v0>%ppnL+b0<`$w823*q07|xbP-D9gGMZXx^HcXC%XDl=j1$Co~Sv9=fKt=)Q%U58{3OGeP-vzlhurva|_$RubM%~PNN1i9GJY8B*} zulUjLi{?;ZNvu};wSVt~DdFhHh35 zkOHaAhZn%supz~gWg7uLiH&z4WaI|DGAYS-3Q^2-KHtTqM)ScYp`__i(7%;(MgK$b z)SF_#3bcm*3wsWhZlLZH9^N#PNHdkb`VhT(BfsC~g?$G9muCCqJ5YhyHp?M2RW0!> zt~03PRkVh70nVmK%J0Vcb|sa&u=OKhinDc-#E=8^2XOh9*`2{78rIpB5&3YP@lU3o zT}v8}wJw&*)K5TQ9Hg!TAWk;G)<9=pD+_EUpFpkXOcJvTI(^0 z4Y5BejDY6QcFP(pW~PY!TU_h}kpbUkfv>CfDkerqeMFuuwk@aqC91sexf=MtW)hlVdN%~* z_awVhx^Bi#qLatIo2jlfSkrJ;QCWFKp9@oBCA&zHK{C7YGnimy$q)^Oqm55Nn}=_u zCmzM;HY~%P?eJ_C$L;%T$7_%aSMJY@I;zZDl+JOCrezzD 
zr#f-mTI*Lo>gaykb-IvoD@*Q6ri~VhFN`<)HE#k>HAoo1@a+L|&5cJ8bg=xuzOAnC zzceTVF!&1$oE+!q{O29>P}%kUr~O|wIq^q&X7+eh>9H{gP`q(wHNE4)oQ^V zXm|Y?#m()?$o96AyZ^MgLlox|t`HUF{t5(Tu%FV*iBTsS=acV^vbCSYwMuTs81jcN z(?J3mT9G@vJYdpS3Ew{#xHIX4E+(HL9$9Ke&@L;==8v_ymX$wfUc2!9c^mt69^KK% zY;yJ{Fg1KGnHQv_H4{V@7*JqR0p(BA#11?dYy{j=JjV)qq<3K4w?qd?Tm~4^3|tW6sUuxoKtR288vO_%5C`uD0ig$b=o!RGedDhpg!(S*_jPT-{xQljF3e8BDjW z5-?P?Iz|2iI1~JS&Ap3@Y<@TXrOpatr-Yk_xB8=q24K83Z-pST9zHk^YEJ-c<$o#$ zlc`bU?$Mx!0hsy;s1?{y^lB;6Lfx-Rxtq#7qZ*!=TO+>N2E?PCd;eiN19C2SZFoJ>+aFl3$(?4`3f~=6|ihhM_zH>BjOW)8le&MSEw}P z@RG=@(BNsvTEC(Vc+nS;6}bm(R9#!*bTfVH#-OBss-L6zkP! z-~CkJ(N6!>xhXl6Tu1D(CXX)!*8AfdLhG$hrv$$& z)2sfHplVopB#-%j96G*_tWyd)zi8kE_fI*`aqK}GCKzUW>Q0UNYprcwAAGNy+Qk*X zC{+|0ruX$&Na>2@aUZ-!+arrE=1Z5%Uy0N3`{_S)XBemWVOIdqaTu!at!K>&n#hujqsUJl$D0oz*$rp0kdlu0+Qg*x{Y+Ij9X zj_kKC(9c35j;ZqOKmj4#?GVwu^T_sRMaUmlK>eB7K@r8 zxJCMi($hSs%vfQyOFFLqWpR?rE5e9Ydf27bGKm1FGQR?ng#P2Mo;G40H3AsNAqTQ-_X(Rv0j=}q^XqX*O5pA5rh zWUXMpQnbXdCOs>?q0xqX7zn0{imE)7Q#0DL>8IF5D@E=<)1b@XJrT4H3ozi-<9m1pVS5yYLHFV)`$P+X@a4bN@yfPYum63y!2sw+-Z}cLHHEa#5jWwF< zvgf9}rWCw8Q-C!OoD30!qPYc?4)n7aPVa+nF!N#v`9oK1pG7qIbKD*)PA?Lz=rY;b zx6zSuVFj@8rR-|mHolLwxx&Tsv|bLzH+Se+F#q-}Y)Cr9%uAvhxzQK1EA8Baxrg21 zDnswV0Lm|qBtw**{)A*-sEMj}%(BQvDf+36JI+WjTQ@NC@n~|Tg&orqh#V#b5nbN- z*E#DLsSbH~xSR-IIZpNUQ|u_)BSiemHfX{|4#Sgm%TIeE;G$jyGt>9xs9l*umkN{* z$PW)(Y7r8IA?-lXlu4PC;b;@~a68YRj~~IlK-l?b4t=WCp-=sX zIwBVBO>Dy_H}AG3{E~igY+Om5hNMuC&gaN}+Xxl}NDmG#JZNozXmT3aU5mt@pEyt} zWiZ))jG55^5PmRA0>Uz{YPTmdBiw3FPRW#i=T8{7)ZTh(b0y{Y`D0yUYzdwk?2~q$ zA=G~&@_}G#MCiZ-VPy)vU4ndkd;MpK1!1Dz6qQ&OFC^ulQPY>DZsu@07{BKUJ&>1{ zZ1|Vht=KNB-XCS@?jvOE#F~+}*d8c2djp)8c6U}M^Ba?y=0l|-%?Xsp^INP-Y!lj3 z%a-Rm!av05LzzHI1M$4J%k=NW;QtS)?SJ!cm3=_Ftc``4V0NlO9E5{cs4)Tddvuoo7NgwyE?8N|J2bN?UcqnST27jF zQV;QWc`#Y0+FEp#W_w<8;2Mm%2Zz^Mi7xk=a4H1(dKySlc^3URwYpi-HYGrM))xC4 zGKv8Q;$fxG4SQ!y3Lymhxp(+&$xj*`yViVK8|}_nexSt3+cNbV5}iR7C@W9Bg5**k zmaQMQ)A?eR-Bcop<=G^Rg)kEQ$U1W%c5i+IN%vI_2-x8XCF00IKai6k9pYF}<_`84 zM3ck-8CYnO_f(n@R zFHr+YacW`%A4+KA 
zLFCc`VuKnoQw}!m$b8RkSBFrU4KMusK;q-W*%q{!NXq@o0o zst-H)pBDqjV!PqJa6IT~F_yNM#3Z>{*_{0>W4_}bzH6Uos^aAdI-#TqSB(2bJ_4MA zX*Y8G*&+QB4~Eb-o%eEbUw2Tw`?XrFpg@+>BZ*~s>bFXs#|09seXs}Kn`ZAoejB_} zAioVzJ!Jq*@;K%%%h%>lKtK>(zZ$cheW?%4`~CHe0^|2i{A$#xd?zcRAJ}-4Ut;CB zSep?^ft$&3tM`L1JsL4Jxi-*-jZx4%BThT!&wi+o4#u4y-AE4<)c|D8n(St&gInF< z2?n9^jxcJUqn~03*|<+=`r!LlF3)w|NA!z8R|}c$PP7M~T?#bX5-W<#||f z8}+~sYc)kZ^3UH8a#|b&=cbJCZ`Cfe{Qhn-{K*rXKlCyCq$^I*FSn13Al3>0u&di+ zY57_HC!AXauSCYSR{+zb!TU!Xuwq~A&LWE1Wz1tk-h?Ilz@s9 zVtw#PSkm?f;E->Sr57+q$mbW8B`~!;oAW(r+l0|3DM6Ot59KDtfin>d@*NA|s3&_y zC83>GYd2gm%j{z0gt#!}mn$l{ufjO3#GNPLBu$xOj|*|UAK`8Mi42KWr^?~S7YUA8l-Dw2ilnwr^4gzQZG1aN=x7z6ke-==!~{&TvumEUb>3VuB5y%t9GQ(eFnW^+*$ zekI#oSr6^EpN0TwFnC7TdLU$%i$}=u@08r!t;LNEMd`0)p({u`DA6vu(#C11Zg~3H zkE4j2;j0$u0q_+~z__+JLTJ65uVEmdFjD5SVW-T9j0Q zj{CD}s~GiRJt%L|Q0(^}JW;j~V^aiw}5648KgpG9LSEy1?V8KkUJ zq5l64VS0!6Y+`faApPYo0O*_W2}UsuZC)|4sc6{GacE`MAfL$h8| z;C^zv*t=lG_h!Su^)>6A@J+pH!(M}z1bggCx|G268IgjC2l?=Rk);}L0(d^vgPQvv z&qoGsV(UF{6L-kctD!7}{gO|3GV+wBla#pW6_K>Yi*4d1#zNnya0$2x7D55~btpTs zqtba|bEyD1U#h~Tw%Uee)AW%XekLb6tS+wJ4HGZ%H{Y(u$>lqt;3Siy8xr^6b~P1- zJa*btmzPG5tyt;?0{q?&D@J8rbW|w3VVYCe>0FvVWA0m-W34GPQLcIrX#F=`n67iv zL5%-<+YjEt3i+TwqQO`0dxK2A*Wm#8I!5LSb{i}gV;OSFK9j^5?{fK8#T7TU#Mdc5 z&R1Hh>nV?}TO_xx#%^0u@5wxUqe7qS#fHNE`?lu^&!M6)|3z~VghMw{(xtyQzWW%0 zB)0m;9FD?qmQV2t(-|*u!h2l;77sbr1iQX^HNMnur7kp3Qngl{lUKSZxy}OWmSy03 z|Hv&Of!ZQ8pZJ-OY}~DACKEqJ#62*9q;)9XA+_>e99DoZ;If~4wfu=f|9G|(0+y^* z0g|T!mEImrb5Tf0G(NLU2ZuVoU$;2g*26Z!_Ymf}dxT1E*&i(Z&3*>?N%P0W zK_2TRGB!LO1Z_ST+x&~P*y8>@PYIf@h?J7*M1PVAM18P^I}ls zXtM4mE?u;e6rx5ZlD#Rm+nE_vqz(`SXa5l1{Ig*Czv(NFILY?SLs?p{e3kFo7Pon4(SXzQ&9?TIhDG+e#lE>EE-9m#O~^E!^o zIL-U6j02PHbKWC^&Z+9+DGLd{Xra&hSpy*OW1CwAX-;x^nylJe*1vZ@PC6R2!b~xF z%Bs{Rw_h({`lbkVeXYmP@?FbGnf2rnxdVTN{)g11_^_y^JBYQxc} z&*5!Kp|yjzHC5os@S$wWabZ0Y1VQqwtYyR>!>SOt^NI_XL<>cq9e??d_hPd2HNmfb zOe%`Fgh!ICs#~Ul6iz};AfY`=%)}cEwwe+L?w8B-U2xE^Jm38^!w_=RT9z)~v0>R= z4ko|(RuDReCEkt2%sc=2A)l?LFySJ>$8%mNf77VSy363DU>AVyJMDQnOb!ytzghAz 
z4ub#eyya0=^2x&_AXZC8dp^b2Q9Sf088Xa?V^0=@+%7n#GS_DmLo=SbYtaHD}^ri^y_Yb5@mzr$-P7fJcD)da5Y&eIVsR-oAA>U3V@%3-Elj9s>Jx|E`i&XpW`j+s>J2a2^U3Y$Z~&w z{QLw`y#GW1=vTcCHhV{OR_!bHc1C@Cq4`rY?|LlDEwKXy(yE!wjWc2L$%kik@6TD) zpreqKD~g=x-;n#3i!Ozujkzun&3BArDJ2=Je#yykP1@mgsdgsFC-ndrxV!Tm2qwW8 z((OQay&D%X&I&&4WK>h!wV(Hj{jBzZM5*oF1BucZYd{bn839qcIgM^KL-1IdO|%3L zhiqt_aSFyrTO7M~)Q|Q+tZ$B&ki<3Q7|pR9SRF`3j!&_)~C6Xexw@A}L=U#{+VTKw`PzaKE zNYcFTU}Tmo{%xVMWRO4UDbJI(dpn03?#O>1aR@ga*SgzUh>yQXI7)gnYPImee8=_X zkthrpmoLV+YsVe-nJCcs?=TiR1QihWl8nLxwTin|x<$L2FM~sJwF3beJS^ zD(f8R%NHW>kOS|ys?1C}TZ(7--bJchEm%B>e*7`!t2UQ8(@Q_DRFw{a_pS2SuPFRe zVm>)a2L!M`V#R})yBPMR8dS_`nZ-^0?A@W_tOe6q_0+3Ac9v|e!X^$n|1vw)n= z>4$dhqeb=+8o}Ii@bn;{;i!$EpRcIV%}U6qDEt5w2RM68<(YvO%#hay+67#0%@dFx z#BI%_fknUanhKGDL7I%Uj#kg76yf#T8)s@Ofo-|{+qQ(~CFHwAApx>Yh=_`88qlS! za4VPmWAu$S#_>nkV@CbB^P%rMCZ992eDI!A|o@4cYZcV8~I+g!d?MQN8R->US% zN`B0Lsm0GvVpMZl$5=7J_`xS{^R#T1YY-sKn8Q#hoJ90@`3Vd3A9A@5r0D->*d|D0qVYV{iD$k`}>r9i{ z&k%F}px@*k=0 zZlFPF8|Cl4Z^3$E80biHp*X=ymmD!~6pLUUO> z$!mexs1w7b=rgTG*#DL8RAl~R*r{2A$>d9q>*(8-Cy^P7xPv9o8R#O%bqBtIIvy~4 z#2#vz|21!`Yh-Ml!1rLy$&L||Dp`Yt^=JM+LoK?`ydOu#tS4+Ltp()JyP57k7ZvEw zf2KV+{H5zhnc><>1y1Mc@!`e(QZrr5#mhfN*$*Q1y#Hj9*HEuM42!~d zokVsI!Vj;2g8BdCg2Y2Ac6SfZvK=}(=)N?0anHq z@4?E-1iF?6PL_x{>*Hx*vOF^fhRPM47)oKfG|u~oGQ z+b20X9IDg3>P5JYs4TiGr=NSVC)1kwL1{adc58mff4c0)S0yN`94Rpf^xZm$o%Jx>GLp@nRO00hEdfntoRymY2t)RnI&jvM=}+@y;FGW`vj5*`(L5 zFPVG{7H{&W+qJ_A8z|vutqp2CYFN9~bT$!r=arKz)|Pe)z?M_5vAxNSd~^)&$(r9PI)G)>067WTe7N*+k`PhP0Z*P?+*y6o?{mDbgJ~z!fZv3&;QI5_dZt6$ zTo)jn3qAjzZ*&u6k?7XIt)gj#eb(Fk=P>0m#ooo9D_}AE;{}%3Z@rw4yxQ5? 
zQ<=yAOj8374~Ne4RCDdq>lYlnl4UMmykVsK$L@o37K0Z&MbIx|NDxx;K&%Nw=MPv3^P+lEf5cZEvHOH|3dNK5X*yjW*8N|CqFc__-NFb19$J#hZkLL zEY%OHxhXc72mc6!XADNs4n>D3q~V&z;WHcK%UqVj|qKmkTmuuw`b6bdsX zO)%Q09mbLm1eIF!VWYQ$tUeoe*yu(hc!yxO<&iN=(=qIdo>y7KwqYq!6moV5c z@X7Hl)ci{m?_ksNf9Y3AP@A$?Z!P<4nb$>~Wn6D=h>1z!6mB6cBR~7g#vmLKC&nz@ zx`;CGto1){$=X;04G&y)Oa!u1`V*3v0ob(K~&DL#ys)*R?bvCQYa6ET~ zE2XO4o{y4o!BSMV2_!=2)W@c2r0+jZUh!{b$dd$^kp7BIgy>b5FJ{hfEK`smUfDW-~T`dv$n&@>Ezot6H8s&VP-${{}%d}JcoXOmW4=CLWXA&Py zu10fYV79O<6T?o!4|;@7ucnT@(Z5E23#TNG+VU6<1HJE8%8ba4TH@<{V21kwECM9q z-;juj4RI6X6D=nk8?l;&LBzrq(k};DwA7$81;%sGsUzxkeI)?C7Wq;d(Guw_-iFy+PIAEN*fN7 zRF(kULJB;3zv?$+L2?kwR~#}_40d_)-;A0p_NbjrNJ$Y)ou@=xU>*=(7`Ok>uHE3u1%%d0-;rO}hgF15$#a5(gh_CpM z)@et@YC_&a`F&bL2;()YZ2o$hgs2Rueb>=Wrm7SZGw0WVMZSPn+;9Zwl zGlz0iPond!kA1bvJIH%4&P*gYa3;&OlNO^%0^{!|$^ zI{;J}?HRaI-rM^qQSlWN7En08dG&Yix;V(sIe$dcC&+`Pyw=vMQ~1Gco;TS?ZNPg7 znOxoFD6=9l#2-`=^ht4Fd!_gJ%-4}0e6&Tcbd1b~WK7u)J;(X>dOYW;iz<6~*vW&_ z6Zgm(=o7yoe}~F<27lCDAl{g=Vp$S>yO{B@cr>nHg)-YX*jog4Gf8qDmTQM>jKCvE zdlSgX8^Amub9EfxpemLoHdm0KXW13}(<;zu>(sDIf{fus{!`rmZ}jNSZ4I`TfX&tV z2}<*^F3$l!@u%k1*YXA9(|T06C;$uMxxsD}J7y-#-)I3;qi?z_lAhu<8cai$Wj%&Q zwR$evYvx^ekbGOoxs1RP^{mj*FC(x~-Fy4tgYmmZUQwhDDbm`?uf#!!2LG`FuI;wZ zQND6$SsXKs*PAREA#R=@|dadCwrzmMa)vt_)~y}jz#Ap?2$W6 z<;Y#2od7UG&SYj*AUr?32N12vM5CaoF6ySHETy`1u*7_-VyXO=kL}o#ZCh|OK#K(h?QSzyIw8s4MSJC{NBG?}Wgk>uA6kw%LO-EI4pa zl1Mugkw_5RtHA6T)Lu1pC;StuJhQ4+MyuAA}ybw>(DnkqDhY`9+A zmeTl^+^-ee%6z@2v(5?eds?=>d%$%p2DJxFaYX~2yOc?c%^^CNb#M8XjWjsWM43IcH?Ga&r-)XP ztltJVi`l4z+=*DbA%<9H6(c9cePST0C?7t|KNsRNbP5$+%lFc0P;|1Rc=dFaxsDg} zw5Kt}g9}O&-xOT2fIa4b*db~L`MQI=K)#LF$PL-E)SO<01`DH_D@&J#Uu2^Sv>G)h zIpS2Kc)1?)TR;D`{Vk{)*7>R~X%I8h4;&KO%xzytE29bc%!Yx_m8W*Ur+*!{dGKKI zLsd#?BMu>>$F^$uSv2v@uGhlaNb^=A`@@&QYi#YUqk*>IV#`X4AeKOA0idnj(A4WJLgQ;lM~T+s?8empAx4Mx zfT7fA5OsufLm;-6&UMg;PH8`7?E-s;$d7N0UjtLC~q-OoyM|Hc_^9M3>{1A^QgJqmaa4&k)R|Jvh$iMay>Mis$$@)~uUM zGX3ldhPUn_A_}DmEfC}P)!EK-PMi!WFX6f)H&R2t_=v64T 
z!jBcfTvlxzJ1%@fr1T=!nI}W42>dxHA%_k%95wAg*ew2}wL)nq*uiddTU*kV*H^$1 z+ijFGh$i>T$chuEhc{o|aTd+Gnu_~**U&~|s>{EV&lKi83`i@buS!&n%KUGub(7)MW zrr5A*HQQ??o}AjrTJ$m_T-oR&d2(b*`&d5(i@qrIs*D|sJF1&$7^@r zpK|dGvui$e$`IT0gnFkU?2=WKk)$a}1;01a#eaP`vDWVO39Yhl6aPi(RH|R}!Yg6I z)2N4AezFHU_z3RjV!~}m%CF20R7vc~lQ~&?mWNkd89^i{Zo`><9%MW5U*nLQrq!gX zGp*>!jug>_6}wB%BY57GxP%HU_L*NMKM|vdpy=Lqdf1L@e}VqkN3* zvo+#q(v4r=J!^Z^)BO@{wyyB7b1TyR8Rs)M>P5ekjSF;LU&<~t9Z_MWOm6+0^)K2N z2D<&ClKgm$?v>ZsS0e)QBw9-@x8FJ^af!d?8TXc z2csuw_uZZ6&OB8yweeD1jp|5;UUoevzC(n6J9pf)eNV9X%_G%)&Y|wbeT!9 zW$5&9h`Ng0=8?}fwMAP`KUUWgJrkX2zKEh_vq&x3q<2y>`L7k zqR&d6|BG@ezO(ca@1{)jK2Sy|M4GqwSWz$k5(_;;3=usYkgY(xYyDd8s{6NzhzLjM zifZpaOgR59Um4N?&Nt{&G%>~{E5MYk*jBvRJYA>hIsH(Z93Xu+-p_k?;T17*YlzpU zB70mq&Oq7lkCywZFs1D+-Rh^&;6G|yRCk%Vw$1&v?JKRyLMpS@vhc)G1HxWdA!dqj z6nC}LO*}$yhA;Xb-S*FM5Yssl(!;}fG9+|*z$nVmdnuMLXZ)I1piiKiFu_WO;i^xw0QO01IXkT~eykO86GcDVc;@_A zNJ>UueLk>=QeTziWm*O)U-w(N?S~-oXWf38rB}M2+_hHu;#oq1tZkmVc-F3Zo!kD~ z`xla9q>5Ekenaf`1XAtWIGaAS4N~gEVvRn8cCs>|lcYKC2*=m~$Y-+0@Yj#6%qgQ` zU6CUK1GT=NledgMRwFs?$#&B0XNW4ja`IS+oRbWCeqPm(oNlhaG#WU8iOs-E=rbDVh9rU62WC`0~+)2qw+wstOR8YbSsnZRQ0 zt_D$!+cmPl)WS3k`_8-<9XzUOpd`0oB?C$|w93&C>Eij_$k4By z`TI#$<++b9bmL|z?iva3d;y%@Pxj&}^UOh+b87pPmnJ2ly_hTIgW=6=5F6gUXtC^h z=UOgmF#N3%mOX32Vj#sX;-$hv*fTaC)yOA?j?3p7^Jn<_e~&Rd&o_MSm%6TUUY$Ss z)o;kXJ(|L@;$v%U+$$?HFYcxUp1#8~y6=nnQss;h?T^iTTOB(KJZD&sN zUW%~@mC&5yyRiUPyROG#|meW>WsM)aHd@I&C~j6Y4J-D$jR$4oRwMke@`Lu zKcW};_us+lr9dX?O5|)>^DksFjTLf`|QIUwTG1{F=z_=uuMr*_UQxPQ4r`nv#iENxDKX#F#p}ifNz@T^XOkM7N0gPnkk69v%?H^tGnFXQyzhyYV)6PIy_zGEV z1?Y@}hwK)FjvjQ=a^K!NRbz-!+IlHrHasE}VDHr!o{XKB7YMf$7id|1*s4A@r3pu2 z_Mf_530nI;8Vs;BsX8UUA=Rwp z95!3Ya&of#h5m=`7oZZRf zz*_*q#YS~KQZKY_XWr<$zsi4m z80~T7T795>crmrO3LAj3#{&6{hx?!}a&Qx-oh?-KA#Bj_r2aDfV`|u_GWOZjfDIi9 z6MGvhe@05Fqz`|pgp*6Hbj}EtYisx13)-IdcBu-+-yiQr98+@!z2**rZe=bpNFy7d zU0<9oRXbgouqr^jye-gM8%<_~HBRg@=Ykr+q7tlO0WLfNz=!7chY#&1Iuo#(q9}rO z@W(z(Gxu%ii0$!gO;KxzR6J@tD-Y|WO$bm5UsS&|3BQewCd0uR)7}(z@Axnd~6ECC0rP^Sxg5KZ-C 
zLmDq6uecThJ}?!=5ech(TkP~s{W#>4Di7=E^mTIjg2d9UdEoIu9x`K|NT^5kSo#Rj z56iZceQB_|G_QvY~(YWVCtE8Uw3O54ucT0TTANE8hzv zZCp8Rx_mf9R6ia0GRLmyz_xj&PA~sp`7ei|A~(AiQ_#KgT%y2zTh z{%*l|g~3c@;{?$NIDp3(q6lDqnO;C0b`$}y*CL;+X{89-f}3qkh!`Gf0syAnnfk=A zTvVYdkbbtzoij|b@6PGCxtDV=K1G~S8;Z%B0a_-$*sCMm+!i0CU$Tf9x4s4ml~;;3 z560nPWUblg-w>DIkd17up8Z_ROo0yQ!#@ITJry2MAJVZ;{)X(}2QaJ6RwU0)Xee<) zozVJZu!jE|hm4SD?WD*Do{`5)7d3bzG^VNGOg!bQ2!NVKf)-&_18F==;Wy+I$q|o^ zlJ4*>N(l+_fbck*v?irgdh@^Ei4L}-#mTp`Bk(<;px1+|mjYNx`u1w|D{cdkv=iQyx zyzNqQu+f@Np57Yd>#VALq4bphGU5!CG+rsB|GpL~8wU+r+Hw z&a1uN9UK<2*Go%NS_gvsnItsKnP9rYK*1HJb+CIeN%A1rHigpm_v!X?EG{l&2DLfH z&7K|!on;ev&#qk6jt#Kl3I^9J6^#ZZAGlK+0Q6V}##L(m2y(CwW{<#nFfzM>&ySPB zoO0}0%#tGC7i$uBC&>DE{77yNWRgcmEde$oJ@VBRBXT2rPg4*&G=DgfNDX5iRKg)x z7kFrUzr8H})}G+o#%wK>TKOs=`RgN?m&Ne~%j0`y$hOKT%tST$hW@ctl9W;8Z^&Gy zDImdQL=wKt*TM*9pwZNSu5Y8y%f&Z#mfz2@;4b&p3O|!V61_YKSA?H_z$qOdp{>B= zbK~k`iVb)(mdjtGN$4m#PPES;)o{EbmRh$Rf>40*3eItRdeyf&gDCkB1G>!*g%Efn za2s0yU!FE-;OT*m&>hebstHr>Bl=9E82n#fURM<37;bR=X^H(}oW-Q6_zF+4NW~8? zQCVyW-rM6xwzb6~M-P?SE#@g4!K1g=;M=0-(q~au`s_2*FRUrOx&KvKiC8>j6fIKAF?F6SWVr*t)Z)|>1{$_k_?z7n^ahglz%$zQ0| zTHNZpD_ybHoG4G8zuB>?hq!WrXkzzrlE1)z10f9&hMG%o9AfSL0ktQV|RZ0M90xF## zy-6<#B?JiB-$M7^zu!Lh+P}z^gUrG5q&FxmkfU;kkJ%!;5AXaCcTF4|xh~&cHg+BN?=Y z(6(fTW-->~n)-@#JXPj{bB}0uy}Fs*f;t{NfIL$0RHD-JPtp|pHPi{olKzQg3lOOa zLWw?JyVxgOuV%!gvk-QiON;k0AAjsho7}Q+H}WsxYLE_}MdEIeZC9`?G*Bz^F~F9R z;0{5e6~dQ!9=h4t@G(`qO3#F*WsMoLfMh8~##HS2`t~Hlu<2OSgEsbapXiUr>M)d( z%R*z^k1s_)cKv#-q(I2kBE*maM}7;ruZ-1E%#6a>2|&obl}B2{Ch;=Y`#^F{-yJ{| zrK3R}0oj_2o;W?=XFTQMkoBR%!`VH_(nMAQQkbpP@Gbo_`hhY87rrv>Y>OO?Yyf(( z4mi$MJenFix{R^EZ#~0i4cqKfI}S-}h5*HH^}p26k(=P=SX9{Hv;ka%4@j;h$+1OD zjc0w-S8w-jZ;F;$n@7XP0=u~GlI<)WM->TwM#KCN)fmN_4mnOE{zZi{k-QPQcNb2T zlF5)8P~z57x4(J8SkdRj0t?qN%fl4j>aQ$B%c(!4rl&bYO)OL4d66tUpZ-e+9;Du! 
zU}rlLg7<+nZA5I$78KMg+$xX2Tn|+ww2)OQno|sa&Hj~@Z>u{ubM)fXoHnB zg=dwVrqf?+zcpQMAt0~UZE}+P>KZ3r&@$}oq;BG_iGE5EYxN+lVyhc8vugSHKYW^jnwy z79W-zRBoK}dc6BO8y{sb^?tA*#cUWB`rBr0O!$57bZRYBmP;{&DPJ6v2C~DMRzIHK zaYzJsyp{lwHb$&_kSzB7m) z_cA*}B?8lfHL&=bpeoP+fop7hJ z7|}xc_`un|?G)|eiqqsebvw@CPu(IJ;{xie1C^fuSEOl!Z3_d{C^E4l)vUDx8HA(> zcBeQu-^nY^gy!%$Gt&hV6}2A^ZhoOmlk8?+R8>3@YX__}bl{{qXFw9FHwH1O#J;RVoxyN|kb<3Uy*(j2Xj|Ef@}w5K$X^@bB+Fwf z%$EpxNze9}cWW|}atn?PB(#%4ot;3I0Jb?G67J`Zo7vF7X-uSK+%YvD{qZSS=VC%L z`>mG*NHp0jRv<^1g2bWFg4~Ao{pk-p2~$=S%RY8M(v@~n>bjNH=$HV1LU`wEvheM0 z*>8R;Pp}(pmy_H_8^S6~IL-^23TwsEGZ;3W)T%}m`7zAAfU^<2ChwHezYy!?v%ab&7A~O5f|w5BbI?j^Aw4M99r%wrBfs z5DF*cYHIsEb!CgB1&zBS&B8Y4UIhBY2OsC&qgA5wNr3oifx{&%6252z0NPYmr1R*Q z!}L1Q4+OKOzTyg3{1a5Ma$iYFb6d8N%DrJ+swejzO7DFJc_JEt0Vt{3?V*b|)%#Yn z`(tqwW4|5oUSyg|o65?!n^lKdJCo(D_$Zj(rFBkX!==j;gf1JnDPR+c=)$&DIm{@X z=)c|nBY9#;rg%D7jyKZxq=us*nWg%H7DC|;PWeTy#bV<8S~=!bhbzyu$YQ1hZd_Hy zLK@U`XIYS922M~R+JF;JG3c2Go&s25>BV$1BH4_tE6j97J~7E7=f_eP->2HpcM_~o zFNLTm1XAOB$T~XM_pL$Ud=eju6$qCWW6r61zqI9ed8_TEglS8k4yPdL9EGfMy^>GF zW;a1a@m~5Kizo2Jc%ty4!3%JbWe9ei>f3&yz5b`)#I|X%HL(k*i3fr-wJU zk7zVosB)vTY^Uv_FL|*%T)i)#6a>J8YQo{Th4zAPoUN?MmO=L%+?*e@u_pdB7f;Y( z2xRliz!8}GnaS4o-l6Tm6}&vUg7WmvBNwb~PLu1~>86s2;FD0lPir9h`QxeHhvwB& ze#7_`6>dG2E&q)*?qZrZ==LlJz-+q3*}3KS`9QL5^|EBlRSQXc%n%)s?`H4nv7G7o z*D}vW*An?aRm7jA9kJ3rq|AynMFgq7bqN-IHx(m3u6xm+s>eY2^@^o5I0eE09S4O2 zat~xL01up9@EO?42BlN-&d=zwEE6T77uG*b`|hzhc6aq-T(R8`XSr~?*sa<6@3k{8 z^n@)-DyyP*hS+Q?>9u2{nNR9Wr7}ER$o8~iQx*X3QndE4XlUWar|V}$GWMK!eB#7Y zry?m-V3(1fj~&HMaP}qp6+0~tQ|cH|_9LXsA}0>N5kJ@irz_Djl7I=))?F>lE3n?@ z6yiHQz>$H>Km%yYI{(fLj26?O%ECIc1uwoS+$;n8SCHcNfvl_OOlSVGKDD*NCYw!W z2E&ad4Man$LDWrzaZ6OzV&dgp4>L0Sn~M~8#UY{Qj6FO74W@JO>Gz7DO83_*gPfzD z<%^#P z4s6zW5+={VP_u+Kkl^g08f4kJewam6C(2(2w;Bs4eCWfruQB?%6{)j^myc?T@GDrF z-Tm3GM1Ne4XE=jC2_KCOD4p?V)vN6l9l`x4ZiR&rROc{)jOW-iD2Tpqp8`}+l zJ@0aZZ36*>Kh{I{ki8B|9gwg7IeK|CbUwI^X>tuKLr{I+w79yg@b)SpNGwk0Iit=9 z8Sg259RZ5|N`vFGsKDg5f|-^zkE2v23RCANi+!vN#kTldkuiAOrY`kSB9Nwp 
zeFUo7*yirPNRj_82+%kG*3=zG{jI6{c4dv>If>4I8*zU-sm96VhEa8xiUis^4037t zR+y49y1f~^m!j1X5=HFK_v(R?{qwo`{Ce(u^dR(F#(*^)dKMT!zPxV`QRu0Jyk2&C zNTR6oTuPZ@93Pd6w;-9jgn&;B+!km7b zy^Pav)QQy8w*G3ODsnXt0)Z$1g4#RTKr5XJKIWQ<3i3%!K3EHL7oHz?!av3s->T-g zn47BAhk#Nm$gg@)mNE>hD>G$h_eP>4 zpT|}|&*o>i_cM}YjSj_zg+N1z(l+l#y7+s4WUBJN?mDFd&zB-A`vqG86$&dsQs3m} z&3SN4(?-Uk3JTiM5=7owTw3Yr(sx$sEGvQ$_QkKmJA;~Te9%_T2uWSwW8;NgN~>50 zGro4e3U!~%j<9^JB=-I`*w(>2Aao4i>f=7$O~|k~0(s}Ah32}#Tg~kt-jMTxy?>ip z@sSmMolpk;x6lH|GXekTExKd0fmR|w3mwq{^#hOTG`m)-K1w&Ao2{vHmps*ZIscf) z7eyYoXo*K~U;)3Z?Ozs<7w`?adF>3ikxvb;QHB7#Gx;k@rI^)iq8-6RCVB!;3hCBIPhPKx z`EuMXmZ`lu?Za_OAYZ@TxMB(Hk9KL6;mG&k8^j~uWBB1jx+;qKv-)B@pjnqR)$rN3 zn_`*Ow6fHf64Q6Zc{XAoo}=w*qR3Y=5p6aIE!n_AjA;htdt~)MPAqx_u*N@#!28?b zJuyi)t4S>b1CB<9ABKfbVD_2R@S>ZXPKS8Hp~wlb_~- zen!SF#ij$@vv#Z`Nk=p2FYB=QW=()MV-aS}UZ$djeR?d(EDYym#KHLEjNZc@@;`$S z*=EcdD9@ZzbKT#5w#Tt`V&7vH)^yOozKmPp;NE7mOkmVw=@n*IQNEFV>!m*kKyqwN zTY!_-&VcO?NH-p2LqSJ*R}Ez1Fb;_!Bpr7Brb2@MF2nw!Lh45z3*hLUqPx6PK~udj z;QGU_m;Rde^IH2GD*VKjIF;>oc12i>GP#p{dIYM1o90I8f%Iz8qYlJ+3}-EP{#oQ8 z{zO-|v-9#mtu*}r&@$c`)35?M0YF?TdG07I92v#mVF6P`uxdP=%-X!?Cva|{tjUu$ z*8FSr?eiKVt(w$8anv7BH*hc|U&SlL{OE4I;2~H!Q2zW3?G`_A*ct|@f+{Sf){9kL)2%G zWUA9$zExFDjGU%j=cz?G01c)bB~z*mG;LTJ9HjoT9^`N9k%fbK@ZTygzaV~#{~$mZ zYH!{M9I1?&Dd0A?8hD8AwhyTnlWE)cgBV>IDFY z5HMRu8K2^^CzP0vEzVgH1BsEES4$s=kclRM=mqfxG^Tw2m=)JGmqlfvg^8yI{0wID zJ?o*@%VXAF%P?9bT0}EL3_qj>L)9aB78Knkf$I7tP^~{?CH(>E`X#zhbNC7@;Rrm3lA3Lyk~<~EVg@FTTr zQ+Qc!B*jUw?VB45M;Ef9+dNrTTrQ2^v>GV9X>hZ~s6IB~2R%UXP04oo+svE#-? 
zMptan5Up`5a`$nxUGY8Yr=cVTD|MnjNTak+$XbS@R;yA5gLY4V6k@3BcGai)LCzah zQGY<5I18OEd@*+4n0KFN;#sz@OgoBuIe?~TwO@-m$@GQxi$`O7XH+^)Xdd6I?lw@j zx{!NBl9`Egg6}~Q-lSRsVjN%=293y=hDDon5)tHCEtn@*gBjFwPghQ}#gVn)0p14M z51T?+YMqfk*eG}dRiVQRkuW-zapEy>d1aZ%1{uW-T*`-YCA2uFABbz!Dk+zpGgB#QGgAF7Y+hMTz&gbv^m6S+Qv6;+bSUcimT7wAP7&`08z0 zL!{6aer+3wHx&i&BS^by1i)AI!k{AR;*Lfj?}IU3i7Qr0$`5V0>-`Iw;L~mhppSC_ z(9M6fBI^0L{^H(b{>{DV2)H*h7%T~{xRBO30!FuNNG#=6d409+?dQEGi}5G8wXc8S z2hzoSk#itBEoW$9Jaa-PJsRp z=;az77Q*N5^}<(%3=Zi7WswAP9*`GjMk5clSbxd2AHe-|Fht$riNz7(%)u_zvO*Y{ zrBe2-90(+&nsLknqv*Gg45FLs?|qmbM7LN&DpXrc#KgINMJJD}MPsdNRsGnfZ6Cf# zN`#4c(~GAd`5@YfNf9TYJVzy|F$5-*OXa?7?Td=vu`|x)lb8> zIqEZsj=1Rb52_yi`IXr&k&2@Z*O|5J+l*A9J@{?u`GI^=X#ik>T4TP!1fW&oe{0(V z?8*Z~$}M?;XP% z2vCeA@B7Xe5aYYS=0AK9Sha?nImE0&B_ZWs=Kr}DvkHrizX+?Ezg=s9LiDC(DK5P&X{4xiFfDJy@6_dA>wRujLY03%ZJ}hz=pLbYwmL`4IY0++ zEk;epIN6kb4vCnGIuG=vYEj|d*NN=P(z>#ty`kDqKM8Vmp8XEgKNLy{cJ+RwfJ+F% z0?*mkc2)7e?Cz~6X#b|dS0a#0cTs7A6n%@+X^ z8El3zLls!*2BMz^2}|YM8R-7~ z2tA**A-PS*Tcv9H$Io6m&u*<(tVGWY8>;3Iq*y;ZMQ}`(j?S>zBw<{+4kX{!zT&dO z6%0mArSCBT>OtrzaNdfdSe0Xl_V$hN8cgWL!t%Fti9=?u1A2Dv&^Eg8IR;a6%O8-W zSLe|VhXu$vw?5=544kJq0%bu4X*Qt*ygUMVumgU^WLa>a?5B;X(<_>^pWuCM`KrpP zy7)w>XY<)tjgXVxVqXIXzBg6vSQJ7R-u0Fx|`K|(jqtYzOW%B4K&)S^Qr zo-OkA=h|yuKa8e4nq_aKn2aBN`YZYZ*|tBUuT(l0a<0G$kZcXXfulF~KrwI%Kf63a z|7Sh7g0q|j7XrR+iSc-Mf+`OekK4{{w$G&{OR)ps!C&5d00ysqM11P20b%L#yNY9; zV8UAF7LBUKe{+oB(|UMu!BfQ`?^szhp`F_)9|b?mK+eTFrQ-@v|9jpu0C?+ z-CuTQs?0?goE4@sJ-$PyvV8kPqYl{go|$h2v+^EnZKE!8JDMR0gcpa?#6^mY$R@XzubNMm{{pyeCrO?e85KMT$5NuNnaB=Sn zkb*2HoLb;c4128Q@cn~$P}<61Nd3!Uk3I|g9&sO{L6ZEN`>?5K<5|Nd9KfJHCj!1u zdqAQBu2eDoKbBIgZUzj%HuXUL0!3)pW<@31L;eq?6yxtyphO+tFlUK@udE$$8~*As z3NG&(MVIE7FO9tYXkj{*)4}J~#Di|(QRNExGG7;M@MH4x`%;df_Zewaglhaxrz%2o zzU3mQQtlM2NldS;$4OmDP~HrSz4g+UOT1}WKH6#4&U)42`2zv|_&7a?zv?uoF_W13 zL35|W^`a+BP}O1b&zJ3IxMd(&V9>Z{TaIOnF!`B7E3(|HWGIu8w%j8U8ro1-G`iHY z%YrKoDOlz>U*w#(2UWbJXye-C!bDmVO)vOm@4 ze4Q;gLj@F2u~RaxSz67C1y8W!cs^asKiAs!M!Nl*lvgVYzh5evjf-iLi>dpD&OH3S 
z#ch(Wzm;zC&$*A~j5@S>!F+b2wcL#W1ZgLbFFlve;;Fl4)xJNFoY4Qh$D+%be*HOz zf>0hs;3Wl5IRWqld<}?_RiO=wa{0(fH;_Xu?Zd(z`!lFub$$9=ImF?y2UlGD3#H82IT#)6Klgo7gG7cbih=MU1J^lS9Mxbtwz<(h-$aM?}S#_H4 zDtLpsc)z+PVLsXT;-+@%o7X%1X*n+x@>h%B)s(-PN;}QK75=ERIg=_-c?8E0D&>!C z{+2oUL-V0p=yMjYJq1nX9OWedzZ-ju0-fV9Hvr3|lC~m(s$~l%523l~uk|@uX=ipX zN#^z?J>k1_-+RLZD1thqOI3L8O-%+{a$?&)o&Ip{)4+p}*S8n5&dS(91UtCPdN$=C z7024=&`=C zh7c9i6WNtEA6mah`c5P8`)@7x;VhF&!jAy5CGK0XDMjaA#FxxxnawBNFp&!s5S}a? z#WXC8XafxQiO7iO=mitoN+EQBcyLot2e;%>)1vq{VC#ez4Pyw2pDZfHME` zW$z?0(fXh}Meh_vu3o=0#8$C?9>rS2F@k{(HtYq@+pH_hjBg%H)qOjh zSLz@>Q;nFe=nhn78`yDrG3?^{n6LoQLJum3`672msAT~uS4c)<<4Mxjn(vwUgxmO` z{htdV25+n@>D_JAK8>bTZ$mSt+$@__cqCz~OVi~E>=jD-8qjk(k>pCU=9paF>3SY3 z`nP1|1o+3u0OKFQ(`pNfz6+^4-E!U1r69t#!SHU_UTfpqgSUKKVq%D1D0>*BPAuE8 z$Ff^#{@u&UX;+VuwwA|ZsZXGt1fJrRxa?BDnY1F(}Rqlc>Vo1Bm z<6Yh&v5wyOywy8uAJ0%r_MCb@Mk%Le-8xYCfEOve>Rt_3F zX58ZK&17lb;pQp%O{&mhF7}0F>9URzh@uyANZk|6RSjVd6~YHCnzVOgUy8h5^~#go z=hzv&bGD)?Vq!!pjP2D~Esjtl!%GnEP(QwV-X6K*8s0XFiMuCD5I>_LKBgOL_}5Nz zcI+^mSlq?9&YN4@X_7(($|z{QPo;;R54?Qr-^=~rYaZPqA_QBR{b6i;bj;FWNx}6% z3Eh2Ba?)k$(eVXtsA^E_tq}ZU?0^SfNB;6np%RfuLn@x9q20=Zs{k8&1SiS8>pde`=}tE$Clj_7Hr! 
zJbZ7t;dL&5I8m5tpM${QT|zAA16{hP+xYewy{M?7Q7xxDk>?%^MlVJ5S6=#t0Y-v> z#{#@ClcNHkaU!B?3KdG!Kpv6IJPD8&7}MC2Cjbsf}Fray`c%8Qz94G|_2X>X2HK#3QlU_1hBN z`Bs)7Lp{z--f?6rM5pbI{p~?nfcC8;apeUKL)58A&l2xM|#9jwqgs z(f7Un2rEXzpE687JU46Ks7PN<2FlyCE7Pv~^vhPeW8j48)j&t$TvUc0Tp6)Es7;AU zD4rKG7l7VvgwSsO3Xd>OOhrJKd<8K;sG;vzSITjbk-5ev*U#2DviWd{LqYZO(Iv8F z9wow2I>O;ZakJ*hs!POL!kSl%0yhmg1Fu^eNK=YkN$d>(zYk1u^H+A=s{y%(kojC= z7qYGWxQK^47A0{tfY?1gyeqKHMGHCRELECIyR4iO_h6JJVVOC0&-iolM1gOiqc=xb zzWc6m(nU)h^Ud~svLhZLdT=~IDZ}>`8+Lm>Bo8IrFIq~He^B6(blLmDNA1){S~jby zO_c7`l#fwiq6=}ypEAq^s%^!$%ai#7L#XR$jQdb*X+t^NdBkE!eI866>S|0Q6b3hJ?|C8A!^8byW zF$CI68QTBPB+uk^zW9tjS7`?7I}@fYwh^DF->2B~e@JL^TU1CJT$N_-)$*Wel@N)o z9_ei6jd^5GG36+{x)i{M5VG@*Z>|(sZ+-nrF}`C%2aIo^8Yj0!he5kV0E~Uj3JI@H z5{v?$!LT2QbO1Cl z9&@;fYztJDD6X2m82_YzsNOSE8slp=-s*Wq?sGdQm9>Ej74#EDy}#-|-`2@a1>5#) zdTQm}lGl(bY4_D@0WfqK@n4)i}q&5`xW%^=HASWSWeQU-94yugl!w8f;)S zxA#KBYT{e}C{+RlZJD1@Z!*6ffFnQtRllS@(lL%@?@rjmK;K!uE!0^hr}Zdgvnf1$I-1+VAPg^?;x1C~ zHkYl?3Os-4(ev**dj2XrW(NWqg4H-m(#`OzO>8ewop)II?FPtYcToa_=QK^T-=5e- z&B8}m>7JQ$v9kv?t*TP`L>vfT>3zIL)o8MG`Oy!v3nF~scWrm1oBUJ<15T^YP`zkF zTF2kiPLiui)H0G2u73BDUV-P4>LNg;rCVtc`O0eGAX5%Md%B1=(xPGYCO#Dv9#3}052dzVhy2#-2&0l*nuZeACIMotrg=!bn~6$mSigT|C{Rj~fwOt) z+ldQV_H!`?Rhn9C_s^cAWLz-|jt45S#^2K`o}PMZr(P}I5XX!7ni60!S{AuKTj9@Y z-rgb{Lu0yljOc_%uHs`=nD}&()+%nD7Fd+`oC#DGL|GjsPJ7|jwllffl06D8-7II~ zM^J9FH?xfzE}<=y?w@LFROw{Hk)#OU>RAYgv(E3L9>E z@uqfOFx3p>&WwXaB=>7a2j7nB`561juY#TD2W0?D69&_8YJ5Z7W}Dg{CR3> zxBV)3Bo^nskB#L!yy-d$C^TPQ_8w<>qjf|8T{FDbF3(Kl#Vz+j2)#rVu? 
zr3zZi{m@6pZ;dgrD^a;w$xj(ia8$mzUofm|lW(Tw;VjtP&ih6B8>mJVVr33#Zd>hk zfV~H@hruI*b3MGF>%}%E0}YzvrDa9&BRJbjSH5^}eFQv_}-> z>T%GA`?oBGaA85yYEm3sPjht7afrq%*)5@4*OA zFvr%ml8apk-;QB&D9Brs>F{;Dle2S7``&;=$uWZfm04&eGg+x>#$$jo-Uz?0BNL>k zN-JLeu_M>~?AZ$s9*AFpkae6#pWU!R8uNKzF-=w85(aM5qpXRyMb=6p7Iz+?UmQ;7G^nv$6%+Il;9(p@5>FLhpVq~71O6X|hfG#1ZP zP7+`l{Bw-ZnvmYI4-YNWm~Y6_x=(S*NQ6cbreQ=`H6_cHJdd3AIzpUef`c9ZIvfaM z`W#~knYLnGw7giEjAU@UfOsjZqjFnQO;gh-P*!nj77B%i7}XiiPSPUuXE>239ZP?7 zOC)KxxbiuFm-uBBZlF3VMb_VIi3G9+sOgPET0RhMj$Yi0<9zA8qNS2$dA&Z15p%lU z4))naEkmtGkF?YzRXCW9nF@)<*xOP0-MNnP%Ua$KN6*~~HX*Na`@|d>{nRexG{j6yi(ujE$tQ{ZA2!`O1R?wJK>+2xFH|PPK8;FpB7OPs2R>Szh=GL=7-y8(GQ(aCLk#ehMdBzn2|+LY{DZ*pHqR-FGONfp>h z<4jU_sjgV7|Cx81_rtHP8|j?*)A;|`D6Yb>rAM~b3^UZyc(JfL zo%e(keU8LXzof*-uPFm8XPy~$HnJ$~ff-~qp~|YM7}^y~gY)3FnQm=S=CzLkU%^~7 zz%-t*(jiKfgKB@6_s|(Lhsd&#iPu@f-r5|JRrc+2Yta?s$j?Ps_#urK?3W*|oQ;q@ zt9?%+T8hl*)?G3%R7r8dd-4y6FKAbFeqx>^ugc1m8Xc}FLDKs)2v>aLd^I@qSXePMEZzl^Hws4z_1uSkm;)nKFdeV1YdAxsS@rc_Icq{)h~Q2&ZBA{nxgxJXT*BzdGd9l&$UEdSk=OH2gM~ z0?m-^IXMbH+H(`Yo@)+p2`m#tnR=|ze7ct+)liz6w&Hf!`KW}_ru&aLp`~rIvWxZ< zWf?v4*Hsv=!QBK7a`%jzZZ?ZBDH;PYm$p_5uDkhAN=7Sx<2XXuErJ3pT#OHQvVZcX z@u>Uf5eMZ*RH}m6-t^B>fztqVmP zCa~ha1bk2S>EymDX)-1hl6G7qgKQmvvbj}NU2&p-tFkW#!Y0(IHK!>^F;!?(B!Aq+ z^3lduznD!rjAx`LH2H`LRNUKQAIW7}Eq zGS3wCvI#M|F@}BXGs&z|(``m2$8WAns&F#jQuedj0^pesiiC!RcgV?GWS0dS^Xe8G z7CwM)p7bKSZ-GN*9=Z96OG0AuVtdQ!yMsKBf7c};N#v0yUVMK+^2t5i(R#~D+%Y&< z4m{VG#lrgMH!*+y2SisxV?pB&z-vv~&-(KpRCr7d43Ql&F{FkW;K!C|6F|fLUeWhn zUNdXxcDrr}Ey;P{gU8P=p9}+|B_uou#!q`7PM}2mLjj-*JxJ(&Q0PXb()q&<1OU5A zF%mO!ge_d#20gSRHX@d3SVcacK6a6t79>ObC~p+kVQx8{)@|&1ihu5CId%ypJmx(2 zp~NbMC&ZxxaF2;&{IRU0t8LHLe+?al9f`WPg0V2mgagGz`KXN+Ur{wq@6*ZlPcELj z-)PbMF>gQ4$A=uu=nT$gi&A8RBYyJ)AWrQLmmF9&hVP3PtaA|BXV5`;ixbL2r}oD& zYce-1)Ee*GMvcGGhDS4GQhH_3E3oWnt+s@+y63&Rh3q~}jPiHD(x?vj z!KbL#D(va%`p1+y+uG-;o7`fDaeaYGrz8Km*R5x3=E#|hUg)8G0}Ly_uUrSkXZLv0 zJeR6>q~Kh+G+tox#5Q?MhV~09XZR|^^)<`d-&);A1thkkPR38zh)I>7{wup62cVU) z)kD&@wbg!SpUlUj(on@cxs+z_5V7toz 
z$V4$^Q+iu$wG32$M;u2gzSU~j`TlIBkfBlwtvabB;g)Ww)1raW?Dh?C3Mb$~CoDLm|-iVy7aI%sPljM`Q%=&x9 zi#_l`Jr$e`;TX);(fZ=MTm|PY-7>njL+P$W4G1HpQQ64*u6w6EU4a3?_eF*0C(r*< z%#KCn!5@sD25s;*PdaTK`T_waDzI*rxg|9I(&VXjDf1~b_<-3mwr1;NB;4kJKZ z-9f!ASk~>Pc7oy&U{u9vb!z#=L)2VIoSSTt{T~n~`=ymuzxugr@Sl-DqCMkS1;2TV z7EG4pxzSLOe|EANp=7zRr(te84z*)I<)Wc4If+bH*pV~ttNuMEk1b%1g=Z0(vGcC$MYG#C1Q z3I89w?|skY=%DaVUew8d2PhHH*aXP?AU33 z5*tr03qZBOp6v)nsT*K2sw9Sgn{C^{aAb^nHA4{uA>adsSnoH?Oif-~uld#2-LwJ* z1>mVbg5BXTv0O<75pOt@Hl9_Yd%Jwg+~#p1uCjL}ux{Vhrw7`{mkstOa)9FalMh9* z^Z|dIzE4`k?$>40^VbK0g{PxDR0qnrO&6{W6r~9l*!?&=%QqY{#`s0~6u}VI+!>PT~?sufBZ#!9V$MLBLNSL5|mU zHY8exT==yVZ!Fhts;nz5(t9q=(9tG+jJZ&Mgz8tBO~YcsOR<} z(0qP|eAH>z5o<_`Sm^uv&=CBT9Q%`(oh+?t>)m)ORco0Q6Z5#3qjpV!XsaxXs4+=r2Rvu4fHmI;ihVq)R8XNbwW`t zg51paroAV(+FgsHAuBOkzv&mwN(K-cOqwtV1!1pWkV3(x&heLEhFSSY*_$F#EDS&}O%xcOX5ZJ_7 zAav~Y2ju(JeKtcf!-=t``*up>V=tIbafuASebOu=2_(}ELj@D9qh}xLt(HTpz1cR zFe^iR38V2BL2kcUSxAQLhcr%dZ*QC;M9_ij)^!$J)3TN1j1_uw{BtI!reqJyoPG~w zfeJr5v3&W3w)(}K@e@_8@axF>%T~(&PYe~Y8x^3qMB>14e~-Wa9$kRH`LpZNCq}`S zOrKOnJfffCzH7>MfIKBE>G$$C1p@X3&siR2U|%c$LUjYJ9hs*@ja)*OdWccq+$3dl z=5o}yL3L5Y=Ee`eND&F&*^${|0UGVUE`;Y5sHTPy-r|JyvCJo*XE==>L`r#mI8eSe zRr-*No?dnilZI^81@Ei&KApe=7R8!0nEr1FZgCZu1yO{Mn@b>>6&RH}E0f3?zBnj; z!{N55M&y}-#oM$56zzyQgm1YBNmGmSlluNQep0zy{`QmF;Fz(3z7AK~Yrxr|*nkJn z_P7_I$aA*?4lr~hYx;m+}meSe#*(& zf#N5<^k^z)nO}F9@A5pWUKO4R*x9aH1%G>1vk2h8764c(8=_H6>h@A$&j1&CP=*=1wfpVv6O3%^o+a7lS1^6`v zqcV(Ct-r4R8@q~XJzVw13Lzwxa61(AT`AymuNGlDV5PR5Fdiz$+>wk#?_30fwprJl zYF z-_APQE=fA9T#qem6+d_%j$#tx#r| zqbg>ghkT|E**3|u4PF)zv7r62&&ufF!sgHPHF92l%5}rbT(g;q9#dau`f->J}j*;>54?V{XE8gcjBox z>KOr*r3`MOqvCG{SBUzP1D%m`104?2pK?OYpT=&GOQNie?=|5&_WMC1sn+ScXpP3M z4ppuXKS^BFHuPB=mfXX9FE@lvUIuxIGt0Ny=V#wghR_fBN}HcPiWEwf0!`scn5)Vuggk{bm7#*o+) z&PtlTLKFb#v;ctaF&W;q#g)f_MA196xktOp6jwRaIeP8AjpEc4ZDcCXySio)hb4TU z1$gSOm(Ici#+w)M*%itqnBv?!U|*k4AjiMHK*HgE<)CM*7KyMN2q=(-kzp zz#%4707pX#1(z8F?v?0{M!ySG2CbRU3;+0W%s>B6cx74eKYg)({@4HSUM!R#fTJ#f 
zHWQ!?qoSY!RkObz0l~q-(#eh02dxJcjXa!vMJUU?UIzEB&pJt7VsD3E6i?;CqnsOO zDcFG0=n()-t|2^tJEb*%ZeUwmK=+ButYMt&Idx$w$ZYDV>h6aN&}_f20mq4|#%CTw zro35S(vn=QEP-H1rzvYAier4avd7WK&&N4M`Ti&0@b|-6@u9c*(;6RlA1RHy`k4ME zscJ?m5-(xuZWfKK1n@i;vlSODA8H-CY*8Ej=xZb6ck$F{`K=gmZu|XsH;AcKM_fib zI{>9W2?^j?o}l~KP>1nq^7OCxEn1&?YAQB9Ed&UN%TWy3gFDHb;g&hDQyY-#mg!)v z>>GV^d4070>H<4^n~%{xc=}EaK_um{`8)4=XR5Eb7l?R23Vn%0xZHi@->5uLiKHO< zDvezlb6^~4S!bE|tv0cNO`N$?N+grsX=Nr`uwqo^86$*Dzm0h&b&&V%dveJW1HqB= z;%zsf#bY69MBIkuiGUM+YHQQLD*wFrLojkF&>R!OBU*)vPVp`Nk@LpRM$uc3&wwkt z^U~djyq3Axfhn^1jz+lN?*BxwYA=39pvjfhV3zLchDN!b{&&1xjmd5McY+qr1v^*i zRo5k`n^s=RbrWt_0iUq~9^Himau`;C^cMLUb;2AW*)%kw_e&Q&=Viv{0-1AK#Y%|o zY|q1d`=jLj*9QYP(B$kk#`%BCDav3@n;Bws8G2|$L)WrS+d@waiH5Q-S!#e*=*=01 zLmDt)DR<S_7fDILHE8Ji$+Y~;9zS?+AaWEL13w7iE~Y2-+X+D>7A?7&!DJ|CMnp~m{1u8LQj zm;5p=jov4kln#1=U~2@c1UsZ@2I2Y0JAZmx9YX^Zvn#k%`Gs)je*Lf}^Y;&~B;-EO zyhazP7Rbq;Mu6}Df$|85s_v06qhC9YX{zBx4lX+T$-5s~`6d%zcXiKpb7|<7T1RpR zeG$m}!Uru@*>_0{R95*b2L9zc+t8Rm0A*-N*riiQx{);_SxuJxggs(`Z%Fl7!$l6Xz|;Pz?V z7ccGvd>@KbW9584R;bOs_lfeC9kqfB2>P{3tiNpudPLB5lR$y(vc#s9`W7KVPko!9 zaNy4KjD3At^P-6k#4m%aLlFqs<2!+lqHAOWHSu~OqHoruM>K2TZbNg;^Bd=b64a(P z?o!Ks;hO|N5MdH?ZVQl&{{yn=4jKZ{#+ZSlV-+LTXg9b{2tV?^G^Ohkx}X30=9zOU zKWb&IA#>4^jbG;WS&lfYFfFch9WxtLjy}w+$DO77GNW~v6KIQz$=a=rDI;g)nwBg? 
z!4NYU;T(>VM+$7bqdyzKJrs%26KQa7CyFS2isBp+Q-b2A7ox8*sN}W8@B74B-R=e0 zMWA^9+lb2TZrAolhB~LyEQF0VTB}ex&nj~5a1+%@%;_!g|8V8q4aby$38~}W%WoW+ zP(+iJZGhv|{<5s|RBdNeXmtcqr^p6kGV*@M_WxBV^nd^Tf3Mqr+rWR@z<=Ao|Cepx z!)45h(oSoh8V9ZILa{yh4vVSosFZb_g<7P12rqQ64{2|L>>h*d?f__;==JNWiZ?GH zY)x2YJ6~$({;RXYqCaYA_G0wK-B43n+jxxBp4b|n`=2jD6)4hcge-rET-cl>kx+zz z5(DNXH2Z0do!gXP22GSX!T1*X^ywV0$ZASjiU>%R#@P~H#-2V`k{=AvI-k;$B>AQvOo8+T;?!#-B9@7c zdSB_@Nl&lJH47h#zty0pd@Qy%-Qy{lzaGT=T||~$p6KAu0qUnT?#z`w@#)wMnt^{W z;Th?v8|t2mtdV@fbvATI+S=i?;E0y25oNpz!@G}dB@I8|BRp49;Gbes?bwT`$Fw7s z-3uiI$*&5%ozJ$jpT1~!=XGSDsx+6+w~&wTy@kHEpSbh{o&d4GZsI&cC&%48&zs3h zdT6_D~3V>?u z3CPg9ygSQ7Nt@+fA6f5nM&F8Axc#6y3}9d1-`y5*ONJyz6dY#W2Q8&X$jM%#hZVIz zde@@wwC3oumwwFy9FT!`VEoG71oXJT6UHIi3MpPvFWQs+YY}dhkM1;>uruc-ozeI0 zIW_{$cqaq}SZ+Aq=%n4NGR)_$3Pzq$IlWl8!u?=kf3evm?h=JU=d?qqlekhv%LY?k zQ0GZyYe42GwFo4PtZPFbR6SZ8r+&A-tf0-|@R8ByrKAc|uOVmkxBTkJ=S&WFRhPw+ zAEYCI`BYyShAHy+oPuXVOuv~R#7k}Gl@z4pUB9=2B6$g!=cu-d=NUuOOMZEYaaMk-i5q<~}C61N&DDmgDj?68B|M)d`vY?7hq~w1=-vsx zqT;=xpY2X%0xlk@0guT_HLN#qP~v9UA+jxs#0%Ebgo~zM&+Q)vr zQqv0vZk{hGbX|WiC}wu8YD;nEQg@@FLh_GV`SxVx&dMA`hWgb1(cX1NHL-TES4a3IrUihs7?2r{L^@fiC8OrJfV4OoWQX{r zt$-Rp?{d@KT4!(Q1^U3el0u&E!s0A$=n;dIcEuJniq@<6-#Hgl$5W?yFoKdNbl58V zvV(m5*6A@SguxlCsBNmGRSu%S@-0{fB)Mx@!wcjCj-!CjXyI~YNKWv!arsnk%fkBB zs2dZy{_E%0)v4%4+|34DOQC>~?hp22tr2v^jo$bf&-g$&-vy+S84#7tF+BhUwx*-s$S{jC zc$&586klK=(hgoUz>G zZqVgEkK~uAH@IV=>SL)v)ZM^}fP{JZwugd9R~$^aw|ydNHTaPW-vC#7W6u%hA$eN!L{F*}SOCSGrt9 z-jcCWPh{?WX7eAe-Mg)sYTV*BCeC!5+q+S;cW1e|jg7;eZNr0V1Dd>~N)eoFP=2%*0HA!c-O@|GC$Pe)GENO(j z{SeXPAb0;UKMdaNQ>VtI^M{V0W*^h2-Q=C6vqs+HX?`gUKf#Ygu7ED6Z&if_ z&X4aV+jR0(=^dlbB^(DYt5PiLk4%QnFh+9~EVwf}%~6Lez#*&Zmmi2fI9(K0(P6O< zV4{Q<`)*7&dY0>!!+q>4?2NA_>kzq_p~EIdW#i^DxRq0n(qpcVsH!C?a;8*!wZooI z@3pwp)x2SJ0!cNg+kU7H304sp&*`dq))nNq9d?e5b->H1D6 zEJ*v=tBIgZpO0zFqi&>=TaQI3U1-pH9 zFSo7Ip2)rc#&cu#VC4*uvRV0jj+qR1x9EP)PG9J45FbHQlB@yX{>4nPVb zW1ED{;v+S1b<*0>M1^vP)95u(zYAr9UT_#fR+=&7Lc<3=z=v*in`bhvE}$s5E%^Hx 
zB|5Vms(D`zWaQ;{0+BQ`O+NO>|J#lqGH=kiAB8PN@Fbcj8+vY$GpTmx^O}fW*uIZN zyGN>l%2mM=h6UgRbr=xrS}SHE-XWgGJya35l@UA=GP`I|7irxyRHI3US{WrPeKFDP zwyPTo*rq8^XU}%DTP&x=5Y-n4USesUTLs3>mbbPOo9=ukixz-1jEfLakNa^Y=+gwk2IF!vqH&r0 z=+7+c^E@)CSAN>Ybnt>Xck+g!w(3Xs&A2Hx(Q{4#@Wh!_I;-gJf;QT#k6oo~a%6O3 ziHT-7H2NdqOD5j5-n0|}*&QyCgcRDGo{mW!;>tN+#Me!4cC*|g;n&Bc$_6Ich6DL= zBlDhuFk*4>kqGVAi`(zqQb`s!$BkGy7+H>gnaktylJUAJrI1F!Sq00zOImq(ZZ=r& zGZ250O!uv|k(8QxsYj=7Kk?*FdeG?hh|gs`%ly+vkSf-oz-!)5=6 znD0L!EdFoTCf&OCtR`RR12MhPm8~`aMd$JenSIKruL$ME+SKYQWspm#J#yt)ui(ku zl2|C|d2H8B5g8``*SD+qkd1tgSVEHt6vTeXp|6?^uvCEE#tScDQx_NHtxj(vawffh zKlv4^xAf5OV6{I|FnmIE*K?}FYUq0Z=6d;93rzfpYEGwZ5VL66oxkJHFS7;9VI+J_ z-JCbHy~2aDV?KK#gCioB+W_yoW5(u^j&G{udj4t)%}s@geYU1e7%o97xi>0zP4b4% z8UxQb|17pk|Kg1zed5H|_m%vP8KD$AEWPj*$aQ*FbQ~^r1 zx>or%KC30|o72M&(JnuSkK2?yP)N0T#SXv1;Hh%ayRE1gWgF3qR|QyB3f-w?+oArI zE5fFy455OH!zFbrAsQ@m2>|7)zXBjOoWx16kv15Nw6)h|Yh7BphTe+cWhj2?6Ie&+ zqS{_8@W-L#9f0Hr16v4$aPn3&O8)&{JnfoMOgPWGNaO{FdxH2FKaIijc>(8pHBi+X zb7w``d<#EDt8Q|XG!DUKi#5_4o=g?E-?^MZ^;_>1sPk1MgcKup`^KEa4lmy6StC0Og$@ zd+Aec{U;jaT&fD8n9my-;z}su^^57d7?;))Er;@6Y~9#wJ!E+Qp^XceqW`i#EZ&MY z-7f+#rv>m-elDZpm9`^u_UJ%1c?tt;FztT@8v_bkWdQHdbjLDZqdEM0SM~r0@5$q` z+Suh*a?ccFKW(i-JXS`)lSdM{kObP7VP=~(B2t|)y!7c~q2m?}s#_;BO`0+WOt~iV zFF<@D;4J)#0X*s`7hV>UB9J#Xm&=7R$c4|F{jwyg zJrVWErR`5XBV5D0TtsXmZPM!PT-|(>P@S9ji{_tyREhslm21izBpT}h2&M7#b+`9U z6?15sXwM+-N9F+B4nBPnNx0yO(J%Rq<$P@1^W!B@biuWLAM~k-+$UOEcLyCv#dfFr zU1A04#CM7FxPXUOcppT( z(Gcp$ywcJH_6bzW&{`0H0`{L2U3e!2$yh<@{ z_bBg-!#AQLNt#Eb1aMt-PFxt~3F_oSFT1|hmenreeFy)fVmMske5vYrlRnhGjfi^7 zzR#kBTAf$gK7!Y-^U+?N2c5y~rR3OewV{^leax^K^XVBhq_dfygr+E3w|tR}V3tkp zb-$X3BhNPn@5$LA5UKkm8!lOnCYXvg4-me<#XSlr*a%irEo}73V)wJIY)6JkefS0M zs_y&363=l|th1ByEj4Rsbfn!~F!jigKp$7Dy>*-kaddcT+a)lt>3$oSc(@C4etdf0 zAhUXWbtV4oUrVYs4eRI3s=dUso;oyd!A%Z826Zhy$}du{1Fpe|xSidhW|?#Pg$#wCe=GbTIsN&4!zGx0ZT1jpn02g8{8(&d$b_t>)9hOjRiHx}upb5I z%xIK&l{cDhWiY4){&^An2cWzCEcP2cQVLt`(pxVh)u=+xgZ(<+pSG31dS7_>MJrLU 
zMr3eF@rn8Y$m#^H++PQ(3JJN$Op!vZ&sx8uQmgZfKN5U7F(#n<{vAInYR+Xf5|4jT z*DU?hfjU8?y?(RF3BBv|kcYD{6?_3$-|lLd@5}ZR$6ru~{9dKgk_f|f;V@&l+dLdO zutzSETg;2QH+7K$=}2Bl@>iXvU5J(4c6wk?Ppt>{oT@i%I{>=)ypQ!+DVA7$FT?nW ztllzMTD0O#@bzaN0_jsmoMnm54_bquJ;;)xYuG%u`3wcBP&`!AA)ps+T{x*u9fex( z-i?)ED#a!H1iQMkTr+pjX~z#S$8A>iP>%Ea@mV-!j$-A{1=KZew2*T=sE@VR(l$0< zE#bAgB)D^4Z zhcXft4}sEV5Z4g@3!h7$#J%1eZH_Bwb6qQjwDQ36VZt{fX)PV4v%+tcO({0!h7$Ve zjHOu9J}c*jbg>phKBc@VWJebV3Ll!#ADkHJWa@BKRB1G0Gk7GAe&gxX)EKKSWF#2- zgrjC8H(@fCcv zu68^LnR5i0yI*u5J}4`9FU`eiq(3gAxq;tN0Tmu<5Ft~uRnp@k`P*8_pLNFCMusH0 z3lp;5xtIL+D*iG_Sn>*$5V)`W;++z4Z+9rT@AK!ch{!;BN@G^q1yC4j7o#KS6R3b` zdSo1oiH17`UX>?+AeqZB*e|b!#L1qkSjQL@i4E8{zLL3>dYbb+y76N|#VoYC zr1XyPUFS0c@M4LV5%anor=EhF|4oX5|E^wk|M)&hDgXub-aja+{#P#k_wjE|!tZ(b zJrBPz@EZfaG4LA$zcKI|1OMMKzj4odU0|VN^{$5>Xka>N!Ixf&TG?K literal 0 HcmV?d00001 diff --git a/frontend/src/api/admin/ops.ts b/frontend/src/api/admin/ops.ts index 5b96feda..9f980a12 100644 --- a/frontend/src/api/admin/ops.ts +++ b/frontend/src/api/admin/ops.ts @@ -376,7 +376,6 @@ export interface PlatformAvailability { total_accounts: number available_count: number rate_limit_count: number - scope_rate_limit_count?: Record error_count: number } @@ -387,7 +386,6 @@ export interface GroupAvailability { total_accounts: number available_count: number rate_limit_count: number - scope_rate_limit_count?: Record error_count: number } @@ -402,7 +400,6 @@ export interface AccountAvailability { is_rate_limited: boolean rate_limit_reset_at?: string rate_limit_remaining_sec?: number - scope_rate_limits?: Record is_overloaded: boolean overload_until?: string overload_remaining_sec?: number diff --git a/frontend/src/components/account/AccountStatusIndicator.vue b/frontend/src/components/account/AccountStatusIndicator.vue index 3474da44..a7e0735b 100644 --- a/frontend/src/components/account/AccountStatusIndicator.vue +++ b/frontend/src/components/account/AccountStatusIndicator.vue @@ -76,26 +76,6 @@ - - - @@ -410,6 +534,7 @@ import { 
useI18n } from 'vue-i18n' import { useAuthStore, useAppStore } from '@/stores' import LocaleSwitcher from '@/components/common/LocaleSwitcher.vue' import Icon from '@/components/icons/Icon.vue' +import WechatServiceButton from '@/components/common/WechatServiceButton.vue' const { t } = useI18n() @@ -419,7 +544,6 @@ const appStore = useAppStore() // Site settings - directly from appStore (already initialized from injected config) const siteName = computed(() => appStore.cachedPublicSettings?.site_name || appStore.siteName || 'Sub2API') const siteLogo = computed(() => appStore.cachedPublicSettings?.site_logo || appStore.siteLogo || '') -const siteSubtitle = computed(() => appStore.cachedPublicSettings?.site_subtitle || 'AI API Gateway Platform') const docUrl = computed(() => appStore.cachedPublicSettings?.doc_url || appStore.docUrl || '') const homeContent = computed(() => appStore.cachedPublicSettings?.home_content || '') @@ -432,9 +556,6 @@ const isHomeContentUrl = computed(() => { // Theme const isDark = ref(document.documentElement.classList.contains('dark')) -// GitHub URL -const githubUrl = 'https://github.com/Wei-Shaw/sub2api' - // Auth state const isAuthenticated = computed(() => authStore.isAuthenticated) const isAdmin = computed(() => authStore.isAdmin) diff --git a/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue b/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue index c4e3b5b9..63dd1c9a 100644 --- a/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue +++ b/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue @@ -56,7 +56,6 @@ interface SummaryRow { total_accounts: number available_accounts: number rate_limited_accounts: number - scope_rate_limit_count?: Record error_accounts: number // 并发统计 total_concurrency: number @@ -122,7 +121,6 @@ const platformRows = computed((): SummaryRow[] => { total_accounts: totalAccounts, available_accounts: availableAccounts, rate_limited_accounts: 
safeNumber(avail.rate_limit_count), - scope_rate_limit_count: avail.scope_rate_limit_count, error_accounts: safeNumber(avail.error_count), total_concurrency: totalConcurrency, used_concurrency: usedConcurrency, @@ -162,7 +160,6 @@ const groupRows = computed((): SummaryRow[] => { total_accounts: totalAccounts, available_accounts: availableAccounts, rate_limited_accounts: safeNumber(avail.rate_limit_count), - scope_rate_limit_count: avail.scope_rate_limit_count, error_accounts: safeNumber(avail.error_count), total_concurrency: totalConcurrency, used_concurrency: usedConcurrency, @@ -329,15 +326,6 @@ function formatDuration(seconds: number): string { return `${hours}h` } -function formatScopeName(scope: string): string { - const names: Record = { - claude: 'Claude', - gemini_text: 'Gemini', - gemini_image: 'Image' - } - return names[scope] || scope -} - watch( () => realtimeEnabled.value, async (enabled) => { @@ -505,18 +493,6 @@ watch( {{ t('admin.ops.concurrency.rateLimited', { count: row.rate_limited_accounts }) }} - - - "$output_file" 2>&1 + + echo "[Session $session_id Round $round] 完成" +} + +# 会话1:数学计算器(累加序列) +run_session_1() { + local sys_prompt="你是一个数学计算器,只返回计算结果数字,不要任何解释" + + # Round 1: 1+1=? + send_request 1 1 "$sys_prompt" '[{"role":"user","parts":[{"text":"1+1=?"}]}]' + + # Round 2: 继续 2+2=?(累加历史) + send_request 1 2 "$sys_prompt" '[{"role":"user","parts":[{"text":"1+1=?"}]},{"role":"model","parts":[{"text":"2"}]},{"role":"user","parts":[{"text":"2+2=?"}]}]' + + # Round 3: 继续 3+3=? 
+ send_request 1 3 "$sys_prompt" '[{"role":"user","parts":[{"text":"1+1=?"}]},{"role":"model","parts":[{"text":"2"}]},{"role":"user","parts":[{"text":"2+2=?"}]},{"role":"model","parts":[{"text":"4"}]},{"role":"user","parts":[{"text":"3+3=?"}]}]' + + # Round 4: 批量计算 10+10, 20+20, 30+30 + send_request 1 4 "$sys_prompt" '[{"role":"user","parts":[{"text":"1+1=?"}]},{"role":"model","parts":[{"text":"2"}]},{"role":"user","parts":[{"text":"2+2=?"}]},{"role":"model","parts":[{"text":"4"}]},{"role":"user","parts":[{"text":"3+3=?"}]},{"role":"model","parts":[{"text":"6"}]},{"role":"user","parts":[{"text":"计算: 10+10=? 20+20=? 30+30=?"}]}]' +} + +# 会话2:英文翻译器(不同系统提示词 = 不同会话) +run_session_2() { + local sys_prompt="你是一个英文翻译器,将中文翻译成英文,只返回翻译结果" + + send_request 2 1 "$sys_prompt" '[{"role":"user","parts":[{"text":"你好"}]}]' + send_request 2 2 "$sys_prompt" '[{"role":"user","parts":[{"text":"你好"}]},{"role":"model","parts":[{"text":"Hello"}]},{"role":"user","parts":[{"text":"世界"}]}]' + send_request 2 3 "$sys_prompt" '[{"role":"user","parts":[{"text":"你好"}]},{"role":"model","parts":[{"text":"Hello"}]},{"role":"user","parts":[{"text":"世界"}]},{"role":"model","parts":[{"text":"World"}]},{"role":"user","parts":[{"text":"早上好"}]}]' +} + +# 会话3:日文翻译器 +run_session_3() { + local sys_prompt="你是一个日文翻译器,将中文翻译成日文,只返回翻译结果" + + send_request 3 1 "$sys_prompt" '[{"role":"user","parts":[{"text":"你好"}]}]' + send_request 3 2 "$sys_prompt" '[{"role":"user","parts":[{"text":"你好"}]},{"role":"model","parts":[{"text":"こんにちは"}]},{"role":"user","parts":[{"text":"谢谢"}]}]' + send_request 3 3 "$sys_prompt" '[{"role":"user","parts":[{"text":"你好"}]},{"role":"model","parts":[{"text":"こんにちは"}]},{"role":"user","parts":[{"text":"谢谢"}]},{"role":"model","parts":[{"text":"ありがとう"}]},{"role":"user","parts":[{"text":"再见"}]}]' +} + +# 会话4:乘法计算器(另一个数学会话,但系统提示词不同) +run_session_4() { + local sys_prompt="你是一个乘法专用计算器,只计算乘法,返回数字结果" + + send_request 4 1 "$sys_prompt" '[{"role":"user","parts":[{"text":"2*3=?"}]}]' + send_request 4 2 
"$sys_prompt" '[{"role":"user","parts":[{"text":"2*3=?"}]},{"role":"model","parts":[{"text":"6"}]},{"role":"user","parts":[{"text":"4*5=?"}]}]' + send_request 4 3 "$sys_prompt" '[{"role":"user","parts":[{"text":"2*3=?"}]},{"role":"model","parts":[{"text":"6"}]},{"role":"user","parts":[{"text":"4*5=?"}]},{"role":"model","parts":[{"text":"20"}]},{"role":"user","parts":[{"text":"计算: 10*10=? 20*20=?"}]}]' +} + +# 会话5:诗人(完全不同的角色) +run_session_5() { + local sys_prompt="你是一位诗人,用简短的诗句回应每个话题,每次只写一句诗" + + send_request 5 1 "$sys_prompt" '[{"role":"user","parts":[{"text":"春天"}]}]' + send_request 5 2 "$sys_prompt" '[{"role":"user","parts":[{"text":"春天"}]},{"role":"model","parts":[{"text":"春风拂面花满枝"}]},{"role":"user","parts":[{"text":"夏天"}]}]' + send_request 5 3 "$sys_prompt" '[{"role":"user","parts":[{"text":"春天"}]},{"role":"model","parts":[{"text":"春风拂面花满枝"}]},{"role":"user","parts":[{"text":"夏天"}]},{"role":"model","parts":[{"text":"蝉鸣蛙声伴荷香"}]},{"role":"user","parts":[{"text":"秋天"}]}]' +} + +echo "" +echo "开始并发测试 5 个独立会话..." +echo "" + +# 并发运行所有会话 +run_session_1 & +run_session_2 & +run_session_3 & +run_session_4 & +run_session_5 & + +# 等待所有后台任务完成 +wait + +echo "" +echo "==========================================" +echo "所有请求完成,结果保存在: $RESULT_DIR" +echo "==========================================" + +# 显示结果摘要 +echo "" +echo "响应摘要:" +for f in "$RESULT_DIR"/*.json; do + filename=$(basename "$f") + response=$(cat "$f" | head -c 200) + echo "[$filename]: ${response}..." 
+done + +echo "" +echo "请检查服务器日志确认账号分配情况" From d46059a735c9ef9606a7c0cc103f609b5686805e Mon Sep 17 00:00:00 2001 From: erio Date: Mon, 9 Feb 2026 13:13:14 +0800 Subject: [PATCH 003/175] chore: bump version to 0.1.76.1 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index ad069db6..26c4b8a7 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.76 +0.1.76.1 From cb233bfa661f925fa753cba2ec35bbabacc66870 Mon Sep 17 00:00:00 2001 From: erio Date: Mon, 9 Feb 2026 13:20:14 +0800 Subject: [PATCH 004/175] fix: resolve merge conflict marker in OpsConcurrencyCard.vue --- .../src/views/admin/ops/components/OpsConcurrencyCard.vue | 4 ---- 1 file changed, 4 deletions(-) diff --git a/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue b/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue index 87f6a4d8..ca640ade 100644 --- a/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue +++ b/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue @@ -162,10 +162,6 @@ const groupRows = computed((): SummaryRow[] => { total_accounts: totalAccounts, available_accounts: availableAccounts, rate_limited_accounts: safeNumber(avail.rate_limit_count), -<<<<<<< HEAD -======= - ->>>>>>> v0.1.76 error_accounts: safeNumber(avail.error_count), total_concurrency: totalConcurrency, used_concurrency: usedConcurrency, From 64f60d15b05e4f4650ed579792480c9d93de21e0 Mon Sep 17 00:00:00 2001 From: erio Date: Mon, 9 Feb 2026 14:11:41 +0800 Subject: [PATCH 005/175] fix: pass platform prop to GroupBadge in GroupSelector for consistent colors chore: bump version to 0.1.76.2 --- backend/cmd/server/VERSION | 2 +- frontend/src/components/common/GroupSelector.vue | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 26c4b8a7..71908b4f 100644 --- 
a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.76.1 +0.1.76.2 diff --git a/frontend/src/components/common/GroupSelector.vue b/frontend/src/components/common/GroupSelector.vue index d5f950f2..582b6f0b 100644 --- a/frontend/src/components/common/GroupSelector.vue +++ b/frontend/src/components/common/GroupSelector.vue @@ -22,6 +22,7 @@ /> Date: Mon, 9 Feb 2026 18:10:39 +0800 Subject: [PATCH 006/175] feat: ErrorPolicySkipped returns 500 instead of upstream status code When custom error codes are enabled and the upstream error code is NOT in the configured list, return HTTP 500 to the client instead of transparently forwarding the original status code. This matches the frontend description: "other errors will return 500". Also adds integration test TestCustomErrorCode599 verifying that 429, 500, 503, 401, 403 all return 500 without triggering SetRateLimited or SetError. --- backend/cmd/server/VERSION | 2 +- .../service/antigravity_gateway_service.go | 17 +-- .../service/error_policy_integration_test.go | 108 +++++++++++++++++- backend/internal/service/error_policy_test.go | 8 +- .../service/gemini_messages_compat_service.go | 4 +- 5 files changed, 126 insertions(+), 13 deletions(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index ad069db6..508699ff 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.76 +0.1.76.4 diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 014b3c86..c295627e 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -371,12 +371,12 @@ urlFallbackLoop: _ = resp.Body.Close() // ★ 统一入口:自定义错误码 + 临时不可调度 - if handled, policyErr := s.applyErrorPolicy(p, resp.StatusCode, resp.Header, respBody); handled { + if handled, outStatus, policyErr := s.applyErrorPolicy(p, resp.StatusCode, resp.Header, respBody); 
handled { if policyErr != nil { return nil, policyErr } resp = &http.Response{ - StatusCode: resp.StatusCode, + StatusCode: outStatus, Header: resp.Header.Clone(), Body: io.NopCloser(bytes.NewReader(respBody)), } @@ -610,21 +610,22 @@ func (s *AntigravityGatewayService) checkErrorPolicy(ctx context.Context, accoun return s.rateLimitService.CheckErrorPolicy(ctx, account, statusCode, body) } -// applyErrorPolicy 应用错误策略结果,返回是否应终止当前循环 -func (s *AntigravityGatewayService) applyErrorPolicy(p antigravityRetryLoopParams, statusCode int, headers http.Header, respBody []byte) (handled bool, retErr error) { +// applyErrorPolicy 应用错误策略结果,返回是否应终止当前循环及应返回的状态码。 +// ErrorPolicySkipped 时 outStatus 为 500(前端约定:未命中的错误返回 500)。 +func (s *AntigravityGatewayService) applyErrorPolicy(p antigravityRetryLoopParams, statusCode int, headers http.Header, respBody []byte) (handled bool, outStatus int, retErr error) { switch s.checkErrorPolicy(p.ctx, p.account, statusCode, respBody) { case ErrorPolicySkipped: - return true, nil + return true, http.StatusInternalServerError, nil case ErrorPolicyMatched: _ = p.handleError(p.ctx, p.prefix, p.account, statusCode, headers, respBody, p.requestedModel, p.groupID, p.sessionHash, p.isStickySession) - return true, nil + return true, statusCode, nil case ErrorPolicyTempUnscheduled: slog.Info("temp_unschedulable_matched", "prefix", p.prefix, "status_code", statusCode, "account_id", p.account.ID) - return true, &AntigravityAccountSwitchError{OriginalAccountID: p.account.ID, IsStickySession: p.isStickySession} + return true, statusCode, &AntigravityAccountSwitchError{OriginalAccountID: p.account.ID, IsStickySession: p.isStickySession} } - return false, nil + return false, statusCode, nil } // mapAntigravityModel 获取映射后的模型名 diff --git a/backend/internal/service/error_policy_integration_test.go b/backend/internal/service/error_policy_integration_test.go index 9f8ad938..a8b42a2c 100644 --- a/backend/internal/service/error_policy_integration_test.go +++ 
b/backend/internal/service/error_policy_integration_test.go @@ -116,7 +116,7 @@ func TestRetryLoop_ErrorPolicy_CustomErrorCodes(t *testing.T) { customCodes: []any{float64(500)}, expectHandleError: 0, expectUpstream: 1, - expectStatusCode: 429, + expectStatusCode: 500, }, { name: "500_in_custom_codes_matched", @@ -364,3 +364,109 @@ func TestRetryLoop_ErrorPolicy_NoPolicy_OriginalBehavior(t *testing.T) { require.Equal(t, antigravityMaxRetries, upstream.calls, "should exhaust all retries") require.Equal(t, 1, handleErrorCount, "handleError should be called once after retries exhausted") } + +// --------------------------------------------------------------------------- +// epTrackingRepo — records SetRateLimited / SetError calls for verification. +// --------------------------------------------------------------------------- + +type epTrackingRepo struct { + mockAccountRepoForGemini + rateLimitedCalls int + rateLimitedID int64 + setErrCalls int + setErrID int64 + tempCalls int +} + +func (r *epTrackingRepo) SetRateLimited(_ context.Context, id int64, _ time.Time) error { + r.rateLimitedCalls++ + r.rateLimitedID = id + return nil +} + +func (r *epTrackingRepo) SetError(_ context.Context, id int64, _ string) error { + r.setErrCalls++ + r.setErrID = id + return nil +} + +func (r *epTrackingRepo) SetTempUnschedulable(_ context.Context, _ int64, _ time.Time, _ string) error { + r.tempCalls++ + return nil +} + +// --------------------------------------------------------------------------- +// TestCustomErrorCode599_SkippedErrors_Return500_NoRateLimit +// +// 核心场景:自定义错误码设为 [599](一个不会真正出现的错误码), +// 当上游返回 429/500/503/401 时: +// - 返回给客户端的状态码必须是 500(而不是透传原始状态码) +// - 不调用 SetRateLimited(不进入限流状态) +// - 不调用 SetError(不停止调度) +// - 不调用 handleError +// --------------------------------------------------------------------------- + +func TestCustomErrorCode599_SkippedErrors_Return500_NoRateLimit(t *testing.T) { + errorCodes := []int{429, 500, 503, 401, 403} + + for _, upstreamStatus := 
range errorCodes { + t.Run(http.StatusText(upstreamStatus), func(t *testing.T) { + saveAndSetBaseURLs(t) + + upstream := &epFixedUpstream{ + statusCode: upstreamStatus, + body: `{"error":"some upstream error"}`, + } + repo := &epTrackingRepo{} + rlSvc := NewRateLimitService(repo, nil, &config.Config{}, nil, nil) + svc := &AntigravityGatewayService{rateLimitService: rlSvc} + + account := &Account{ + ID: 500, + Type: AccountTypeAPIKey, + Platform: PlatformAntigravity, + Schedulable: true, + Status: StatusActive, + Concurrency: 1, + Credentials: map[string]any{ + "custom_error_codes_enabled": true, + "custom_error_codes": []any{float64(599)}, + }, + } + + var handleErrorCount int + p := newRetryParams(account, upstream, func(_ context.Context, _ string, _ *Account, _ int, _ http.Header, _ []byte, _ string, _ int64, _ string, _ bool) *handleModelRateLimitResult { + handleErrorCount++ + return nil + }) + + result, err := svc.antigravityRetryLoop(p) + + // 不应返回 error(Skipped 不触发账号切换) + require.NoError(t, err, "should not return error") + require.NotNil(t, result, "result should not be nil") + require.NotNil(t, result.resp, "response should not be nil") + defer func() { _ = result.resp.Body.Close() }() + + // 状态码必须是 500(不透传原始状态码) + require.Equal(t, http.StatusInternalServerError, result.resp.StatusCode, + "skipped error should return 500, not %d", upstreamStatus) + + // 不调用 handleError + require.Equal(t, 0, handleErrorCount, + "handleError should NOT be called for skipped errors") + + // 不标记限流 + require.Equal(t, 0, repo.rateLimitedCalls, + "SetRateLimited should NOT be called for skipped errors") + + // 不停止调度 + require.Equal(t, 0, repo.setErrCalls, + "SetError should NOT be called for skipped errors") + + // 只调用一次上游(不重试) + require.Equal(t, 1, upstream.calls, + "should call upstream exactly once (no retry)") + }) + } +} diff --git a/backend/internal/service/error_policy_test.go b/backend/internal/service/error_policy_test.go index a8b69c22..9d7d025e 100644 --- 
a/backend/internal/service/error_policy_test.go +++ b/backend/internal/service/error_policy_test.go @@ -158,6 +158,7 @@ func TestApplyErrorPolicy(t *testing.T) { statusCode int body []byte expectedHandled bool + expectedStatus int // expected outStatus expectedSwitchErr bool // expect *AntigravityAccountSwitchError handleErrorCalls int }{ @@ -171,6 +172,7 @@ func TestApplyErrorPolicy(t *testing.T) { statusCode: 500, body: []byte(`"error"`), expectedHandled: false, + expectedStatus: 500, // passthrough handleErrorCalls: 0, }, { @@ -187,6 +189,7 @@ func TestApplyErrorPolicy(t *testing.T) { statusCode: 500, // not in custom codes body: []byte(`"error"`), expectedHandled: true, + expectedStatus: http.StatusInternalServerError, // skipped → 500 handleErrorCalls: 0, }, { @@ -203,6 +206,7 @@ func TestApplyErrorPolicy(t *testing.T) { statusCode: 500, body: []byte(`"error"`), expectedHandled: true, + expectedStatus: 500, // matched → original status handleErrorCalls: 1, }, { @@ -225,6 +229,7 @@ func TestApplyErrorPolicy(t *testing.T) { statusCode: 503, body: []byte(`overloaded`), expectedHandled: true, + expectedStatus: 503, // temp_unscheduled → original status expectedSwitchErr: true, handleErrorCalls: 0, }, @@ -250,9 +255,10 @@ func TestApplyErrorPolicy(t *testing.T) { isStickySession: true, } - handled, retErr := svc.applyErrorPolicy(p, tt.statusCode, http.Header{}, tt.body) + handled, outStatus, retErr := svc.applyErrorPolicy(p, tt.statusCode, http.Header{}, tt.body) require.Equal(t, tt.expectedHandled, handled, "handled mismatch") + require.Equal(t, tt.expectedStatus, outStatus, "outStatus mismatch") require.Equal(t, tt.handleErrorCalls, handleErrorCount, "handleError call count mismatch") if tt.expectedSwitchErr { diff --git a/backend/internal/service/gemini_messages_compat_service.go b/backend/internal/service/gemini_messages_compat_service.go index d77f6f92..9197021f 100644 --- a/backend/internal/service/gemini_messages_compat_service.go +++ 
b/backend/internal/service/gemini_messages_compat_service.go @@ -839,7 +839,7 @@ func (s *GeminiMessagesCompatService) Forward(ctx context.Context, c *gin.Contex if upstreamReqID == "" { upstreamReqID = resp.Header.Get("x-goog-request-id") } - return nil, s.writeGeminiMappedError(c, account, resp.StatusCode, upstreamReqID, respBody) + return nil, s.writeGeminiMappedError(c, account, http.StatusInternalServerError, upstreamReqID, respBody) case ErrorPolicyMatched, ErrorPolicyTempUnscheduled: s.handleGeminiUpstreamError(ctx, account, resp.StatusCode, resp.Header, respBody) upstreamReqID := resp.Header.Get(requestIDHeader) @@ -1283,7 +1283,7 @@ func (s *GeminiMessagesCompatService) ForwardNative(ctx context.Context, c *gin. if contentType == "" { contentType = "application/json" } - c.Data(resp.StatusCode, contentType, respBody) + c.Data(http.StatusInternalServerError, contentType, respBody) return nil, fmt.Errorf("gemini upstream error: %d (skipped by error policy)", resp.StatusCode) case ErrorPolicyMatched, ErrorPolicyTempUnscheduled: s.handleGeminiUpstreamError(ctx, account, resp.StatusCode, resp.Header, respBody) From e66635648371fc859c09945707313c1fe72b8d68 Mon Sep 17 00:00:00 2001 From: erio Date: Mon, 9 Feb 2026 18:12:39 +0800 Subject: [PATCH 007/175] fix: resolve merge conflict in OpsConcurrencyCard.vue --- .../src/views/admin/ops/components/OpsConcurrencyCard.vue | 4 ---- 1 file changed, 4 deletions(-) diff --git a/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue b/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue index 87f6a4d8..ca640ade 100644 --- a/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue +++ b/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue @@ -162,10 +162,6 @@ const groupRows = computed((): SummaryRow[] => { total_accounts: totalAccounts, available_accounts: availableAccounts, rate_limited_accounts: safeNumber(avail.rate_limit_count), -<<<<<<< HEAD -======= - ->>>>>>> v0.1.76 error_accounts: 
safeNumber(avail.error_count), total_concurrency: totalConcurrency, used_concurrency: usedConcurrency, From 30c30b1712bee579603b0ce6136f72f9d79b6efa Mon Sep 17 00:00:00 2001 From: erio Date: Mon, 9 Feb 2026 18:53:52 +0800 Subject: [PATCH 008/175] fix: skip rate limiting when custom error codes don't match upstream status Add ShouldHandleErrorCode guard at the entry of handleGeminiUpstreamError and AntigravityGatewayService.handleUpstreamError so that accounts with custom error codes (e.g. [599]) are not rate-limited when the upstream returns a non-matching status (e.g. 429). --- backend/internal/service/antigravity_gateway_service.go | 4 ++++ backend/internal/service/gemini_messages_compat_service.go | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index c295627e..81a1c149 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -2243,6 +2243,10 @@ func (s *AntigravityGatewayService) handleUpstreamError( requestedModel string, groupID int64, sessionHash string, isStickySession bool, ) *handleModelRateLimitResult { + // 遵守自定义错误码策略:未命中则跳过所有限流处理 + if !account.ShouldHandleErrorCode(statusCode) { + return nil + } // 模型级限流处理(优先) result := s.handleModelRateLimit(&handleModelRateLimitParams{ ctx: ctx, diff --git a/backend/internal/service/gemini_messages_compat_service.go b/backend/internal/service/gemini_messages_compat_service.go index 9197021f..335e1f81 100644 --- a/backend/internal/service/gemini_messages_compat_service.go +++ b/backend/internal/service/gemini_messages_compat_service.go @@ -2597,6 +2597,10 @@ func asInt(v any) (int, bool) { } func (s *GeminiMessagesCompatService) handleGeminiUpstreamError(ctx context.Context, account *Account, statusCode int, headers http.Header, body []byte) { + // 遵守自定义错误码策略:未命中则跳过所有限流处理 + if !account.ShouldHandleErrorCode(statusCode) { + 
return + } if s.rateLimitService != nil && (statusCode == 401 || statusCode == 403 || statusCode == 529) { s.rateLimitService.HandleUpstreamError(ctx, account, statusCode, headers, body) return From 3e3c015efa55fd7013331a2f3cdb547ea22de4ae Mon Sep 17 00:00:00 2001 From: erio Date: Mon, 9 Feb 2026 19:22:32 +0800 Subject: [PATCH 009/175] fix: Gemini error policy check should precede retry logic --- .../service/gemini_messages_compat_service.go | 36 +++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/backend/internal/service/gemini_messages_compat_service.go b/backend/internal/service/gemini_messages_compat_service.go index 335e1f81..792c8f4b 100644 --- a/backend/internal/service/gemini_messages_compat_service.go +++ b/backend/internal/service/gemini_messages_compat_service.go @@ -770,6 +770,14 @@ func (s *GeminiMessagesCompatService) Forward(ctx context.Context, c *gin.Contex break } + // 错误策略优先:匹配则跳过重试直接处理。 + if matched, rebuilt := s.checkErrorPolicyInLoop(ctx, account, resp); matched { + resp = rebuilt + break + } else { + resp = rebuilt + } + if resp.StatusCode >= 400 && s.shouldRetryGeminiUpstreamError(account, resp.StatusCode) { respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) _ = resp.Body.Close() @@ -1176,6 +1184,14 @@ func (s *GeminiMessagesCompatService) ForwardNative(ctx context.Context, c *gin. return nil, s.writeGoogleError(c, http.StatusBadGateway, "Upstream request failed after retries: "+safeErr) } + // 错误策略优先:匹配则跳过重试直接处理。 + if matched, rebuilt := s.checkErrorPolicyInLoop(ctx, account, resp); matched { + resp = rebuilt + break + } else { + resp = rebuilt + } + if resp.StatusCode >= 400 && s.shouldRetryGeminiUpstreamError(account, resp.StatusCode) { respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) _ = resp.Body.Close() @@ -1425,6 +1441,26 @@ func (s *GeminiMessagesCompatService) ForwardNative(ctx context.Context, c *gin. 
}, nil } +// checkErrorPolicyInLoop 在重试循环内预检查错误策略。 +// 返回 true 表示策略已匹配(调用者应 break),resp 已重建可直接使用。 +// 返回 false 表示 ErrorPolicyNone,resp 已重建,调用者继续走重试逻辑。 +func (s *GeminiMessagesCompatService) checkErrorPolicyInLoop( + ctx context.Context, account *Account, resp *http.Response, +) (matched bool, rebuilt *http.Response) { + if resp.StatusCode < 400 || s.rateLimitService == nil { + return false, resp + } + body, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + _ = resp.Body.Close() + rebuilt = &http.Response{ + StatusCode: resp.StatusCode, + Header: resp.Header.Clone(), + Body: io.NopCloser(bytes.NewReader(body)), + } + policy := s.rateLimitService.CheckErrorPolicy(ctx, account, resp.StatusCode, body) + return policy != ErrorPolicyNone, rebuilt +} + func (s *GeminiMessagesCompatService) shouldRetryGeminiUpstreamError(account *Account, statusCode int) bool { switch statusCode { case 429, 500, 502, 503, 504, 529: From 7e4637cd703d400f9106d496ef1f60b3de534bfc Mon Sep 17 00:00:00 2001 From: erio Date: Mon, 9 Feb 2026 20:08:00 +0800 Subject: [PATCH 010/175] fix: support clearing model-level rate limits from action menu and temp-unsched reset --- backend/internal/service/ratelimit_service.go | 4 ++++ .../components/admin/account/AccountActionMenu.vue | 14 +++++++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/backend/internal/service/ratelimit_service.go b/backend/internal/service/ratelimit_service.go index 63732dee..12c48ab8 100644 --- a/backend/internal/service/ratelimit_service.go +++ b/backend/internal/service/ratelimit_service.go @@ -623,6 +623,10 @@ func (s *RateLimitService) ClearTempUnschedulable(ctx context.Context, accountID slog.Warn("temp_unsched_cache_delete_failed", "account_id", accountID, "error", err) } } + // 同时清除模型级别限流 + if err := s.accountRepo.ClearModelRateLimits(ctx, accountID); err != nil { + slog.Warn("clear_model_rate_limits_on_temp_unsched_reset_failed", "account_id", accountID, "error", err) + } return nil } diff --git 
a/frontend/src/components/admin/account/AccountActionMenu.vue b/frontend/src/components/admin/account/AccountActionMenu.vue index bb753faa..2325f4b4 100644 --- a/frontend/src/components/admin/account/AccountActionMenu.vue +++ b/frontend/src/components/admin/account/AccountActionMenu.vue @@ -53,7 +53,19 @@ import type { Account } from '@/types' const props = defineProps<{ show: boolean; account: Account | null; position: { top: number; left: number } | null }>() const emit = defineEmits(['close', 'test', 'stats', 'reauth', 'refresh-token', 'reset-status', 'clear-rate-limit']) const { t } = useI18n() -const isRateLimited = computed(() => props.account?.rate_limit_reset_at && new Date(props.account.rate_limit_reset_at) > new Date()) +const isRateLimited = computed(() => { + if (props.account?.rate_limit_reset_at && new Date(props.account.rate_limit_reset_at) > new Date()) { + return true + } + const modelLimits = (props.account?.extra as Record | undefined)?.model_rate_limits as + | Record + | undefined + if (modelLimits) { + const now = new Date() + return Object.values(modelLimits).some(info => new Date(info.rate_limit_reset_at) > now) + } + return false +}) const isOverloaded = computed(() => props.account?.overload_until && new Date(props.account.overload_until) > new Date()) const handleKeydown = (event: KeyboardEvent) => { From 18b591bc3b0e38448d24413063ae9e0915402601 Mon Sep 17 00:00:00 2001 From: erio Date: Mon, 9 Feb 2026 22:13:44 +0800 Subject: [PATCH 011/175] feat: Antigravity extra failover retries after default retries exhausted When default failover retries are exhausted, continue retrying with Antigravity accounts only (up to 10 times, configurable via GATEWAY_ANTIGRAVITY_EXTRA_RETRIES). Each extra retry uses a fixed 500ms delay. Non-Antigravity accounts are skipped during the extra retry phase. Applied to all three endpoints: Gemini compat, Claude, and Gemini native API paths. 
--- backend/internal/config/config.go | 4 + backend/internal/handler/gateway_handler.go | 65 ++- .../gateway_handler_extra_retry_test.go | 417 ++++++++++++++++++ .../internal/handler/gemini_v1beta_handler.go | 26 +- 4 files changed, 504 insertions(+), 8 deletions(-) create mode 100644 backend/internal/handler/gateway_handler_extra_retry_test.go diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index 91437ba8..460bd05d 100644 --- a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -279,6 +279,9 @@ type GatewayConfig struct { // Antigravity 429 fallback 限流时间(分钟),解析重置时间失败时使用 AntigravityFallbackCooldownMinutes int `mapstructure:"antigravity_fallback_cooldown_minutes"` + // 默认重试用完后,额外使用 Antigravity 账号重试的最大次数(0 表示禁用) + AntigravityExtraRetries int `mapstructure:"antigravity_extra_retries"` + // Scheduling: 账号调度相关配置 Scheduling GatewaySchedulingConfig `mapstructure:"scheduling"` @@ -883,6 +886,7 @@ func setDefaults() { viper.SetDefault("gateway.max_account_switches", 10) viper.SetDefault("gateway.max_account_switches_gemini", 3) viper.SetDefault("gateway.antigravity_fallback_cooldown_minutes", 1) + viper.SetDefault("gateway.antigravity_extra_retries", 10) viper.SetDefault("gateway.max_body_size", int64(100*1024*1024)) viper.SetDefault("gateway.connection_pool_isolation", ConnectionPoolIsolationAccountProxy) // HTTP 上游连接池配置(针对 5000+ 并发用户优化) diff --git a/backend/internal/handler/gateway_handler.go b/backend/internal/handler/gateway_handler.go index 6900fa55..361cd8b5 100644 --- a/backend/internal/handler/gateway_handler.go +++ b/backend/internal/handler/gateway_handler.go @@ -39,6 +39,7 @@ type GatewayHandler struct { concurrencyHelper *ConcurrencyHelper maxAccountSwitches int maxAccountSwitchesGemini int + antigravityExtraRetries int } // NewGatewayHandler creates a new GatewayHandler @@ -57,6 +58,7 @@ func NewGatewayHandler( pingInterval := time.Duration(0) maxAccountSwitches := 10 maxAccountSwitchesGemini := 3 + 
antigravityExtraRetries := 10 if cfg != nil { pingInterval = time.Duration(cfg.Concurrency.PingInterval) * time.Second if cfg.Gateway.MaxAccountSwitches > 0 { @@ -65,6 +67,7 @@ func NewGatewayHandler( if cfg.Gateway.MaxAccountSwitchesGemini > 0 { maxAccountSwitchesGemini = cfg.Gateway.MaxAccountSwitchesGemini } + antigravityExtraRetries = cfg.Gateway.AntigravityExtraRetries } return &GatewayHandler{ gatewayService: gatewayService, @@ -78,6 +81,7 @@ func NewGatewayHandler( concurrencyHelper: NewConcurrencyHelper(concurrencyService, SSEPingFormatClaude, pingInterval), maxAccountSwitches: maxAccountSwitches, maxAccountSwitchesGemini: maxAccountSwitchesGemini, + antigravityExtraRetries: antigravityExtraRetries, } } @@ -234,6 +238,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { if platform == service.PlatformGemini { maxAccountSwitches := h.maxAccountSwitchesGemini switchCount := 0 + antigravityExtraCount := 0 failedAccountIDs := make(map[int64]struct{}) var lastFailoverErr *service.UpstreamFailoverError var forceCacheBilling bool // 粘性会话切换时的缓存计费标记 @@ -255,6 +260,15 @@ func (h *GatewayHandler) Messages(c *gin.Context) { account := selection.Account setOpsSelectedAccount(c, account.ID) + // 额外重试阶段:跳过非 Antigravity 账号 + if switchCount >= maxAccountSwitches && account.Platform != service.PlatformAntigravity { + failedAccountIDs[account.ID] = struct{}{} + if selection.Acquired && selection.ReleaseFunc != nil { + selection.ReleaseFunc() + } + continue + } + // 检查请求拦截(预热请求、SUGGESTION MODE等) if account.IsInterceptWarmupEnabled() { interceptType := detectInterceptType(body, reqModel, parsedReq.MaxTokens, reqStream, isClaudeCodeClient) @@ -345,8 +359,17 @@ func (h *GatewayHandler) Messages(c *gin.Context) { forceCacheBilling = true } if switchCount >= maxAccountSwitches { - h.handleFailoverExhausted(c, failoverErr, service.PlatformGemini, streamStarted) - return + // 默认重试用完,进入 Antigravity 额外重试 + antigravityExtraCount++ + if antigravityExtraCount > 
h.antigravityExtraRetries { + h.handleFailoverExhausted(c, failoverErr, service.PlatformGemini, streamStarted) + return + } + log.Printf("Account %d: antigravity extra retry %d/%d", account.ID, antigravityExtraCount, h.antigravityExtraRetries) + if !sleepFixedDelay(c.Request.Context(), antigravityExtraRetryDelay) { + return + } + continue } switchCount++ log.Printf("Account %d: upstream error %d, switching account %d/%d", account.ID, failoverErr.StatusCode, switchCount, maxAccountSwitches) @@ -399,6 +422,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { for { maxAccountSwitches := h.maxAccountSwitches switchCount := 0 + antigravityExtraCount := 0 failedAccountIDs := make(map[int64]struct{}) var lastFailoverErr *service.UpstreamFailoverError retryWithFallback := false @@ -422,6 +446,15 @@ func (h *GatewayHandler) Messages(c *gin.Context) { account := selection.Account setOpsSelectedAccount(c, account.ID) + // 额外重试阶段:跳过非 Antigravity 账号 + if switchCount >= maxAccountSwitches && account.Platform != service.PlatformAntigravity { + failedAccountIDs[account.ID] = struct{}{} + if selection.Acquired && selection.ReleaseFunc != nil { + selection.ReleaseFunc() + } + continue + } + // 检查请求拦截(预热请求、SUGGESTION MODE等) if account.IsInterceptWarmupEnabled() { interceptType := detectInterceptType(body, reqModel, parsedReq.MaxTokens, reqStream, isClaudeCodeClient) @@ -545,8 +578,17 @@ func (h *GatewayHandler) Messages(c *gin.Context) { forceCacheBilling = true } if switchCount >= maxAccountSwitches { - h.handleFailoverExhausted(c, failoverErr, account.Platform, streamStarted) - return + // 默认重试用完,进入 Antigravity 额外重试 + antigravityExtraCount++ + if antigravityExtraCount > h.antigravityExtraRetries { + h.handleFailoverExhausted(c, failoverErr, account.Platform, streamStarted) + return + } + log.Printf("Account %d: antigravity extra retry %d/%d", account.ID, antigravityExtraCount, h.antigravityExtraRetries) + if !sleepFixedDelay(c.Request.Context(), antigravityExtraRetryDelay) { + 
return + } + continue } switchCount++ log.Printf("Account %d: upstream error %d, switching account %d/%d", account.ID, failoverErr.StatusCode, switchCount, maxAccountSwitches) @@ -838,6 +880,21 @@ func sleepFailoverDelay(ctx context.Context, switchCount int) bool { } } +const antigravityExtraRetryDelay = 500 * time.Millisecond + +// sleepFixedDelay 固定延时等待,返回 false 表示 context 已取消。 +func sleepFixedDelay(ctx context.Context, delay time.Duration) bool { + if delay <= 0 { + return true + } + select { + case <-ctx.Done(): + return false + case <-time.After(delay): + return true + } +} + func (h *GatewayHandler) handleFailoverExhausted(c *gin.Context, failoverErr *service.UpstreamFailoverError, platform string, streamStarted bool) { statusCode := failoverErr.StatusCode responseBody := failoverErr.ResponseBody diff --git a/backend/internal/handler/gateway_handler_extra_retry_test.go b/backend/internal/handler/gateway_handler_extra_retry_test.go new file mode 100644 index 00000000..a0777941 --- /dev/null +++ b/backend/internal/handler/gateway_handler_extra_retry_test.go @@ -0,0 +1,417 @@ +//go:build unit + +package handler + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/require" +) + +// --- sleepFixedDelay --- + +func TestSleepFixedDelay_ZeroDelay(t *testing.T) { + got := sleepFixedDelay(context.Background(), 0) + require.True(t, got, "zero delay should return true immediately") +} + +func TestSleepFixedDelay_NegativeDelay(t *testing.T) { + got := sleepFixedDelay(context.Background(), -1*time.Second) + require.True(t, got, "negative delay should return true immediately") +} + +func TestSleepFixedDelay_NormalDelay(t *testing.T) { + start := time.Now() + got := sleepFixedDelay(context.Background(), 50*time.Millisecond) + elapsed := time.Since(start) + require.True(t, 
got, "normal delay should return true") + require.GreaterOrEqual(t, elapsed, 40*time.Millisecond, "should sleep at least ~50ms") +} + +func TestSleepFixedDelay_ContextCancelled(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + cancel() // cancel immediately + got := sleepFixedDelay(ctx, 10*time.Second) + require.False(t, got, "cancelled context should return false") +} + +func TestSleepFixedDelay_ContextTimeout(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + defer cancel() + got := sleepFixedDelay(ctx, 5*time.Second) + require.False(t, got, "context timeout should return false before delay completes") +} + +// --- antigravityExtraRetryDelay constant --- + +func TestAntigravityExtraRetryDelayValue(t *testing.T) { + require.Equal(t, 500*time.Millisecond, antigravityExtraRetryDelay) +} + +// --- NewGatewayHandler antigravityExtraRetries field --- + +func TestNewGatewayHandler_AntigravityExtraRetries_Default(t *testing.T) { + h := NewGatewayHandler(nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) + require.Equal(t, 10, h.antigravityExtraRetries, "default should be 10 when cfg is nil") +} + +func TestNewGatewayHandler_AntigravityExtraRetries_FromConfig(t *testing.T) { + cfg := &config.Config{ + Gateway: config.GatewayConfig{ + AntigravityExtraRetries: 5, + }, + } + h := NewGatewayHandler(nil, nil, nil, nil, nil, nil, nil, nil, nil, cfg) + require.Equal(t, 5, h.antigravityExtraRetries, "should use config value") +} + +func TestNewGatewayHandler_AntigravityExtraRetries_ZeroDisables(t *testing.T) { + cfg := &config.Config{ + Gateway: config.GatewayConfig{ + AntigravityExtraRetries: 0, + }, + } + h := NewGatewayHandler(nil, nil, nil, nil, nil, nil, nil, nil, nil, cfg) + require.Equal(t, 0, h.antigravityExtraRetries, "zero should disable extra retries") +} + +// --- handleFailoverAllAccountsExhausted (renamed: using handleFailoverExhausted) --- +// We test the error response format helpers that the 
extra retry path uses. + +func TestHandleFailoverExhausted_JSON(t *testing.T) { + gin.SetMode(gin.TestMode) + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + + h := &GatewayHandler{} + failoverErr := &service.UpstreamFailoverError{StatusCode: 429} + h.handleFailoverExhausted(c, failoverErr, service.PlatformAntigravity, false) + + require.Equal(t, http.StatusTooManyRequests, rec.Code) + + var body map[string]any + err := json.Unmarshal(rec.Body.Bytes(), &body) + require.NoError(t, err) + errObj, ok := body["error"].(map[string]any) + require.True(t, ok) + require.Equal(t, "rate_limit_error", errObj["type"]) +} + +func TestHandleFailoverExhaustedSimple_JSON(t *testing.T) { + gin.SetMode(gin.TestMode) + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + + h := &GatewayHandler{} + h.handleFailoverExhaustedSimple(c, 502, false) + + require.Equal(t, http.StatusBadGateway, rec.Code) + + var body map[string]any + err := json.Unmarshal(rec.Body.Bytes(), &body) + require.NoError(t, err) + errObj, ok := body["error"].(map[string]any) + require.True(t, ok) + require.Equal(t, "upstream_error", errObj["type"]) +} + +// --- Extra retry platform filter logic --- + +func TestExtraRetryPlatformFilter(t *testing.T) { + tests := []struct { + name string + switchCount int + maxAccountSwitch int + platform string + expectSkip bool + }{ + { + name: "default_retry_phase_antigravity_not_skipped", + switchCount: 1, + maxAccountSwitch: 3, + platform: service.PlatformAntigravity, + expectSkip: false, + }, + { + name: "default_retry_phase_gemini_not_skipped", + switchCount: 1, + maxAccountSwitch: 3, + platform: service.PlatformGemini, + expectSkip: false, + }, + { + name: "extra_retry_phase_antigravity_not_skipped", + switchCount: 3, + maxAccountSwitch: 3, + platform: service.PlatformAntigravity, + expectSkip: false, + }, + { + name: "extra_retry_phase_gemini_skipped", + switchCount: 3, + maxAccountSwitch: 3, + platform: service.PlatformGemini, + 
expectSkip: true, + }, + { + name: "extra_retry_phase_anthropic_skipped", + switchCount: 3, + maxAccountSwitch: 3, + platform: service.PlatformAnthropic, + expectSkip: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Replicate the filter condition from the handler + shouldSkip := tt.switchCount >= tt.maxAccountSwitch && tt.platform != service.PlatformAntigravity + require.Equal(t, tt.expectSkip, shouldSkip) + }) + } +} + +// --- Extra retry counter logic --- + +func TestExtraRetryCounterExhaustion(t *testing.T) { + tests := []struct { + name string + maxExtraRetries int + currentExtraCount int + expectExhausted bool + }{ + { + name: "first_extra_retry", + maxExtraRetries: 10, + currentExtraCount: 1, + expectExhausted: false, + }, + { + name: "at_limit", + maxExtraRetries: 10, + currentExtraCount: 10, + expectExhausted: false, + }, + { + name: "exceeds_limit", + maxExtraRetries: 10, + currentExtraCount: 11, + expectExhausted: true, + }, + { + name: "zero_disables_extra_retry", + maxExtraRetries: 0, + currentExtraCount: 1, + expectExhausted: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Replicate the exhaustion condition: antigravityExtraCount > h.antigravityExtraRetries + exhausted := tt.currentExtraCount > tt.maxExtraRetries + require.Equal(t, tt.expectExhausted, exhausted) + }) + } +} + +// --- mapUpstreamError (used by handleFailoverExhausted) --- + +func TestMapUpstreamError(t *testing.T) { + h := &GatewayHandler{} + tests := []struct { + name string + statusCode int + expectedStatus int + expectedType string + }{ + {"429", 429, http.StatusTooManyRequests, "rate_limit_error"}, + {"529", 529, http.StatusServiceUnavailable, "overloaded_error"}, + {"500", 500, http.StatusBadGateway, "upstream_error"}, + {"502", 502, http.StatusBadGateway, "upstream_error"}, + {"503", 503, http.StatusBadGateway, "upstream_error"}, + {"504", 504, http.StatusBadGateway, "upstream_error"}, + {"401", 401, 
http.StatusBadGateway, "upstream_error"}, + {"403", 403, http.StatusBadGateway, "upstream_error"}, + {"unknown", 418, http.StatusBadGateway, "upstream_error"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + status, errType, _ := h.mapUpstreamError(tt.statusCode) + require.Equal(t, tt.expectedStatus, status) + require.Equal(t, tt.expectedType, errType) + }) + } +} + +// --- Gemini native path: handleGeminiFailoverExhausted --- + +func TestHandleGeminiFailoverExhausted_NilError(t *testing.T) { + gin.SetMode(gin.TestMode) + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + + h := &GatewayHandler{} + h.handleGeminiFailoverExhausted(c, nil) + + require.Equal(t, http.StatusBadGateway, rec.Code) + var body map[string]any + err := json.Unmarshal(rec.Body.Bytes(), &body) + require.NoError(t, err) + errObj, ok := body["error"].(map[string]any) + require.True(t, ok) + require.Equal(t, "Upstream request failed", errObj["message"]) +} + +func TestHandleGeminiFailoverExhausted_429(t *testing.T) { + gin.SetMode(gin.TestMode) + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + + h := &GatewayHandler{} + failoverErr := &service.UpstreamFailoverError{StatusCode: 429} + h.handleGeminiFailoverExhausted(c, failoverErr) + + require.Equal(t, http.StatusTooManyRequests, rec.Code) +} + +// --- handleStreamingAwareError streaming mode --- + +func TestHandleStreamingAwareError_StreamStarted(t *testing.T) { + gin.SetMode(gin.TestMode) + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + + // Simulate stream already started: set content type and write initial data + c.Writer.Header().Set("Content-Type", "text/event-stream") + c.Writer.WriteHeaderNow() + + h := &GatewayHandler{} + h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error", "test error", true) + + body := rec.Body.String() + require.Contains(t, body, "rate_limit_error") + require.Contains(t, body, "test error") + 
require.Contains(t, body, "data: ") +} + +func TestHandleStreamingAwareError_NotStreaming(t *testing.T) { + gin.SetMode(gin.TestMode) + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + + h := &GatewayHandler{} + h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "no model", false) + + require.Equal(t, http.StatusServiceUnavailable, rec.Code) + var body map[string]any + err := json.Unmarshal(rec.Body.Bytes(), &body) + require.NoError(t, err) + errObj, ok := body["error"].(map[string]any) + require.True(t, ok) + require.Equal(t, "api_error", errObj["type"]) + require.Equal(t, "no model", errObj["message"]) +} + +// --- Integration: extra retry flow simulation --- + +func TestExtraRetryFlowSimulation(t *testing.T) { + // Simulate the full extra retry flow logic + maxAccountSwitches := 3 + maxExtraRetries := 2 + switchCount := 0 + antigravityExtraCount := 0 + + type attempt struct { + platform string + isFailover bool + } + + // Simulate: 3 default retries (all fail), then 2 extra retries (all fail), then exhausted + attempts := []attempt{ + {service.PlatformAntigravity, true}, // switchCount 0 -> 1 + {service.PlatformGemini, true}, // switchCount 1 -> 2 + {service.PlatformAntigravity, true}, // switchCount 2 -> 3 (reaches max) + {service.PlatformAntigravity, true}, // extra retry 1 + {service.PlatformAntigravity, true}, // extra retry 2 + {service.PlatformAntigravity, true}, // extra retry 3 -> exhausted + } + + var exhausted bool + var skipped int + + for _, a := range attempts { + if exhausted { + break + } + + // Extra retry phase: skip non-Antigravity + if switchCount >= maxAccountSwitches && a.platform != service.PlatformAntigravity { + skipped++ + continue + } + + if a.isFailover { + if switchCount >= maxAccountSwitches { + antigravityExtraCount++ + if antigravityExtraCount > maxExtraRetries { + exhausted = true + continue + } + // extra retry delay + continue + continue + } + switchCount++ + } + } + + require.Equal(t, 
3, switchCount, "should have 3 default retries") + require.Equal(t, 3, antigravityExtraCount, "counter incremented 3 times") + require.True(t, exhausted, "should be exhausted after exceeding max extra retries") + require.Equal(t, 0, skipped, "no non-antigravity accounts in this simulation") +} + +func TestExtraRetryFlowSimulation_SkipsNonAntigravity(t *testing.T) { + maxAccountSwitches := 2 + switchCount := 2 // already past default retries + antigravityExtraCount := 0 + maxExtraRetries := 5 + + type accountSelection struct { + platform string + } + + selections := []accountSelection{ + {service.PlatformGemini}, // should be skipped + {service.PlatformAnthropic}, // should be skipped + {service.PlatformAntigravity}, // should be attempted + } + + var skippedCount int + var attemptedCount int + + for _, sel := range selections { + if switchCount >= maxAccountSwitches && sel.platform != service.PlatformAntigravity { + skippedCount++ + continue + } + // Simulate failover + antigravityExtraCount++ + if antigravityExtraCount > maxExtraRetries { + break + } + attemptedCount++ + } + + require.Equal(t, 2, skippedCount, "gemini and anthropic accounts should be skipped") + require.Equal(t, 1, attemptedCount, "only antigravity account should be attempted") + require.Equal(t, 1, antigravityExtraCount) +} diff --git a/backend/internal/handler/gemini_v1beta_handler.go b/backend/internal/handler/gemini_v1beta_handler.go index d5149f22..5a576ab0 100644 --- a/backend/internal/handler/gemini_v1beta_handler.go +++ b/backend/internal/handler/gemini_v1beta_handler.go @@ -323,6 +323,7 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { maxAccountSwitches := h.maxAccountSwitchesGemini switchCount := 0 + antigravityExtraCount := 0 failedAccountIDs := make(map[int64]struct{}) var lastFailoverErr *service.UpstreamFailoverError var forceCacheBilling bool // 粘性会话切换时的缓存计费标记 @@ -340,6 +341,15 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { account := 
selection.Account setOpsSelectedAccount(c, account.ID) + // 额外重试阶段:跳过非 Antigravity 账号 + if switchCount >= maxAccountSwitches && account.Platform != service.PlatformAntigravity { + failedAccountIDs[account.ID] = struct{}{} + if selection.Acquired && selection.ReleaseFunc != nil { + selection.ReleaseFunc() + } + continue + } + // 检测账号切换:如果粘性会话绑定的账号与当前选择的账号不同,清除 thoughtSignature // 注意:Gemini 原生 API 的 thoughtSignature 与具体上游账号强相关;跨账号透传会导致 400。 if sessionBoundAccountID > 0 && sessionBoundAccountID != account.ID { @@ -424,15 +434,23 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { var failoverErr *service.UpstreamFailoverError if errors.As(err, &failoverErr) { failedAccountIDs[account.ID] = struct{}{} + lastFailoverErr = failoverErr if needForceCacheBilling(hasBoundSession, failoverErr) { forceCacheBilling = true } if switchCount >= maxAccountSwitches { - lastFailoverErr = failoverErr - h.handleGeminiFailoverExhausted(c, lastFailoverErr) - return + // 默认重试用完,进入 Antigravity 额外重试 + antigravityExtraCount++ + if antigravityExtraCount > h.antigravityExtraRetries { + h.handleGeminiFailoverExhausted(c, failoverErr) + return + } + log.Printf("Gemini account %d: antigravity extra retry %d/%d", account.ID, antigravityExtraCount, h.antigravityExtraRetries) + if !sleepFixedDelay(c.Request.Context(), antigravityExtraRetryDelay) { + return + } + continue } - lastFailoverErr = failoverErr switchCount++ log.Printf("Gemini account %d: upstream error %d, switching account %d/%d", account.ID, failoverErr.StatusCode, switchCount, maxAccountSwitches) if account.Platform == service.PlatformAntigravity { From 345f853b5d8f34273a43bb80e69b87162edd57b2 Mon Sep 17 00:00:00 2001 From: erio Date: Mon, 9 Feb 2026 22:27:47 +0800 Subject: [PATCH 012/175] chore: bump version to 0.1.77.1 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 508699ff..af6111e5 100644 --- 
a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.76.4 +0.1.77.1 From 4c1fd570f02430c648e97b4217d602000b6aec89 Mon Sep 17 00:00:00 2001 From: Edric Li Date: Mon, 9 Feb 2026 22:22:19 +0800 Subject: [PATCH 013/175] feat: failover and temp-unschedule on Google "Invalid project resource name" 400 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Google 后端间歇性返回 400 "Invalid project resource name" 错误, 此前该错误直接透传给客户端且不触发账号切换,导致请求失败。 - 在 Antigravity 和 Gemini 两个平台的所有转发路径中, 精确匹配该错误消息后触发 failover 自动换号重试 - 命中后将账号临时封禁 1 小时,避免反复调度到同一故障账号 - 提取共享函数 isGoogleProjectConfigError / tempUnscheduleGoogleConfigError 消除跨 Service 的代码重复 --- .../service/antigravity_gateway_service.go | 62 +++++++++++++++++++ .../service/gemini_messages_compat_service.go | 61 ++++++++++++++++++ 2 files changed, 123 insertions(+) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 81a1c149..71dee705 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -1285,6 +1285,28 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, originalModel, 0, "", isStickySession) + // 精确匹配服务端配置类 400 错误,触发 failover + 临时封禁 + if resp.StatusCode == http.StatusBadRequest { + msg := strings.ToLower(strings.TrimSpace(extractAntigravityErrorMessage(respBody))) + if isGoogleProjectConfigError(msg) { + upstreamMsg := sanitizeUpstreamErrorMessage(strings.TrimSpace(extractAntigravityErrorMessage(respBody))) + upstreamDetail := s.getUpstreamErrorDetail(respBody) + log.Printf("%s status=400 google_config_error failover=true upstream_message=%q account=%d", prefix, upstreamMsg, account.ID) + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: 
account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: resp.Header.Get("x-request-id"), + Kind: "failover", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + tempUnscheduleGoogleConfigError(ctx, s.accountRepo, account.ID, prefix) + return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode, ResponseBody: respBody} + } + } + if s.shouldFailoverUpstreamError(resp.StatusCode) { upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(respBody)) upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) @@ -1825,6 +1847,23 @@ func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Co // Always record upstream context for Ops error logs, even when we will failover. setOpsUpstreamError(c, resp.StatusCode, upstreamMsg, upstreamDetail) + // 精确匹配服务端配置类 400 错误,触发 failover + 临时封禁 + if resp.StatusCode == http.StatusBadRequest && isGoogleProjectConfigError(strings.ToLower(upstreamMsg)) { + log.Printf("%s status=400 google_config_error failover=true upstream_message=%q account=%d", prefix, upstreamMsg, account.ID) + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: requestID, + Kind: "failover", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + tempUnscheduleGoogleConfigError(ctx, s.accountRepo, account.ID, prefix) + return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode, ResponseBody: unwrappedForOps} + } + if s.shouldFailoverUpstreamError(resp.StatusCode) { appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ Platform: account.Platform, @@ -1920,6 +1959,29 @@ func (s *AntigravityGatewayService) shouldFailoverUpstreamError(statusCode int) } } +// isGoogleProjectConfigError 判断(已提取的小写)错误消息是否属于 Google 服务端配置类问题。 +// 只精确匹配已知的服务端侧错误,避免对客户端请求错误做无意义重试。 +// 适用于所有走 Google 后端的平台(Antigravity、Gemini)。 +func isGoogleProjectConfigError(lowerMsg string) bool { + // Google 间歇性 Bug:Project 
ID 有效但被临时识别失败 + return strings.Contains(lowerMsg, "invalid project resource name") +} + +// googleConfigErrorCooldown 服务端配置类 400 错误的临时封禁时长 +const googleConfigErrorCooldown = 60 * time.Minute + +// tempUnscheduleGoogleConfigError 对服务端配置类 400 错误触发临时封禁, +// 避免短时间内反复调度到同一个有问题的账号。 +func tempUnscheduleGoogleConfigError(ctx context.Context, repo AccountRepository, accountID int64, logPrefix string) { + until := time.Now().Add(googleConfigErrorCooldown) + reason := "400: invalid project resource name (auto temp-unschedule 1h)" + if err := repo.SetTempUnschedulable(ctx, accountID, until, reason); err != nil { + log.Printf("%s temp_unschedule_failed account=%d error=%v", logPrefix, accountID, err) + } else { + log.Printf("%s temp_unscheduled account=%d until=%v reason=%q", logPrefix, accountID, until.Format("15:04:05"), reason) + } +} + // sleepAntigravityBackoffWithContext 带 context 取消检查的退避等待 // 返回 true 表示正常完成等待,false 表示 context 已取消 func sleepAntigravityBackoffWithContext(ctx context.Context, attempt int) bool { diff --git a/backend/internal/service/gemini_messages_compat_service.go b/backend/internal/service/gemini_messages_compat_service.go index 792c8f4b..1e59c5fd 100644 --- a/backend/internal/service/gemini_messages_compat_service.go +++ b/backend/internal/service/gemini_messages_compat_service.go @@ -880,6 +880,38 @@ func (s *GeminiMessagesCompatService) Forward(ctx context.Context, c *gin.Contex // ErrorPolicyNone → 原有逻辑 s.handleGeminiUpstreamError(ctx, account, resp.StatusCode, resp.Header, respBody) + // 精确匹配服务端配置类 400 错误,触发 failover + 临时封禁 + if resp.StatusCode == http.StatusBadRequest { + msg400 := strings.ToLower(strings.TrimSpace(extractUpstreamErrorMessage(respBody))) + if isGoogleProjectConfigError(msg400) { + upstreamReqID := resp.Header.Get(requestIDHeader) + if upstreamReqID == "" { + upstreamReqID = resp.Header.Get("x-goog-request-id") + } + upstreamMsg := sanitizeUpstreamErrorMessage(strings.TrimSpace(extractUpstreamErrorMessage(respBody))) + 
upstreamDetail := "" + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + if maxBytes <= 0 { + maxBytes = 2048 + } + upstreamDetail = truncateString(string(respBody), maxBytes) + } + log.Printf("[Gemini] status=400 google_config_error failover=true upstream_message=%q account=%d", upstreamMsg, account.ID) + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: upstreamReqID, + Kind: "failover", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + tempUnscheduleGoogleConfigError(ctx, s.accountRepo, account.ID, "[Gemini]") + return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode, ResponseBody: respBody} + } + } if s.shouldFailoverGeminiUpstreamError(resp.StatusCode) { upstreamReqID := resp.Header.Get(requestIDHeader) if upstreamReqID == "" { @@ -1330,6 +1362,35 @@ func (s *GeminiMessagesCompatService) ForwardNative(ctx context.Context, c *gin. 
// ErrorPolicyNone → 原有逻辑 s.handleGeminiUpstreamError(ctx, account, resp.StatusCode, resp.Header, respBody) + // 精确匹配服务端配置类 400 错误,触发 failover + 临时封禁 + if resp.StatusCode == http.StatusBadRequest { + msg400 := strings.ToLower(strings.TrimSpace(extractUpstreamErrorMessage(respBody))) + if isGoogleProjectConfigError(msg400) { + evBody := unwrapIfNeeded(isOAuth, respBody) + upstreamMsg := sanitizeUpstreamErrorMessage(strings.TrimSpace(extractUpstreamErrorMessage(evBody))) + upstreamDetail := "" + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + if maxBytes <= 0 { + maxBytes = 2048 + } + upstreamDetail = truncateString(string(evBody), maxBytes) + } + log.Printf("[Gemini] status=400 google_config_error failover=true upstream_message=%q account=%d", upstreamMsg, account.ID) + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: requestID, + Kind: "failover", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + tempUnscheduleGoogleConfigError(ctx, s.accountRepo, account.ID, "[Gemini]") + return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode, ResponseBody: evBody} + } + } if s.shouldFailoverGeminiUpstreamError(resp.StatusCode) { evBody := unwrapIfNeeded(isOAuth, respBody) upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(evBody)) From 425dfb80d95bc4121c42664fc2931b3633f16216 Mon Sep 17 00:00:00 2001 From: Edric Li Date: Mon, 9 Feb 2026 23:25:30 +0800 Subject: [PATCH 014/175] feat: failover and temp-unschedule on empty stream response - Empty stream responses now return UpstreamFailoverError instead of plain 502, triggering automatic account switching (up to 10 retries) - Add tempUnscheduleEmptyResponse: accounts returning empty responses are temp-unscheduled for 30 minutes - Apply to both Claude and Gemini non-streaming paths - Align 
googleConfigErrorCooldown from 60m to 30m for consistency --- .../service/antigravity_gateway_service.go | 44 ++++++++++++++++--- 1 file changed, 37 insertions(+), 7 deletions(-) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 71dee705..a5fd1535 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -1351,6 +1351,10 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, streamRes, err := s.handleClaudeStreamToNonStreaming(c, resp, startTime, originalModel) if err != nil { log.Printf("%s status=stream_collect_error error=%v", prefix, err) + var failoverErr *UpstreamFailoverError + if errors.As(err, &failoverErr) && failoverErr.StatusCode == http.StatusBadGateway { + tempUnscheduleEmptyResponse(ctx, s.accountRepo, account.ID, prefix) + } return nil, err } usage = streamRes.usage @@ -1920,6 +1924,10 @@ handleSuccess: streamRes, err := s.handleGeminiStreamToNonStreaming(c, resp, startTime) if err != nil { log.Printf("%s status=stream_collect_error error=%v", prefix, err) + var failoverErr *UpstreamFailoverError + if errors.As(err, &failoverErr) && failoverErr.StatusCode == http.StatusBadGateway { + tempUnscheduleEmptyResponse(ctx, s.accountRepo, account.ID, prefix) + } return nil, err } usage = streamRes.usage @@ -1968,13 +1976,28 @@ func isGoogleProjectConfigError(lowerMsg string) bool { } // googleConfigErrorCooldown 服务端配置类 400 错误的临时封禁时长 -const googleConfigErrorCooldown = 60 * time.Minute +const googleConfigErrorCooldown = 30 * time.Minute // tempUnscheduleGoogleConfigError 对服务端配置类 400 错误触发临时封禁, // 避免短时间内反复调度到同一个有问题的账号。 func tempUnscheduleGoogleConfigError(ctx context.Context, repo AccountRepository, accountID int64, logPrefix string) { until := time.Now().Add(googleConfigErrorCooldown) - reason := "400: invalid project resource name (auto temp-unschedule 1h)" + reason := "400: 
invalid project resource name (auto temp-unschedule 30m)" + if err := repo.SetTempUnschedulable(ctx, accountID, until, reason); err != nil { + log.Printf("%s temp_unschedule_failed account=%d error=%v", logPrefix, accountID, err) + } else { + log.Printf("%s temp_unscheduled account=%d until=%v reason=%q", logPrefix, accountID, until.Format("15:04:05"), reason) + } +} + +// emptyResponseCooldown 空流式响应的临时封禁时长 +const emptyResponseCooldown = 30 * time.Minute + +// tempUnscheduleEmptyResponse 对空流式响应触发临时封禁, +// 避免短时间内反复调度到同一个返回空响应的账号。 +func tempUnscheduleEmptyResponse(ctx context.Context, repo AccountRepository, accountID int64, logPrefix string) { + until := time.Now().Add(emptyResponseCooldown) + reason := "empty stream response (auto temp-unschedule 30m)" if err := repo.SetTempUnschedulable(ctx, accountID, until, reason); err != nil { log.Printf("%s temp_unschedule_failed account=%d error=%v", logPrefix, accountID, err) } else { @@ -2786,9 +2809,13 @@ returnResponse: // 选择最后一个有效响应 finalResponse := pickGeminiCollectResult(last, lastWithParts) - // 处理空响应情况 + // 处理空响应情况 — 触发 failover 切换账号重试 if last == nil && lastWithParts == nil { - log.Printf("[antigravity-Forward] warning: empty stream response, no valid chunks received") + log.Printf("[antigravity-Forward] warning: empty stream response (gemini non-stream), triggering failover") + return nil, &UpstreamFailoverError{ + StatusCode: http.StatusBadGateway, + ResponseBody: []byte(`{"error":"empty stream response from upstream"}`), + } } // 如果收集到了图片 parts,需要合并到最终响应中 @@ -3201,10 +3228,13 @@ returnResponse: // 选择最后一个有效响应 finalResponse := pickGeminiCollectResult(last, lastWithParts) - // 处理空响应情况 + // 处理空响应情况 — 触发 failover 切换账号重试 if last == nil && lastWithParts == nil { - log.Printf("[antigravity-Forward] warning: empty stream response, no valid chunks received") - return nil, s.writeClaudeError(c, http.StatusBadGateway, "upstream_error", "Empty response from upstream") + log.Printf("[antigravity-Forward] warning: empty stream 
response (claude non-stream), triggering failover") + return nil, &UpstreamFailoverError{ + StatusCode: http.StatusBadGateway, + ResponseBody: []byte(`{"error":"empty stream response from upstream"}`), + } } // 将收集的所有 parts 合并到最终响应中 From 6328e694417662ca6a25500655095a88de4249cf Mon Sep 17 00:00:00 2001 From: Edric Li Date: Tue, 10 Feb 2026 00:53:54 +0800 Subject: [PATCH 015/175] feat: same-account retry before failover for transient errors For retryable transient errors (Google 400 "invalid project resource name" and empty stream responses), retry on the same account up to 2 times (with 500ms delay) before switching to another account. - Add RetryableOnSameAccount field to UpstreamFailoverError - Add same-account retry loop in both Gemini and Claude/OpenAI handler paths - Move temp-unschedule from service layer to handler layer (only after all same-account retries exhausted) - Reduce temp-unschedule cooldown from 30 minutes to 1 minute --- backend/internal/handler/gateway_handler.go | 57 ++++++++++++++++++- .../service/antigravity_gateway_service.go | 40 ++++++------- backend/internal/service/gateway_service.go | 21 ++++++- .../service/gemini_messages_compat_service.go | 6 +- 4 files changed, 91 insertions(+), 33 deletions(-) diff --git a/backend/internal/handler/gateway_handler.go b/backend/internal/handler/gateway_handler.go index 361cd8b5..3003b5ae 100644 --- a/backend/internal/handler/gateway_handler.go +++ b/backend/internal/handler/gateway_handler.go @@ -240,6 +240,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { switchCount := 0 antigravityExtraCount := 0 failedAccountIDs := make(map[int64]struct{}) + sameAccountRetryCount := make(map[int64]int) // 同账号重试计数 var lastFailoverErr *service.UpstreamFailoverError var forceCacheBilling bool // 粘性会话切换时的缓存计费标记 @@ -353,11 +354,28 @@ func (h *GatewayHandler) Messages(c *gin.Context) { if err != nil { var failoverErr *service.UpstreamFailoverError if errors.As(err, &failoverErr) { - failedAccountIDs[account.ID] = 
struct{}{} lastFailoverErr = failoverErr if needForceCacheBilling(hasBoundSession, failoverErr) { forceCacheBilling = true } + + // 同账号重试:对 RetryableOnSameAccount 的临时性错误,先在同一账号上重试 + if failoverErr.RetryableOnSameAccount && sameAccountRetryCount[account.ID] < maxSameAccountRetries { + sameAccountRetryCount[account.ID]++ + log.Printf("Account %d: retryable error %d, same-account retry %d/%d", + account.ID, failoverErr.StatusCode, sameAccountRetryCount[account.ID], maxSameAccountRetries) + if !sleepSameAccountRetryDelay(c.Request.Context()) { + return + } + continue + } + + // 同账号重试用尽,执行临时封禁并切换账号 + if failoverErr.RetryableOnSameAccount { + h.gatewayService.TempUnscheduleRetryableError(c.Request.Context(), account.ID, failoverErr) + } + + failedAccountIDs[account.ID] = struct{}{} if switchCount >= maxAccountSwitches { // 默认重试用完,进入 Antigravity 额外重试 antigravityExtraCount++ @@ -424,6 +442,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { switchCount := 0 antigravityExtraCount := 0 failedAccountIDs := make(map[int64]struct{}) + sameAccountRetryCount := make(map[int64]int) // 同账号重试计数 var lastFailoverErr *service.UpstreamFailoverError retryWithFallback := false var forceCacheBilling bool // 粘性会话切换时的缓存计费标记 @@ -572,11 +591,28 @@ func (h *GatewayHandler) Messages(c *gin.Context) { } var failoverErr *service.UpstreamFailoverError if errors.As(err, &failoverErr) { - failedAccountIDs[account.ID] = struct{}{} lastFailoverErr = failoverErr if needForceCacheBilling(hasBoundSession, failoverErr) { forceCacheBilling = true } + + // 同账号重试:对 RetryableOnSameAccount 的临时性错误,先在同一账号上重试 + if failoverErr.RetryableOnSameAccount && sameAccountRetryCount[account.ID] < maxSameAccountRetries { + sameAccountRetryCount[account.ID]++ + log.Printf("Account %d: retryable error %d, same-account retry %d/%d", + account.ID, failoverErr.StatusCode, sameAccountRetryCount[account.ID], maxSameAccountRetries) + if !sleepSameAccountRetryDelay(c.Request.Context()) { + return + } + continue + } + + // 
同账号重试用尽,执行临时封禁并切换账号 + if failoverErr.RetryableOnSameAccount { + h.gatewayService.TempUnscheduleRetryableError(c.Request.Context(), account.ID, failoverErr) + } + + failedAccountIDs[account.ID] = struct{}{} if switchCount >= maxAccountSwitches { // 默认重试用完,进入 Antigravity 额外重试 antigravityExtraCount++ @@ -865,6 +901,23 @@ func needForceCacheBilling(hasBoundSession bool, failoverErr *service.UpstreamFa return hasBoundSession || (failoverErr != nil && failoverErr.ForceCacheBilling) } +const ( + // maxSameAccountRetries 同账号重试次数上限(针对 RetryableOnSameAccount 错误) + maxSameAccountRetries = 2 + // sameAccountRetryDelay 同账号重试间隔 + sameAccountRetryDelay = 500 * time.Millisecond +) + +// sleepSameAccountRetryDelay 同账号重试固定延时,返回 false 表示 context 已取消。 +func sleepSameAccountRetryDelay(ctx context.Context) bool { + select { + case <-ctx.Done(): + return false + case <-time.After(sameAccountRetryDelay): + return true + } +} + // sleepFailoverDelay 账号切换线性递增延时:第1次0s、第2次1s、第3次2s… // 返回 false 表示 context 已取消。 func sleepFailoverDelay(ctx context.Context, switchCount int) bool { diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index a5fd1535..9c2b9027 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -1285,7 +1285,7 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, originalModel, 0, "", isStickySession) - // 精确匹配服务端配置类 400 错误,触发 failover + 临时封禁 + // 精确匹配服务端配置类 400 错误,触发同账号重试 + failover if resp.StatusCode == http.StatusBadRequest { msg := strings.ToLower(strings.TrimSpace(extractAntigravityErrorMessage(respBody))) if isGoogleProjectConfigError(msg) { @@ -1302,8 +1302,7 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, Message: upstreamMsg, Detail: upstreamDetail, }) - 
tempUnscheduleGoogleConfigError(ctx, s.accountRepo, account.ID, prefix) - return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode, ResponseBody: respBody} + return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode, ResponseBody: respBody, RetryableOnSameAccount: true} } } @@ -1351,10 +1350,6 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, streamRes, err := s.handleClaudeStreamToNonStreaming(c, resp, startTime, originalModel) if err != nil { log.Printf("%s status=stream_collect_error error=%v", prefix, err) - var failoverErr *UpstreamFailoverError - if errors.As(err, &failoverErr) && failoverErr.StatusCode == http.StatusBadGateway { - tempUnscheduleEmptyResponse(ctx, s.accountRepo, account.ID, prefix) - } return nil, err } usage = streamRes.usage @@ -1851,7 +1846,7 @@ func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Co // Always record upstream context for Ops error logs, even when we will failover. setOpsUpstreamError(c, resp.StatusCode, upstreamMsg, upstreamDetail) - // 精确匹配服务端配置类 400 错误,触发 failover + 临时封禁 + // 精确匹配服务端配置类 400 错误,触发同账号重试 + failover if resp.StatusCode == http.StatusBadRequest && isGoogleProjectConfigError(strings.ToLower(upstreamMsg)) { log.Printf("%s status=400 google_config_error failover=true upstream_message=%q account=%d", prefix, upstreamMsg, account.ID) appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ @@ -1864,8 +1859,7 @@ func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Co Message: upstreamMsg, Detail: upstreamDetail, }) - tempUnscheduleGoogleConfigError(ctx, s.accountRepo, account.ID, prefix) - return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode, ResponseBody: unwrappedForOps} + return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode, ResponseBody: unwrappedForOps, RetryableOnSameAccount: true} } if s.shouldFailoverUpstreamError(resp.StatusCode) { @@ -1924,10 +1918,6 @@ handleSuccess: streamRes, err := 
s.handleGeminiStreamToNonStreaming(c, resp, startTime) if err != nil { log.Printf("%s status=stream_collect_error error=%v", prefix, err) - var failoverErr *UpstreamFailoverError - if errors.As(err, &failoverErr) && failoverErr.StatusCode == http.StatusBadGateway { - tempUnscheduleEmptyResponse(ctx, s.accountRepo, account.ID, prefix) - } return nil, err } usage = streamRes.usage @@ -1976,13 +1966,13 @@ func isGoogleProjectConfigError(lowerMsg string) bool { } // googleConfigErrorCooldown 服务端配置类 400 错误的临时封禁时长 -const googleConfigErrorCooldown = 30 * time.Minute +const googleConfigErrorCooldown = 1 * time.Minute // tempUnscheduleGoogleConfigError 对服务端配置类 400 错误触发临时封禁, // 避免短时间内反复调度到同一个有问题的账号。 func tempUnscheduleGoogleConfigError(ctx context.Context, repo AccountRepository, accountID int64, logPrefix string) { until := time.Now().Add(googleConfigErrorCooldown) - reason := "400: invalid project resource name (auto temp-unschedule 30m)" + reason := "400: invalid project resource name (auto temp-unschedule 1m)" if err := repo.SetTempUnschedulable(ctx, accountID, until, reason); err != nil { log.Printf("%s temp_unschedule_failed account=%d error=%v", logPrefix, accountID, err) } else { @@ -1991,13 +1981,13 @@ func tempUnscheduleGoogleConfigError(ctx context.Context, repo AccountRepository } // emptyResponseCooldown 空流式响应的临时封禁时长 -const emptyResponseCooldown = 30 * time.Minute +const emptyResponseCooldown = 1 * time.Minute // tempUnscheduleEmptyResponse 对空流式响应触发临时封禁, // 避免短时间内反复调度到同一个返回空响应的账号。 func tempUnscheduleEmptyResponse(ctx context.Context, repo AccountRepository, accountID int64, logPrefix string) { until := time.Now().Add(emptyResponseCooldown) - reason := "empty stream response (auto temp-unschedule 30m)" + reason := "empty stream response (auto temp-unschedule 1m)" if err := repo.SetTempUnschedulable(ctx, accountID, until, reason); err != nil { log.Printf("%s temp_unschedule_failed account=%d error=%v", logPrefix, accountID, err) } else { @@ -2809,12 +2799,13 @@ 
returnResponse: // 选择最后一个有效响应 finalResponse := pickGeminiCollectResult(last, lastWithParts) - // 处理空响应情况 — 触发 failover 切换账号重试 + // 处理空响应情况 — 触发同账号重试 + failover 切换账号 if last == nil && lastWithParts == nil { log.Printf("[antigravity-Forward] warning: empty stream response (gemini non-stream), triggering failover") return nil, &UpstreamFailoverError{ - StatusCode: http.StatusBadGateway, - ResponseBody: []byte(`{"error":"empty stream response from upstream"}`), + StatusCode: http.StatusBadGateway, + ResponseBody: []byte(`{"error":"empty stream response from upstream"}`), + RetryableOnSameAccount: true, } } @@ -3228,12 +3219,13 @@ returnResponse: // 选择最后一个有效响应 finalResponse := pickGeminiCollectResult(last, lastWithParts) - // 处理空响应情况 — 触发 failover 切换账号重试 + // 处理空响应情况 — 触发同账号重试 + failover 切换账号 if last == nil && lastWithParts == nil { log.Printf("[antigravity-Forward] warning: empty stream response (claude non-stream), triggering failover") return nil, &UpstreamFailoverError{ - StatusCode: http.StatusBadGateway, - ResponseBody: []byte(`{"error":"empty stream response from upstream"}`), + StatusCode: http.StatusBadGateway, + ResponseBody: []byte(`{"error":"empty stream response from upstream"}`), + RetryableOnSameAccount: true, } } diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 4e723232..01e1acb4 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -362,15 +362,30 @@ type ForwardResult struct { // UpstreamFailoverError indicates an upstream error that should trigger account failover. 
type UpstreamFailoverError struct { - StatusCode int - ResponseBody []byte // 上游响应体,用于错误透传规则匹配 - ForceCacheBilling bool // Antigravity 粘性会话切换时设为 true + StatusCode int + ResponseBody []byte // 上游响应体,用于错误透传规则匹配 + ForceCacheBilling bool // Antigravity 粘性会话切换时设为 true + RetryableOnSameAccount bool // 临时性错误(如 Google 间歇性 400、空响应),应在同一账号上重试 N 次再切换 } func (e *UpstreamFailoverError) Error() string { return fmt.Sprintf("upstream error: %d (failover)", e.StatusCode) } +// TempUnscheduleRetryableError 对 RetryableOnSameAccount 类型的 failover 错误触发临时封禁。 +// 由 handler 层在同账号重试全部用尽、切换账号时调用。 +func (s *GatewayService) TempUnscheduleRetryableError(ctx context.Context, accountID int64, failoverErr *UpstreamFailoverError) { + if failoverErr == nil || !failoverErr.RetryableOnSameAccount { + return + } + // 根据状态码选择封禁策略 + if failoverErr.StatusCode == http.StatusBadRequest { + tempUnscheduleGoogleConfigError(ctx, s.accountRepo, accountID, "[handler]") + } else if failoverErr.StatusCode == http.StatusBadGateway { + tempUnscheduleEmptyResponse(ctx, s.accountRepo, accountID, "[handler]") + } +} + // GatewayService handles API gateway operations type GatewayService struct { accountRepo AccountRepository diff --git a/backend/internal/service/gemini_messages_compat_service.go b/backend/internal/service/gemini_messages_compat_service.go index 1e59c5fd..7fa375ca 100644 --- a/backend/internal/service/gemini_messages_compat_service.go +++ b/backend/internal/service/gemini_messages_compat_service.go @@ -908,8 +908,7 @@ func (s *GeminiMessagesCompatService) Forward(ctx context.Context, c *gin.Contex Message: upstreamMsg, Detail: upstreamDetail, }) - tempUnscheduleGoogleConfigError(ctx, s.accountRepo, account.ID, "[Gemini]") - return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode, ResponseBody: respBody} + return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode, ResponseBody: respBody, RetryableOnSameAccount: true} } } if s.shouldFailoverGeminiUpstreamError(resp.StatusCode) { @@ -1387,8 
+1386,7 @@ func (s *GeminiMessagesCompatService) ForwardNative(ctx context.Context, c *gin. Message: upstreamMsg, Detail: upstreamDetail, }) - tempUnscheduleGoogleConfigError(ctx, s.accountRepo, account.ID, "[Gemini]") - return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode, ResponseBody: evBody} + return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode, ResponseBody: evBody, RetryableOnSameAccount: true} } } if s.shouldFailoverGeminiUpstreamError(resp.StatusCode) { From 662625a09114a000cfd0897d322fc894b5a8097c Mon Sep 17 00:00:00 2001 From: erio Date: Tue, 10 Feb 2026 03:47:40 +0800 Subject: [PATCH 016/175] feat: optimize MODEL_CAPACITY_EXHAUSTED retry and remove extra failover retries - MODEL_CAPACITY_EXHAUSTED now uses independent retry strategy: - retryDelay < 20s: wait actual retryDelay then retry once - retryDelay >= 20s or missing: retry up to 5 times at 20s intervals - Still capacity exhausted after retries: switch account (failover) - Different error during retry (e.g. 429): handle by actual error code - No model rate limit set (capacity != rate limit) - Remove Antigravity extra failover retries feature: Same-account retry mechanism (cherry-picked) makes it redundant. Removed: antigravityExtraRetries config, sleepFixedDelay, skip-non-antigravity logic. 
--- backend/internal/config/config.go | 3 - backend/internal/handler/gateway_handler.go | 65 +-- .../gateway_handler_extra_retry_test.go | 417 ------------------ .../internal/handler/gemini_v1beta_handler.go | 23 +- .../service/antigravity_gateway_service.go | 151 ++++++- .../service/antigravity_rate_limit_test.go | 40 +- .../service/antigravity_smart_retry_test.go | 142 ++++-- 7 files changed, 282 insertions(+), 559 deletions(-) delete mode 100644 backend/internal/handler/gateway_handler_extra_retry_test.go diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index 460bd05d..7b6b4a37 100644 --- a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -279,9 +279,6 @@ type GatewayConfig struct { // Antigravity 429 fallback 限流时间(分钟),解析重置时间失败时使用 AntigravityFallbackCooldownMinutes int `mapstructure:"antigravity_fallback_cooldown_minutes"` - // 默认重试用完后,额外使用 Antigravity 账号重试的最大次数(0 表示禁用) - AntigravityExtraRetries int `mapstructure:"antigravity_extra_retries"` - // Scheduling: 账号调度相关配置 Scheduling GatewaySchedulingConfig `mapstructure:"scheduling"` diff --git a/backend/internal/handler/gateway_handler.go b/backend/internal/handler/gateway_handler.go index 3003b5ae..b5fb379e 100644 --- a/backend/internal/handler/gateway_handler.go +++ b/backend/internal/handler/gateway_handler.go @@ -39,7 +39,6 @@ type GatewayHandler struct { concurrencyHelper *ConcurrencyHelper maxAccountSwitches int maxAccountSwitchesGemini int - antigravityExtraRetries int } // NewGatewayHandler creates a new GatewayHandler @@ -58,7 +57,6 @@ func NewGatewayHandler( pingInterval := time.Duration(0) maxAccountSwitches := 10 maxAccountSwitchesGemini := 3 - antigravityExtraRetries := 10 if cfg != nil { pingInterval = time.Duration(cfg.Concurrency.PingInterval) * time.Second if cfg.Gateway.MaxAccountSwitches > 0 { @@ -67,7 +65,6 @@ func NewGatewayHandler( if cfg.Gateway.MaxAccountSwitchesGemini > 0 { maxAccountSwitchesGemini = 
cfg.Gateway.MaxAccountSwitchesGemini } - antigravityExtraRetries = cfg.Gateway.AntigravityExtraRetries } return &GatewayHandler{ gatewayService: gatewayService, @@ -81,7 +78,6 @@ func NewGatewayHandler( concurrencyHelper: NewConcurrencyHelper(concurrencyService, SSEPingFormatClaude, pingInterval), maxAccountSwitches: maxAccountSwitches, maxAccountSwitchesGemini: maxAccountSwitchesGemini, - antigravityExtraRetries: antigravityExtraRetries, } } @@ -238,7 +234,6 @@ func (h *GatewayHandler) Messages(c *gin.Context) { if platform == service.PlatformGemini { maxAccountSwitches := h.maxAccountSwitchesGemini switchCount := 0 - antigravityExtraCount := 0 failedAccountIDs := make(map[int64]struct{}) sameAccountRetryCount := make(map[int64]int) // 同账号重试计数 var lastFailoverErr *service.UpstreamFailoverError @@ -261,15 +256,6 @@ func (h *GatewayHandler) Messages(c *gin.Context) { account := selection.Account setOpsSelectedAccount(c, account.ID) - // 额外重试阶段:跳过非 Antigravity 账号 - if switchCount >= maxAccountSwitches && account.Platform != service.PlatformAntigravity { - failedAccountIDs[account.ID] = struct{}{} - if selection.Acquired && selection.ReleaseFunc != nil { - selection.ReleaseFunc() - } - continue - } - // 检查请求拦截(预热请求、SUGGESTION MODE等) if account.IsInterceptWarmupEnabled() { interceptType := detectInterceptType(body, reqModel, parsedReq.MaxTokens, reqStream, isClaudeCodeClient) @@ -377,17 +363,8 @@ func (h *GatewayHandler) Messages(c *gin.Context) { failedAccountIDs[account.ID] = struct{}{} if switchCount >= maxAccountSwitches { - // 默认重试用完,进入 Antigravity 额外重试 - antigravityExtraCount++ - if antigravityExtraCount > h.antigravityExtraRetries { - h.handleFailoverExhausted(c, failoverErr, service.PlatformGemini, streamStarted) - return - } - log.Printf("Account %d: antigravity extra retry %d/%d", account.ID, antigravityExtraCount, h.antigravityExtraRetries) - if !sleepFixedDelay(c.Request.Context(), antigravityExtraRetryDelay) { - return - } - continue + 
h.handleFailoverExhausted(c, failoverErr, service.PlatformGemini, streamStarted) + return } switchCount++ log.Printf("Account %d: upstream error %d, switching account %d/%d", account.ID, failoverErr.StatusCode, switchCount, maxAccountSwitches) @@ -440,7 +417,6 @@ func (h *GatewayHandler) Messages(c *gin.Context) { for { maxAccountSwitches := h.maxAccountSwitches switchCount := 0 - antigravityExtraCount := 0 failedAccountIDs := make(map[int64]struct{}) sameAccountRetryCount := make(map[int64]int) // 同账号重试计数 var lastFailoverErr *service.UpstreamFailoverError @@ -465,15 +441,6 @@ func (h *GatewayHandler) Messages(c *gin.Context) { account := selection.Account setOpsSelectedAccount(c, account.ID) - // 额外重试阶段:跳过非 Antigravity 账号 - if switchCount >= maxAccountSwitches && account.Platform != service.PlatformAntigravity { - failedAccountIDs[account.ID] = struct{}{} - if selection.Acquired && selection.ReleaseFunc != nil { - selection.ReleaseFunc() - } - continue - } - // 检查请求拦截(预热请求、SUGGESTION MODE等) if account.IsInterceptWarmupEnabled() { interceptType := detectInterceptType(body, reqModel, parsedReq.MaxTokens, reqStream, isClaudeCodeClient) @@ -614,17 +581,8 @@ func (h *GatewayHandler) Messages(c *gin.Context) { failedAccountIDs[account.ID] = struct{}{} if switchCount >= maxAccountSwitches { - // 默认重试用完,进入 Antigravity 额外重试 - antigravityExtraCount++ - if antigravityExtraCount > h.antigravityExtraRetries { - h.handleFailoverExhausted(c, failoverErr, account.Platform, streamStarted) - return - } - log.Printf("Account %d: antigravity extra retry %d/%d", account.ID, antigravityExtraCount, h.antigravityExtraRetries) - if !sleepFixedDelay(c.Request.Context(), antigravityExtraRetryDelay) { - return - } - continue + h.handleFailoverExhausted(c, failoverErr, account.Platform, streamStarted) + return } switchCount++ log.Printf("Account %d: upstream error %d, switching account %d/%d", account.ID, failoverErr.StatusCode, switchCount, maxAccountSwitches) @@ -933,21 +891,6 @@ func 
sleepFailoverDelay(ctx context.Context, switchCount int) bool { } } -const antigravityExtraRetryDelay = 500 * time.Millisecond - -// sleepFixedDelay 固定延时等待,返回 false 表示 context 已取消。 -func sleepFixedDelay(ctx context.Context, delay time.Duration) bool { - if delay <= 0 { - return true - } - select { - case <-ctx.Done(): - return false - case <-time.After(delay): - return true - } -} - func (h *GatewayHandler) handleFailoverExhausted(c *gin.Context, failoverErr *service.UpstreamFailoverError, platform string, streamStarted bool) { statusCode := failoverErr.StatusCode responseBody := failoverErr.ResponseBody diff --git a/backend/internal/handler/gateway_handler_extra_retry_test.go b/backend/internal/handler/gateway_handler_extra_retry_test.go deleted file mode 100644 index a0777941..00000000 --- a/backend/internal/handler/gateway_handler_extra_retry_test.go +++ /dev/null @@ -1,417 +0,0 @@ -//go:build unit - -package handler - -import ( - "context" - "encoding/json" - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/Wei-Shaw/sub2api/internal/config" - "github.com/Wei-Shaw/sub2api/internal/service" - "github.com/gin-gonic/gin" - "github.com/stretchr/testify/require" -) - -// --- sleepFixedDelay --- - -func TestSleepFixedDelay_ZeroDelay(t *testing.T) { - got := sleepFixedDelay(context.Background(), 0) - require.True(t, got, "zero delay should return true immediately") -} - -func TestSleepFixedDelay_NegativeDelay(t *testing.T) { - got := sleepFixedDelay(context.Background(), -1*time.Second) - require.True(t, got, "negative delay should return true immediately") -} - -func TestSleepFixedDelay_NormalDelay(t *testing.T) { - start := time.Now() - got := sleepFixedDelay(context.Background(), 50*time.Millisecond) - elapsed := time.Since(start) - require.True(t, got, "normal delay should return true") - require.GreaterOrEqual(t, elapsed, 40*time.Millisecond, "should sleep at least ~50ms") -} - -func TestSleepFixedDelay_ContextCancelled(t *testing.T) { - ctx, 
cancel := context.WithCancel(context.Background()) - cancel() // cancel immediately - got := sleepFixedDelay(ctx, 10*time.Second) - require.False(t, got, "cancelled context should return false") -} - -func TestSleepFixedDelay_ContextTimeout(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) - defer cancel() - got := sleepFixedDelay(ctx, 5*time.Second) - require.False(t, got, "context timeout should return false before delay completes") -} - -// --- antigravityExtraRetryDelay constant --- - -func TestAntigravityExtraRetryDelayValue(t *testing.T) { - require.Equal(t, 500*time.Millisecond, antigravityExtraRetryDelay) -} - -// --- NewGatewayHandler antigravityExtraRetries field --- - -func TestNewGatewayHandler_AntigravityExtraRetries_Default(t *testing.T) { - h := NewGatewayHandler(nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) - require.Equal(t, 10, h.antigravityExtraRetries, "default should be 10 when cfg is nil") -} - -func TestNewGatewayHandler_AntigravityExtraRetries_FromConfig(t *testing.T) { - cfg := &config.Config{ - Gateway: config.GatewayConfig{ - AntigravityExtraRetries: 5, - }, - } - h := NewGatewayHandler(nil, nil, nil, nil, nil, nil, nil, nil, nil, cfg) - require.Equal(t, 5, h.antigravityExtraRetries, "should use config value") -} - -func TestNewGatewayHandler_AntigravityExtraRetries_ZeroDisables(t *testing.T) { - cfg := &config.Config{ - Gateway: config.GatewayConfig{ - AntigravityExtraRetries: 0, - }, - } - h := NewGatewayHandler(nil, nil, nil, nil, nil, nil, nil, nil, nil, cfg) - require.Equal(t, 0, h.antigravityExtraRetries, "zero should disable extra retries") -} - -// --- handleFailoverAllAccountsExhausted (renamed: using handleFailoverExhausted) --- -// We test the error response format helpers that the extra retry path uses. 
- -func TestHandleFailoverExhausted_JSON(t *testing.T) { - gin.SetMode(gin.TestMode) - rec := httptest.NewRecorder() - c, _ := gin.CreateTestContext(rec) - - h := &GatewayHandler{} - failoverErr := &service.UpstreamFailoverError{StatusCode: 429} - h.handleFailoverExhausted(c, failoverErr, service.PlatformAntigravity, false) - - require.Equal(t, http.StatusTooManyRequests, rec.Code) - - var body map[string]any - err := json.Unmarshal(rec.Body.Bytes(), &body) - require.NoError(t, err) - errObj, ok := body["error"].(map[string]any) - require.True(t, ok) - require.Equal(t, "rate_limit_error", errObj["type"]) -} - -func TestHandleFailoverExhaustedSimple_JSON(t *testing.T) { - gin.SetMode(gin.TestMode) - rec := httptest.NewRecorder() - c, _ := gin.CreateTestContext(rec) - - h := &GatewayHandler{} - h.handleFailoverExhaustedSimple(c, 502, false) - - require.Equal(t, http.StatusBadGateway, rec.Code) - - var body map[string]any - err := json.Unmarshal(rec.Body.Bytes(), &body) - require.NoError(t, err) - errObj, ok := body["error"].(map[string]any) - require.True(t, ok) - require.Equal(t, "upstream_error", errObj["type"]) -} - -// --- Extra retry platform filter logic --- - -func TestExtraRetryPlatformFilter(t *testing.T) { - tests := []struct { - name string - switchCount int - maxAccountSwitch int - platform string - expectSkip bool - }{ - { - name: "default_retry_phase_antigravity_not_skipped", - switchCount: 1, - maxAccountSwitch: 3, - platform: service.PlatformAntigravity, - expectSkip: false, - }, - { - name: "default_retry_phase_gemini_not_skipped", - switchCount: 1, - maxAccountSwitch: 3, - platform: service.PlatformGemini, - expectSkip: false, - }, - { - name: "extra_retry_phase_antigravity_not_skipped", - switchCount: 3, - maxAccountSwitch: 3, - platform: service.PlatformAntigravity, - expectSkip: false, - }, - { - name: "extra_retry_phase_gemini_skipped", - switchCount: 3, - maxAccountSwitch: 3, - platform: service.PlatformGemini, - expectSkip: true, - }, - { - 
name: "extra_retry_phase_anthropic_skipped", - switchCount: 3, - maxAccountSwitch: 3, - platform: service.PlatformAnthropic, - expectSkip: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Replicate the filter condition from the handler - shouldSkip := tt.switchCount >= tt.maxAccountSwitch && tt.platform != service.PlatformAntigravity - require.Equal(t, tt.expectSkip, shouldSkip) - }) - } -} - -// --- Extra retry counter logic --- - -func TestExtraRetryCounterExhaustion(t *testing.T) { - tests := []struct { - name string - maxExtraRetries int - currentExtraCount int - expectExhausted bool - }{ - { - name: "first_extra_retry", - maxExtraRetries: 10, - currentExtraCount: 1, - expectExhausted: false, - }, - { - name: "at_limit", - maxExtraRetries: 10, - currentExtraCount: 10, - expectExhausted: false, - }, - { - name: "exceeds_limit", - maxExtraRetries: 10, - currentExtraCount: 11, - expectExhausted: true, - }, - { - name: "zero_disables_extra_retry", - maxExtraRetries: 0, - currentExtraCount: 1, - expectExhausted: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Replicate the exhaustion condition: antigravityExtraCount > h.antigravityExtraRetries - exhausted := tt.currentExtraCount > tt.maxExtraRetries - require.Equal(t, tt.expectExhausted, exhausted) - }) - } -} - -// --- mapUpstreamError (used by handleFailoverExhausted) --- - -func TestMapUpstreamError(t *testing.T) { - h := &GatewayHandler{} - tests := []struct { - name string - statusCode int - expectedStatus int - expectedType string - }{ - {"429", 429, http.StatusTooManyRequests, "rate_limit_error"}, - {"529", 529, http.StatusServiceUnavailable, "overloaded_error"}, - {"500", 500, http.StatusBadGateway, "upstream_error"}, - {"502", 502, http.StatusBadGateway, "upstream_error"}, - {"503", 503, http.StatusBadGateway, "upstream_error"}, - {"504", 504, http.StatusBadGateway, "upstream_error"}, - {"401", 401, http.StatusBadGateway, 
"upstream_error"}, - {"403", 403, http.StatusBadGateway, "upstream_error"}, - {"unknown", 418, http.StatusBadGateway, "upstream_error"}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - status, errType, _ := h.mapUpstreamError(tt.statusCode) - require.Equal(t, tt.expectedStatus, status) - require.Equal(t, tt.expectedType, errType) - }) - } -} - -// --- Gemini native path: handleGeminiFailoverExhausted --- - -func TestHandleGeminiFailoverExhausted_NilError(t *testing.T) { - gin.SetMode(gin.TestMode) - rec := httptest.NewRecorder() - c, _ := gin.CreateTestContext(rec) - - h := &GatewayHandler{} - h.handleGeminiFailoverExhausted(c, nil) - - require.Equal(t, http.StatusBadGateway, rec.Code) - var body map[string]any - err := json.Unmarshal(rec.Body.Bytes(), &body) - require.NoError(t, err) - errObj, ok := body["error"].(map[string]any) - require.True(t, ok) - require.Equal(t, "Upstream request failed", errObj["message"]) -} - -func TestHandleGeminiFailoverExhausted_429(t *testing.T) { - gin.SetMode(gin.TestMode) - rec := httptest.NewRecorder() - c, _ := gin.CreateTestContext(rec) - - h := &GatewayHandler{} - failoverErr := &service.UpstreamFailoverError{StatusCode: 429} - h.handleGeminiFailoverExhausted(c, failoverErr) - - require.Equal(t, http.StatusTooManyRequests, rec.Code) -} - -// --- handleStreamingAwareError streaming mode --- - -func TestHandleStreamingAwareError_StreamStarted(t *testing.T) { - gin.SetMode(gin.TestMode) - rec := httptest.NewRecorder() - c, _ := gin.CreateTestContext(rec) - - // Simulate stream already started: set content type and write initial data - c.Writer.Header().Set("Content-Type", "text/event-stream") - c.Writer.WriteHeaderNow() - - h := &GatewayHandler{} - h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error", "test error", true) - - body := rec.Body.String() - require.Contains(t, body, "rate_limit_error") - require.Contains(t, body, "test error") - require.Contains(t, body, "data: ") -} 
- -func TestHandleStreamingAwareError_NotStreaming(t *testing.T) { - gin.SetMode(gin.TestMode) - rec := httptest.NewRecorder() - c, _ := gin.CreateTestContext(rec) - - h := &GatewayHandler{} - h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "no model", false) - - require.Equal(t, http.StatusServiceUnavailable, rec.Code) - var body map[string]any - err := json.Unmarshal(rec.Body.Bytes(), &body) - require.NoError(t, err) - errObj, ok := body["error"].(map[string]any) - require.True(t, ok) - require.Equal(t, "api_error", errObj["type"]) - require.Equal(t, "no model", errObj["message"]) -} - -// --- Integration: extra retry flow simulation --- - -func TestExtraRetryFlowSimulation(t *testing.T) { - // Simulate the full extra retry flow logic - maxAccountSwitches := 3 - maxExtraRetries := 2 - switchCount := 0 - antigravityExtraCount := 0 - - type attempt struct { - platform string - isFailover bool - } - - // Simulate: 3 default retries (all fail), then 2 extra retries (all fail), then exhausted - attempts := []attempt{ - {service.PlatformAntigravity, true}, // switchCount 0 -> 1 - {service.PlatformGemini, true}, // switchCount 1 -> 2 - {service.PlatformAntigravity, true}, // switchCount 2 -> 3 (reaches max) - {service.PlatformAntigravity, true}, // extra retry 1 - {service.PlatformAntigravity, true}, // extra retry 2 - {service.PlatformAntigravity, true}, // extra retry 3 -> exhausted - } - - var exhausted bool - var skipped int - - for _, a := range attempts { - if exhausted { - break - } - - // Extra retry phase: skip non-Antigravity - if switchCount >= maxAccountSwitches && a.platform != service.PlatformAntigravity { - skipped++ - continue - } - - if a.isFailover { - if switchCount >= maxAccountSwitches { - antigravityExtraCount++ - if antigravityExtraCount > maxExtraRetries { - exhausted = true - continue - } - // extra retry delay + continue - continue - } - switchCount++ - } - } - - require.Equal(t, 3, switchCount, "should have 3 default 
retries") - require.Equal(t, 3, antigravityExtraCount, "counter incremented 3 times") - require.True(t, exhausted, "should be exhausted after exceeding max extra retries") - require.Equal(t, 0, skipped, "no non-antigravity accounts in this simulation") -} - -func TestExtraRetryFlowSimulation_SkipsNonAntigravity(t *testing.T) { - maxAccountSwitches := 2 - switchCount := 2 // already past default retries - antigravityExtraCount := 0 - maxExtraRetries := 5 - - type accountSelection struct { - platform string - } - - selections := []accountSelection{ - {service.PlatformGemini}, // should be skipped - {service.PlatformAnthropic}, // should be skipped - {service.PlatformAntigravity}, // should be attempted - } - - var skippedCount int - var attemptedCount int - - for _, sel := range selections { - if switchCount >= maxAccountSwitches && sel.platform != service.PlatformAntigravity { - skippedCount++ - continue - } - // Simulate failover - antigravityExtraCount++ - if antigravityExtraCount > maxExtraRetries { - break - } - attemptedCount++ - } - - require.Equal(t, 2, skippedCount, "gemini and anthropic accounts should be skipped") - require.Equal(t, 1, attemptedCount, "only antigravity account should be attempted") - require.Equal(t, 1, antigravityExtraCount) -} diff --git a/backend/internal/handler/gemini_v1beta_handler.go b/backend/internal/handler/gemini_v1beta_handler.go index 5a576ab0..0475c332 100644 --- a/backend/internal/handler/gemini_v1beta_handler.go +++ b/backend/internal/handler/gemini_v1beta_handler.go @@ -323,7 +323,6 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { maxAccountSwitches := h.maxAccountSwitchesGemini switchCount := 0 - antigravityExtraCount := 0 failedAccountIDs := make(map[int64]struct{}) var lastFailoverErr *service.UpstreamFailoverError var forceCacheBilling bool // 粘性会话切换时的缓存计费标记 @@ -341,15 +340,6 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { account := selection.Account setOpsSelectedAccount(c, 
account.ID) - // 额外重试阶段:跳过非 Antigravity 账号 - if switchCount >= maxAccountSwitches && account.Platform != service.PlatformAntigravity { - failedAccountIDs[account.ID] = struct{}{} - if selection.Acquired && selection.ReleaseFunc != nil { - selection.ReleaseFunc() - } - continue - } - // 检测账号切换:如果粘性会话绑定的账号与当前选择的账号不同,清除 thoughtSignature // 注意:Gemini 原生 API 的 thoughtSignature 与具体上游账号强相关;跨账号透传会导致 400。 if sessionBoundAccountID > 0 && sessionBoundAccountID != account.ID { @@ -439,17 +429,8 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { forceCacheBilling = true } if switchCount >= maxAccountSwitches { - // 默认重试用完,进入 Antigravity 额外重试 - antigravityExtraCount++ - if antigravityExtraCount > h.antigravityExtraRetries { - h.handleGeminiFailoverExhausted(c, failoverErr) - return - } - log.Printf("Gemini account %d: antigravity extra retry %d/%d", account.ID, antigravityExtraCount, h.antigravityExtraRetries) - if !sleepFixedDelay(c.Request.Context(), antigravityExtraRetryDelay) { - return - } - continue + h.handleGeminiFailoverExhausted(c, failoverErr) + return } switchCount++ log.Printf("Gemini account %d: upstream error %d, switching account %d/%d", account.ID, failoverErr.StatusCode, switchCount, maxAccountSwitches) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 9c2b9027..84e78eaa 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -39,6 +39,15 @@ const ( antigravitySmartRetryMaxAttempts = 1 // 智能重试最大次数(仅重试 1 次,防止重复限流/长期等待) antigravityDefaultRateLimitDuration = 30 * time.Second // 默认限流时间(无 retryDelay 时使用) + // MODEL_CAPACITY_EXHAUSTED 专用常量 + // 容量不足是临时状态,所有账号共享容量池,与限流不同 + // - retryDelay < antigravityModelCapacityWaitThreshold: 按实际 retryDelay 等待后重试 1 次 + // - retryDelay >= antigravityModelCapacityWaitThreshold 或无 retryDelay: 每 20s 重试最多 5 次 + // - 重试仍为容量不足: 切换账号 + // - 重试遇到其他错误: 按实际错误码处理 + 
antigravityModelCapacityWaitThreshold = 20 * time.Second // 容量不足等待阈值 + antigravityModelCapacityMaxAttempts = 5 // 容量不足长等待重试次数 + // Google RPC 状态和类型常量 googleRPCStatusResourceExhausted = "RESOURCE_EXHAUSTED" googleRPCStatusUnavailable = "UNAVAILABLE" @@ -144,7 +153,12 @@ func (s *AntigravityGatewayService) handleSmartRetry(p antigravityRetryLoopParam } // 判断是否触发智能重试 - shouldSmartRetry, shouldRateLimitModel, waitDuration, modelName := shouldTriggerAntigravitySmartRetry(p.account, respBody) + shouldSmartRetry, shouldRateLimitModel, waitDuration, modelName, isModelCapacityExhausted := shouldTriggerAntigravitySmartRetry(p.account, respBody) + + // MODEL_CAPACITY_EXHAUSTED: 独立处理 + if isModelCapacityExhausted { + return s.handleModelCapacityExhaustedRetry(p, resp, respBody, baseURL, waitDuration, modelName) + } // 情况1: retryDelay >= 阈值,限流模型并切换账号 if shouldRateLimitModel { @@ -229,7 +243,7 @@ func (s *AntigravityGatewayService) handleSmartRetry(p antigravityRetryLoopParam // 解析新的重试信息,用于下次重试的等待时间 if attempt < antigravitySmartRetryMaxAttempts && lastRetryBody != nil { - newShouldRetry, _, newWaitDuration, _ := shouldTriggerAntigravitySmartRetry(p.account, lastRetryBody) + newShouldRetry, _, newWaitDuration, _, _ := shouldTriggerAntigravitySmartRetry(p.account, lastRetryBody) if newShouldRetry && newWaitDuration > 0 { waitDuration = newWaitDuration } @@ -279,6 +293,100 @@ func (s *AntigravityGatewayService) handleSmartRetry(p antigravityRetryLoopParam return &smartRetryResult{action: smartRetryActionContinue} } +// handleModelCapacityExhaustedRetry 处理 MODEL_CAPACITY_EXHAUSTED 的重试逻辑 +// 策略: +// - retryDelay < antigravityModelCapacityWaitThreshold: 按实际 retryDelay 等待后重试 1 次 +// - retryDelay >= antigravityModelCapacityWaitThreshold 或无 retryDelay: 每 20s 重试最多 5 次 +// - 重试成功: 直接返回 +// - 重试仍为 MODEL_CAPACITY_EXHAUSTED: 继续重试直到次数用完,然后切换账号 +// - 重试遇到其他错误 (429 限流等): 返回该响应,让上层按实际错误码处理 +func (s *AntigravityGatewayService) handleModelCapacityExhaustedRetry( + p antigravityRetryLoopParams, resp 
*http.Response, respBody []byte, + baseURL string, retryDelay time.Duration, modelName string, +) *smartRetryResult { + // 确定重试参数 + maxAttempts := 1 + waitDuration := retryDelay + if retryDelay <= 0 || retryDelay >= antigravityModelCapacityWaitThreshold { + // 无 retryDelay 或 >= 20s: 固定 20s 间隔,最多 5 次 + maxAttempts = antigravityModelCapacityMaxAttempts + waitDuration = antigravityModelCapacityWaitThreshold + } + + for attempt := 1; attempt <= maxAttempts; attempt++ { + log.Printf("%s status=%d model_capacity_exhausted_retry attempt=%d/%d delay=%v model=%s account=%d", + p.prefix, resp.StatusCode, attempt, maxAttempts, waitDuration, modelName, p.account.ID) + + select { + case <-p.ctx.Done(): + log.Printf("%s status=context_canceled_during_capacity_retry", p.prefix) + return &smartRetryResult{action: smartRetryActionBreakWithResp, err: p.ctx.Err()} + case <-time.After(waitDuration): + } + + retryReq, err := antigravity.NewAPIRequestWithURL(p.ctx, baseURL, p.action, p.accessToken, p.body) + if err != nil { + log.Printf("%s status=capacity_retry_request_build_failed error=%v", p.prefix, err) + return &smartRetryResult{ + action: smartRetryActionBreakWithResp, + resp: &http.Response{ + StatusCode: resp.StatusCode, + Header: resp.Header.Clone(), + Body: io.NopCloser(bytes.NewReader(respBody)), + }, + } + } + + retryResp, retryErr := p.httpUpstream.Do(retryReq, p.proxyURL, p.account.ID, p.account.Concurrency) + + // 网络错误: 继续重试 + if retryErr != nil || retryResp == nil { + log.Printf("%s status=capacity_retry_network_error attempt=%d/%d error=%v", + p.prefix, attempt, maxAttempts, retryErr) + continue + } + + // 成功 (非 429/503): 直接返回 + if retryResp.StatusCode != http.StatusTooManyRequests && retryResp.StatusCode != http.StatusServiceUnavailable { + log.Printf("%s status=%d model_capacity_retry_success attempt=%d/%d", + p.prefix, retryResp.StatusCode, attempt, maxAttempts) + return &smartRetryResult{action: smartRetryActionBreakWithResp, resp: retryResp} + } + + // 
读取重试响应体,判断是否仍为容量不足 + retryBody, _ := io.ReadAll(io.LimitReader(retryResp.Body, 2<<20)) + _ = retryResp.Body.Close() + + retryInfo := parseAntigravitySmartRetryInfo(retryBody) + + // 不再是 MODEL_CAPACITY_EXHAUSTED(例如变成了 429 限流): 返回该响应让上层处理 + if retryInfo == nil || !retryInfo.IsModelCapacityExhausted { + log.Printf("%s status=%d capacity_retry_got_different_error attempt=%d/%d body=%s", + p.prefix, retryResp.StatusCode, attempt, maxAttempts, truncateForLog(retryBody, 200)) + retryResp.Body = io.NopCloser(bytes.NewReader(retryBody)) + return &smartRetryResult{action: smartRetryActionBreakWithResp, resp: retryResp} + } + + // 仍然是 MODEL_CAPACITY_EXHAUSTED: 更新等待时间,继续重试 + if retryInfo.RetryDelay > 0 && retryInfo.RetryDelay < antigravityModelCapacityWaitThreshold { + waitDuration = retryInfo.RetryDelay + } + } + + // 所有重试都失败且仍为容量不足: 切换账号 + log.Printf("%s status=%d model_capacity_exhausted_retry_exhausted attempts=%d model=%s account=%d (switch account)", + p.prefix, resp.StatusCode, maxAttempts, modelName, p.account.ID) + + return &smartRetryResult{ + action: smartRetryActionBreakWithResp, + switchError: &AntigravityAccountSwitchError{ + OriginalAccountID: p.account.ID, + RateLimitedModel: modelName, + IsStickySession: p.isStickySession, + }, + } +} + // antigravityRetryLoop 执行带 URL fallback 的重试循环 func (s *AntigravityGatewayService) antigravityRetryLoop(p antigravityRetryLoopParams) (*antigravityRetryLoopResult, error) { // 预检查:如果账号已限流,直接返回切换信号 @@ -2053,8 +2161,9 @@ func antigravityFallbackCooldownSeconds() (time.Duration, bool) { // antigravitySmartRetryInfo 智能重试所需的信息 type antigravitySmartRetryInfo struct { - RetryDelay time.Duration // 重试延迟时间 - ModelName string // 限流的模型名称(如 "claude-sonnet-4-5") + RetryDelay time.Duration // 重试延迟时间 + ModelName string // 限流的模型名称(如 "claude-sonnet-4-5") + IsModelCapacityExhausted bool // 是否为 MODEL_CAPACITY_EXHAUSTED(503 容量不足,与 429 限流处理策略不同) } // parseAntigravitySmartRetryInfo 解析 Google RPC RetryInfo 和 ErrorInfo 信息 @@ -2163,14 +2272,16 @@ func 
parseAntigravitySmartRetryInfo(body []byte) *antigravitySmartRetryInfo { return nil } - // 如果上游未提供 retryDelay,使用默认限流时间 - if retryDelay <= 0 { + // MODEL_CAPACITY_EXHAUSTED: retryDelay 可以为 0(由调用方决定默认等待策略) + // RATE_LIMIT_EXCEEDED: 无 retryDelay 时使用默认限流时间 + if retryDelay <= 0 && !hasModelCapacityExhausted { retryDelay = antigravityDefaultRateLimitDuration } return &antigravitySmartRetryInfo{ - RetryDelay: retryDelay, - ModelName: modelName, + RetryDelay: retryDelay, + ModelName: modelName, + IsModelCapacityExhausted: hasModelCapacityExhausted, } } @@ -2178,22 +2289,28 @@ func parseAntigravitySmartRetryInfo(body []byte) *antigravitySmartRetryInfo { // 返回: // - shouldRetry: 是否应该智能重试(retryDelay < antigravityRateLimitThreshold) // - shouldRateLimitModel: 是否应该限流模型(retryDelay >= antigravityRateLimitThreshold) -// - waitDuration: 等待时间(智能重试时使用,shouldRateLimitModel=true 时为 0) +// - waitDuration: 等待时间(智能重试时使用,shouldRateLimitModel=true 时为限流时长) // - modelName: 限流的模型名称 -func shouldTriggerAntigravitySmartRetry(account *Account, respBody []byte) (shouldRetry bool, shouldRateLimitModel bool, waitDuration time.Duration, modelName string) { +// - isModelCapacityExhausted: 是否为 MODEL_CAPACITY_EXHAUSTED(需要独立处理) +func shouldTriggerAntigravitySmartRetry(account *Account, respBody []byte) (shouldRetry bool, shouldRateLimitModel bool, waitDuration time.Duration, modelName string, isModelCapacityExhausted bool) { if account.Platform != PlatformAntigravity { - return false, false, 0, "" + return false, false, 0, "", false } info := parseAntigravitySmartRetryInfo(respBody) if info == nil { - return false, false, 0, "" + return false, false, 0, "", false + } + + // MODEL_CAPACITY_EXHAUSTED: 独立处理,不走 7s 阈值判断 + if info.IsModelCapacityExhausted { + return true, false, info.RetryDelay, info.ModelName, true } // retryDelay >= 阈值:直接限流模型,不重试 // 注意:如果上游未提供 retryDelay,parseAntigravitySmartRetryInfo 已设置为默认 30s if info.RetryDelay >= antigravityRateLimitThreshold { - return false, true, info.RetryDelay, 
info.ModelName + return false, true, info.RetryDelay, info.ModelName, false } // retryDelay < 阈值:智能重试 @@ -2202,7 +2319,7 @@ func shouldTriggerAntigravitySmartRetry(account *Account, respBody []byte) (shou waitDuration = antigravitySmartRetryMinWait } - return true, false, waitDuration, info.ModelName + return true, false, waitDuration, info.ModelName, false } // handleModelRateLimitParams 模型级限流处理参数 @@ -2240,6 +2357,12 @@ func (s *AntigravityGatewayService) handleModelRateLimit(p *handleModelRateLimit return &handleModelRateLimitResult{Handled: false} } + // MODEL_CAPACITY_EXHAUSTED: 容量不足由 handleSmartRetry 独立处理,此处仅标记已处理 + // 不设置模型限流(容量不足是临时的,不等同于限流) + if info.IsModelCapacityExhausted { + return &handleModelRateLimitResult{Handled: true} + } + // < antigravityRateLimitThreshold: 等待后重试 if info.RetryDelay < antigravityRateLimitThreshold { log.Printf("%s status=%d model_rate_limit_wait model=%s wait=%v", diff --git a/backend/internal/service/antigravity_rate_limit_test.go b/backend/internal/service/antigravity_rate_limit_test.go index 59cc9331..c8b0d779 100644 --- a/backend/internal/service/antigravity_rate_limit_test.go +++ b/backend/internal/service/antigravity_rate_limit_test.go @@ -188,13 +188,14 @@ func TestHandleUpstreamError_429_NonModelRateLimit(t *testing.T) { require.Equal(t, "claude-sonnet-4-5", repo.modelRateLimitCalls[0].modelKey) } -// TestHandleUpstreamError_503_ModelRateLimit 测试 503 模型限流场景 -func TestHandleUpstreamError_503_ModelRateLimit(t *testing.T) { +// TestHandleUpstreamError_503_ModelCapacityExhausted 测试 503 模型容量不足场景 +// MODEL_CAPACITY_EXHAUSTED 标记 Handled 但不设模型限流(由 handleSmartRetry 独立处理) +func TestHandleUpstreamError_503_ModelCapacityExhausted(t *testing.T) { repo := &stubAntigravityAccountRepo{} svc := &AntigravityGatewayService{accountRepo: repo} account := &Account{ID: 3, Name: "acc-3", Platform: PlatformAntigravity} - // 503 + MODEL_CAPACITY_EXHAUSTED → 模型限流 + // 503 + MODEL_CAPACITY_EXHAUSTED → 标记已处理,不设模型限流 body := []byte(`{ "error": { 
"status": "UNAVAILABLE", @@ -207,13 +208,11 @@ func TestHandleUpstreamError_503_ModelRateLimit(t *testing.T) { result := svc.handleUpstreamError(context.Background(), "[test]", account, http.StatusServiceUnavailable, http.Header{}, body, "gemini-3-pro-high", 0, "", false) - // 应该触发模型限流 + // 应该标记已处理,但不设模型限流 require.NotNil(t, result) require.True(t, result.Handled) - require.NotNil(t, result.SwitchError) - require.Equal(t, "gemini-3-pro-high", result.SwitchError.RateLimitedModel) - require.Len(t, repo.modelRateLimitCalls, 1) - require.Equal(t, "gemini-3-pro-high", repo.modelRateLimitCalls[0].modelKey) + require.Nil(t, result.SwitchError, "MODEL_CAPACITY_EXHAUSTED should not trigger switch error in handleModelRateLimit") + require.Empty(t, repo.modelRateLimitCalls, "MODEL_CAPACITY_EXHAUSTED should not set model rate limit") } // TestHandleUpstreamError_503_NonModelRateLimit 测试 503 非模型限流场景(不处理) @@ -496,6 +495,7 @@ func TestShouldTriggerAntigravitySmartRetry(t *testing.T) { body string expectedShouldRetry bool expectedShouldRateLimit bool + expectedCapacityExhaust bool minWait time.Duration modelName string }{ @@ -611,8 +611,9 @@ func TestShouldTriggerAntigravitySmartRetry(t *testing.T) { ] } }`, - expectedShouldRetry: false, - expectedShouldRateLimit: true, + expectedShouldRetry: true, + expectedShouldRateLimit: false, + expectedCapacityExhaust: true, minWait: 39 * time.Second, modelName: "gemini-3-pro-high", }, @@ -629,9 +630,10 @@ func TestShouldTriggerAntigravitySmartRetry(t *testing.T) { "message": "No capacity available for model gemini-2.5-flash on the server" } }`, - expectedShouldRetry: false, - expectedShouldRateLimit: true, - minWait: 30 * time.Second, + expectedShouldRetry: true, + expectedShouldRateLimit: false, + expectedCapacityExhaust: true, + minWait: 0, // 无 retryDelay,由 handleModelCapacityExhaustedRetry 决定默认 20s modelName: "gemini-2.5-flash", }, { @@ -656,18 +658,26 @@ func TestShouldTriggerAntigravitySmartRetry(t *testing.T) { for _, tt := range 
tests { t.Run(tt.name, func(t *testing.T) { - shouldRetry, shouldRateLimit, wait, model := shouldTriggerAntigravitySmartRetry(tt.account, []byte(tt.body)) + shouldRetry, shouldRateLimit, wait, model, isCapacityExhausted := shouldTriggerAntigravitySmartRetry(tt.account, []byte(tt.body)) if shouldRetry != tt.expectedShouldRetry { t.Errorf("shouldRetry = %v, want %v", shouldRetry, tt.expectedShouldRetry) } if shouldRateLimit != tt.expectedShouldRateLimit { t.Errorf("shouldRateLimit = %v, want %v", shouldRateLimit, tt.expectedShouldRateLimit) } - if shouldRetry { + if isCapacityExhausted != tt.expectedCapacityExhaust { + t.Errorf("isCapacityExhausted = %v, want %v", isCapacityExhausted, tt.expectedCapacityExhaust) + } + if shouldRetry && !isCapacityExhausted { if wait < tt.minWait { t.Errorf("wait = %v, want >= %v", wait, tt.minWait) } } + if isCapacityExhausted && tt.minWait > 0 { + if wait < tt.minWait { + t.Errorf("capacity exhausted wait = %v, want >= %v", wait, tt.minWait) + } + } if shouldRateLimit && tt.minWait > 0 { if wait < tt.minWait { t.Errorf("rate limit wait = %v, want >= %v", wait, tt.minWait) diff --git a/backend/internal/service/antigravity_smart_retry_test.go b/backend/internal/service/antigravity_smart_retry_test.go index a7e0d296..7a6050a7 100644 --- a/backend/internal/service/antigravity_smart_retry_test.go +++ b/backend/internal/service/antigravity_smart_retry_test.go @@ -9,6 +9,7 @@ import ( "net/http" "strings" "testing" + "time" "github.com/stretchr/testify/require" ) @@ -294,8 +295,20 @@ func TestHandleSmartRetry_ShortDelay_SmartRetryFailed_ReturnsSwitchError(t *test require.Len(t, upstream.calls, 1, "should have made one retry call (max attempts)") } -// TestHandleSmartRetry_503_ModelCapacityExhausted_ReturnsSwitchError 测试 503 MODEL_CAPACITY_EXHAUSTED 返回 switchError -func TestHandleSmartRetry_503_ModelCapacityExhausted_ReturnsSwitchError(t *testing.T) { +// TestHandleSmartRetry_503_ModelCapacityExhausted_ShortDelay_RetrySuccess +// 503 
MODEL_CAPACITY_EXHAUSTED + retryDelay < 20s → 按实际 retryDelay 等待后重试 1 次,成功返回 +func TestHandleSmartRetry_503_ModelCapacityExhausted_ShortDelay_RetrySuccess(t *testing.T) { + // 重试成功的响应 + successResp := &http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{}, + Body: io.NopCloser(strings.NewReader(`{"ok":true}`)), + } + upstream := &mockSmartRetryUpstream{ + responses: []*http.Response{successResp}, + errors: []error{nil}, + } + repo := &stubAntigravityAccountRepo{} account := &Account{ ID: 3, @@ -304,7 +317,89 @@ func TestHandleSmartRetry_503_ModelCapacityExhausted_ReturnsSwitchError(t *testi Platform: PlatformAntigravity, } - // 503 + MODEL_CAPACITY_EXHAUSTED + 39s >= 7s 阈值 + // 503 + MODEL_CAPACITY_EXHAUSTED + 0.5s < 20s 阈值 → 按实际 retryDelay 重试 1 次 + respBody := []byte(`{ + "error": { + "code": 503, + "status": "UNAVAILABLE", + "details": [ + {"@type": "type.googleapis.com/google.rpc.ErrorInfo", "metadata": {"model": "gemini-3-pro-high"}, "reason": "MODEL_CAPACITY_EXHAUSTED"}, + {"@type": "type.googleapis.com/google.rpc.RetryInfo", "retryDelay": "0.5s"} + ], + "message": "No capacity available for model gemini-3-pro-high on the server" + } + }`) + resp := &http.Response{ + StatusCode: http.StatusServiceUnavailable, + Header: http.Header{}, + Body: io.NopCloser(bytes.NewReader(respBody)), + } + + params := antigravityRetryLoopParams{ + ctx: context.Background(), + prefix: "[test]", + account: account, + accessToken: "token", + action: "generateContent", + body: []byte(`{"input":"test"}`), + httpUpstream: upstream, + accountRepo: repo, + handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, requestedModel string, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { + return nil + }, + } + + availableURLs := []string{"https://ag-1.test"} + + svc := &AntigravityGatewayService{} + result := svc.handleSmartRetry(params, resp, respBody, "https://ag-1.test", 0, 
availableURLs) + + require.NotNil(t, result) + require.Equal(t, smartRetryActionBreakWithResp, result.action) + require.NotNil(t, result.resp) + require.Equal(t, http.StatusOK, result.resp.StatusCode, "should return success after retry") + require.Nil(t, result.switchError, "should not switch account on success") + require.Empty(t, repo.modelRateLimitCalls, "should not set model rate limit for capacity exhausted") +} + +// TestHandleSmartRetry_503_ModelCapacityExhausted_LongDelay_SwitchAccount +// 503 MODEL_CAPACITY_EXHAUSTED + retryDelay >= 20s → 每 20s 重试最多 5 次,全失败后切换账号 +func TestHandleSmartRetry_503_ModelCapacityExhausted_LongDelay_SwitchAccount(t *testing.T) { + // 构造 5 个仍然容量不足的重试响应 + capacityBody := `{ + "error": { + "code": 503, + "status": "UNAVAILABLE", + "details": [ + {"@type": "type.googleapis.com/google.rpc.ErrorInfo", "metadata": {"model": "gemini-3-pro-high"}, "reason": "MODEL_CAPACITY_EXHAUSTED"}, + {"@type": "type.googleapis.com/google.rpc.RetryInfo", "retryDelay": "30s"} + ] + } + }` + var responses []*http.Response + var errs []error + for i := 0; i < 5; i++ { + responses = append(responses, &http.Response{ + StatusCode: http.StatusServiceUnavailable, + Header: http.Header{}, + Body: io.NopCloser(strings.NewReader(capacityBody)), + }) + errs = append(errs, nil) + } + upstream := &mockSmartRetryUpstream{ + responses: responses, + errors: errs, + } + + repo := &stubAntigravityAccountRepo{} + account := &Account{ + ID: 3, + Name: "acc-3", + Type: AccountTypeOAuth, + Platform: PlatformAntigravity, + } + + // 503 + MODEL_CAPACITY_EXHAUSTED + 39s >= 20s 阈值 respBody := []byte(`{ "error": { "code": 503, @@ -322,13 +417,18 @@ func TestHandleSmartRetry_503_ModelCapacityExhausted_ReturnsSwitchError(t *testi Body: io.NopCloser(bytes.NewReader(respBody)), } + // 使用可取消的 context 避免测试真的等待 5×20s + ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond) + defer cancel() + params := antigravityRetryLoopParams{ - ctx: context.Background(), + 
ctx: ctx, prefix: "[test]", account: account, accessToken: "token", action: "generateContent", body: []byte(`{"input":"test"}`), + httpUpstream: upstream, accountRepo: repo, isStickySession: true, handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, requestedModel string, groupID int64, sessionHash string, isStickySession bool) *handleModelRateLimitResult { @@ -343,16 +443,9 @@ func TestHandleSmartRetry_503_ModelCapacityExhausted_ReturnsSwitchError(t *testi require.NotNil(t, result) require.Equal(t, smartRetryActionBreakWithResp, result.action) - require.Nil(t, result.resp) - require.Nil(t, result.err) - require.NotNil(t, result.switchError, "should return switchError for 503 model capacity exhausted") - require.Equal(t, account.ID, result.switchError.OriginalAccountID) - require.Equal(t, "gemini-3-pro-high", result.switchError.RateLimitedModel) - require.True(t, result.switchError.IsStickySession) - - // 验证模型限流已设置 - require.Len(t, repo.modelRateLimitCalls, 1) - require.Equal(t, "gemini-3-pro-high", repo.modelRateLimitCalls[0].modelKey) + // context 超时会导致提前返回,switchError 可能为 nil(context canceled) + // 验证不设置模型限流 + require.Empty(t, repo.modelRateLimitCalls, "should not set model rate limit for capacity exhausted") } // TestHandleSmartRetry_NonAntigravityAccount_ContinuesDefaultLogic 测试非 Antigravity 平台账号走默认逻辑 @@ -1128,9 +1221,9 @@ func TestHandleSmartRetry_ShortDelay_NetworkError_StickySession_ClearsSession(t require.Equal(t, "sticky-net-error", cache.deleteCalls[0].sessionHash) } -// TestHandleSmartRetry_ShortDelay_503_StickySession_FailedRetry_ClearsSession -// 503 + 短延迟 + 粘性会话 + 重试失败 → 清除粘性绑定 -func TestHandleSmartRetry_ShortDelay_503_StickySession_FailedRetry_ClearsSession(t *testing.T) { +// TestHandleSmartRetry_ShortDelay_503_StickySession_FailedRetry_SwitchesAccount +// 503 + 短延迟 + 容量不足 + 重试失败 → 切换账号(不设模型限流) +func TestHandleSmartRetry_ShortDelay_503_StickySession_FailedRetry_SwitchesAccount(t 
*testing.T) { failRespBody := `{ "error": { "code": 503, @@ -1152,7 +1245,6 @@ func TestHandleSmartRetry_ShortDelay_503_StickySession_FailedRetry_ClearsSession } repo := &stubAntigravityAccountRepo{} - cache := &stubSmartRetryCache{} account := &Account{ ID: 16, Name: "acc-16", @@ -1195,21 +1287,15 @@ func TestHandleSmartRetry_ShortDelay_503_StickySession_FailedRetry_ClearsSession availableURLs := []string{"https://ag-1.test"} - svc := &AntigravityGatewayService{cache: cache} + svc := &AntigravityGatewayService{} result := svc.handleSmartRetry(params, resp, respBody, "https://ag-1.test", 0, availableURLs) require.NotNil(t, result) - require.NotNil(t, result.switchError) + require.NotNil(t, result.switchError, "should switch account after capacity retry exhausted") require.True(t, result.switchError.IsStickySession) - // 验证粘性绑定被清除 - require.Len(t, cache.deleteCalls, 1) - require.Equal(t, int64(77), cache.deleteCalls[0].groupID) - require.Equal(t, "sticky-503-short", cache.deleteCalls[0].sessionHash) - - // 验证模型限流已设置 - require.Len(t, repo.modelRateLimitCalls, 1) - require.Equal(t, "gemini-3-pro", repo.modelRateLimitCalls[0].modelKey) + // MODEL_CAPACITY_EXHAUSTED 不应设置模型限流 + require.Empty(t, repo.modelRateLimitCalls, "should not set model rate limit for capacity exhausted") } // TestAntigravityRetryLoop_SmartRetryFailed_StickySession_SwitchErrorPropagates From 05f5a8b61db4960528074ab7f281404a8426e49f Mon Sep 17 00:00:00 2001 From: erio Date: Tue, 10 Feb 2026 03:59:39 +0800 Subject: [PATCH 017/175] fix: use switch statement for staticcheck QF1003 compliance --- backend/internal/service/gateway_service.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 01e1acb4..910e04a4 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -379,9 +379,10 @@ func (s *GatewayService) TempUnscheduleRetryableError(ctx 
context.Context, accou return } // 根据状态码选择封禁策略 - if failoverErr.StatusCode == http.StatusBadRequest { + switch failoverErr.StatusCode { + case http.StatusBadRequest: tempUnscheduleGoogleConfigError(ctx, s.accountRepo, accountID, "[handler]") - } else if failoverErr.StatusCode == http.StatusBadGateway { + case http.StatusBadGateway: tempUnscheduleEmptyResponse(ctx, s.accountRepo, accountID, "[handler]") } } From f06048eccfb1e4f8373b6348f3a425a3c36fa1a5 Mon Sep 17 00:00:00 2001 From: erio Date: Tue, 10 Feb 2026 04:05:20 +0800 Subject: [PATCH 018/175] fix: simplify MODEL_CAPACITY_EXHAUSTED to single retry for all cases Both short (<20s) and long (>=20s/missing) retryDelay now retry once: - Short: wait actual retryDelay, retry once - Long/missing: wait 20s, retry once - Still capacity exhausted: switch account - Different error: let upper layer handle --- .../service/antigravity_gateway_service.go | 128 +++++++++--------- .../service/antigravity_smart_retry_test.go | 31 ++--- 2 files changed, 75 insertions(+), 84 deletions(-) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 84e78eaa..efff2e18 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -42,11 +42,10 @@ const ( // MODEL_CAPACITY_EXHAUSTED 专用常量 // 容量不足是临时状态,所有账号共享容量池,与限流不同 // - retryDelay < antigravityModelCapacityWaitThreshold: 按实际 retryDelay 等待后重试 1 次 - // - retryDelay >= antigravityModelCapacityWaitThreshold 或无 retryDelay: 每 20s 重试最多 5 次 + // - retryDelay >= antigravityModelCapacityWaitThreshold 或无 retryDelay: 等待 20s 后重试 1 次 // - 重试仍为容量不足: 切换账号 // - 重试遇到其他错误: 按实际错误码处理 antigravityModelCapacityWaitThreshold = 20 * time.Second // 容量不足等待阈值 - antigravityModelCapacityMaxAttempts = 5 // 容量不足长等待重试次数 // Google RPC 状态和类型常量 googleRPCStatusResourceExhausted = "RESOURCE_EXHAUSTED" @@ -296,86 +295,83 @@ func (s *AntigravityGatewayService) handleSmartRetry(p 
antigravityRetryLoopParam // handleModelCapacityExhaustedRetry 处理 MODEL_CAPACITY_EXHAUSTED 的重试逻辑 // 策略: // - retryDelay < antigravityModelCapacityWaitThreshold: 按实际 retryDelay 等待后重试 1 次 -// - retryDelay >= antigravityModelCapacityWaitThreshold 或无 retryDelay: 每 20s 重试最多 5 次 +// - retryDelay >= antigravityModelCapacityWaitThreshold 或无 retryDelay: 等待 20s 后重试 1 次 // - 重试成功: 直接返回 -// - 重试仍为 MODEL_CAPACITY_EXHAUSTED: 继续重试直到次数用完,然后切换账号 +// - 重试仍为 MODEL_CAPACITY_EXHAUSTED: 切换账号 // - 重试遇到其他错误 (429 限流等): 返回该响应,让上层按实际错误码处理 func (s *AntigravityGatewayService) handleModelCapacityExhaustedRetry( p antigravityRetryLoopParams, resp *http.Response, respBody []byte, baseURL string, retryDelay time.Duration, modelName string, ) *smartRetryResult { - // 确定重试参数 - maxAttempts := 1 + // 确定等待时间 waitDuration := retryDelay if retryDelay <= 0 || retryDelay >= antigravityModelCapacityWaitThreshold { - // 无 retryDelay 或 >= 20s: 固定 20s 间隔,最多 5 次 - maxAttempts = antigravityModelCapacityMaxAttempts + // 无 retryDelay 或 >= 20s: 固定等待 20s waitDuration = antigravityModelCapacityWaitThreshold } - for attempt := 1; attempt <= maxAttempts; attempt++ { - log.Printf("%s status=%d model_capacity_exhausted_retry attempt=%d/%d delay=%v model=%s account=%d", - p.prefix, resp.StatusCode, attempt, maxAttempts, waitDuration, modelName, p.account.ID) + log.Printf("%s status=%d model_capacity_exhausted_retry delay=%v model=%s account=%d", + p.prefix, resp.StatusCode, waitDuration, modelName, p.account.ID) - select { - case <-p.ctx.Done(): - log.Printf("%s status=context_canceled_during_capacity_retry", p.prefix) - return &smartRetryResult{action: smartRetryActionBreakWithResp, err: p.ctx.Err()} - case <-time.After(waitDuration): - } + select { + case <-p.ctx.Done(): + log.Printf("%s status=context_canceled_during_capacity_retry", p.prefix) + return &smartRetryResult{action: smartRetryActionBreakWithResp, err: p.ctx.Err()} + case <-time.After(waitDuration): + } - retryReq, err := 
antigravity.NewAPIRequestWithURL(p.ctx, baseURL, p.action, p.accessToken, p.body) - if err != nil { - log.Printf("%s status=capacity_retry_request_build_failed error=%v", p.prefix, err) - return &smartRetryResult{ - action: smartRetryActionBreakWithResp, - resp: &http.Response{ - StatusCode: resp.StatusCode, - Header: resp.Header.Clone(), - Body: io.NopCloser(bytes.NewReader(respBody)), - }, - } - } - - retryResp, retryErr := p.httpUpstream.Do(retryReq, p.proxyURL, p.account.ID, p.account.Concurrency) - - // 网络错误: 继续重试 - if retryErr != nil || retryResp == nil { - log.Printf("%s status=capacity_retry_network_error attempt=%d/%d error=%v", - p.prefix, attempt, maxAttempts, retryErr) - continue - } - - // 成功 (非 429/503): 直接返回 - if retryResp.StatusCode != http.StatusTooManyRequests && retryResp.StatusCode != http.StatusServiceUnavailable { - log.Printf("%s status=%d model_capacity_retry_success attempt=%d/%d", - p.prefix, retryResp.StatusCode, attempt, maxAttempts) - return &smartRetryResult{action: smartRetryActionBreakWithResp, resp: retryResp} - } - - // 读取重试响应体,判断是否仍为容量不足 - retryBody, _ := io.ReadAll(io.LimitReader(retryResp.Body, 2<<20)) - _ = retryResp.Body.Close() - - retryInfo := parseAntigravitySmartRetryInfo(retryBody) - - // 不再是 MODEL_CAPACITY_EXHAUSTED(例如变成了 429 限流): 返回该响应让上层处理 - if retryInfo == nil || !retryInfo.IsModelCapacityExhausted { - log.Printf("%s status=%d capacity_retry_got_different_error attempt=%d/%d body=%s", - p.prefix, retryResp.StatusCode, attempt, maxAttempts, truncateForLog(retryBody, 200)) - retryResp.Body = io.NopCloser(bytes.NewReader(retryBody)) - return &smartRetryResult{action: smartRetryActionBreakWithResp, resp: retryResp} - } - - // 仍然是 MODEL_CAPACITY_EXHAUSTED: 更新等待时间,继续重试 - if retryInfo.RetryDelay > 0 && retryInfo.RetryDelay < antigravityModelCapacityWaitThreshold { - waitDuration = retryInfo.RetryDelay + retryReq, err := antigravity.NewAPIRequestWithURL(p.ctx, baseURL, p.action, p.accessToken, p.body) + if err != nil { + 
log.Printf("%s status=capacity_retry_request_build_failed error=%v", p.prefix, err) + return &smartRetryResult{ + action: smartRetryActionBreakWithResp, + resp: &http.Response{ + StatusCode: resp.StatusCode, + Header: resp.Header.Clone(), + Body: io.NopCloser(bytes.NewReader(respBody)), + }, } } - // 所有重试都失败且仍为容量不足: 切换账号 - log.Printf("%s status=%d model_capacity_exhausted_retry_exhausted attempts=%d model=%s account=%d (switch account)", - p.prefix, resp.StatusCode, maxAttempts, modelName, p.account.ID) + retryResp, retryErr := p.httpUpstream.Do(retryReq, p.proxyURL, p.account.ID, p.account.Concurrency) + + // 网络错误: 切换账号 + if retryErr != nil || retryResp == nil { + log.Printf("%s status=capacity_retry_network_error error=%v (switch account)", + p.prefix, retryErr) + return &smartRetryResult{ + action: smartRetryActionBreakWithResp, + switchError: &AntigravityAccountSwitchError{ + OriginalAccountID: p.account.ID, + RateLimitedModel: modelName, + IsStickySession: p.isStickySession, + }, + } + } + + // 成功 (非 429/503): 直接返回 + if retryResp.StatusCode != http.StatusTooManyRequests && retryResp.StatusCode != http.StatusServiceUnavailable { + log.Printf("%s status=%d model_capacity_retry_success", p.prefix, retryResp.StatusCode) + return &smartRetryResult{action: smartRetryActionBreakWithResp, resp: retryResp} + } + + // 读取重试响应体,判断是否仍为容量不足 + retryBody, _ := io.ReadAll(io.LimitReader(retryResp.Body, 2<<20)) + _ = retryResp.Body.Close() + + retryInfo := parseAntigravitySmartRetryInfo(retryBody) + + // 不再是 MODEL_CAPACITY_EXHAUSTED(例如变成了 429 限流): 返回该响应让上层处理 + if retryInfo == nil || !retryInfo.IsModelCapacityExhausted { + log.Printf("%s status=%d capacity_retry_got_different_error body=%s", + p.prefix, retryResp.StatusCode, truncateForLog(retryBody, 200)) + retryResp.Body = io.NopCloser(bytes.NewReader(retryBody)) + return &smartRetryResult{action: smartRetryActionBreakWithResp, resp: retryResp} + } + + // 仍然是 MODEL_CAPACITY_EXHAUSTED: 切换账号 + log.Printf("%s status=%d 
model_capacity_exhausted_retry_failed model=%s account=%d (switch account)", + p.prefix, resp.StatusCode, modelName, p.account.ID) return &smartRetryResult{ action: smartRetryActionBreakWithResp, diff --git a/backend/internal/service/antigravity_smart_retry_test.go b/backend/internal/service/antigravity_smart_retry_test.go index 7a6050a7..b1ca5695 100644 --- a/backend/internal/service/antigravity_smart_retry_test.go +++ b/backend/internal/service/antigravity_smart_retry_test.go @@ -363,9 +363,9 @@ func TestHandleSmartRetry_503_ModelCapacityExhausted_ShortDelay_RetrySuccess(t * } // TestHandleSmartRetry_503_ModelCapacityExhausted_LongDelay_SwitchAccount -// 503 MODEL_CAPACITY_EXHAUSTED + retryDelay >= 20s → 每 20s 重试最多 5 次,全失败后切换账号 +// 503 MODEL_CAPACITY_EXHAUSTED + retryDelay >= 20s → 等待 20s 后重试 1 次,仍失败则切换账号 func TestHandleSmartRetry_503_ModelCapacityExhausted_LongDelay_SwitchAccount(t *testing.T) { - // 构造 5 个仍然容量不足的重试响应 + // 重试仍然返回容量不足 capacityBody := `{ "error": { "code": 503, @@ -376,19 +376,15 @@ func TestHandleSmartRetry_503_ModelCapacityExhausted_LongDelay_SwitchAccount(t * ] } }` - var responses []*http.Response - var errs []error - for i := 0; i < 5; i++ { - responses = append(responses, &http.Response{ - StatusCode: http.StatusServiceUnavailable, - Header: http.Header{}, - Body: io.NopCloser(strings.NewReader(capacityBody)), - }) - errs = append(errs, nil) - } upstream := &mockSmartRetryUpstream{ - responses: responses, - errors: errs, + responses: []*http.Response{ + { + StatusCode: 503, + Header: http.Header{}, + Body: io.NopCloser(strings.NewReader(capacityBody)), + }, + }, + errors: []error{nil}, } repo := &stubAntigravityAccountRepo{} @@ -412,12 +408,12 @@ func TestHandleSmartRetry_503_ModelCapacityExhausted_LongDelay_SwitchAccount(t * } }`) resp := &http.Response{ - StatusCode: http.StatusServiceUnavailable, + StatusCode: 503, Header: http.Header{}, Body: io.NopCloser(bytes.NewReader(respBody)), } - // 使用可取消的 context 避免测试真的等待 5×20s + // context 超时短于 
20s 等待,验证 context 取消时正确返回 ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond) defer cancel() @@ -443,8 +439,7 @@ func TestHandleSmartRetry_503_ModelCapacityExhausted_LongDelay_SwitchAccount(t * require.NotNil(t, result) require.Equal(t, smartRetryActionBreakWithResp, result.action) - // context 超时会导致提前返回,switchError 可能为 nil(context canceled) - // 验证不设置模型限流 + // context 超时会导致提前返回 require.Empty(t, repo.modelRateLimitCalls, "should not set model rate limit for capacity exhausted") } From 406dad998d6def371b7f6cfc429ecba489fa3c32 Mon Sep 17 00:00:00 2001 From: erio Date: Tue, 10 Feb 2026 10:59:34 +0800 Subject: [PATCH 019/175] chore: bump version to 0.1.77.2 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index af6111e5..18412869 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.77.1 +0.1.77.2 From 6bdd580b3fcc9f564508f7497f63b2cc2b5b2674 Mon Sep 17 00:00:00 2001 From: erio Date: Tue, 10 Feb 2026 11:40:36 +0800 Subject: [PATCH 020/175] chore: bump version to 0.1.78.1 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 18412869..aade6705 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.77.2 +0.1.78.1 \ No newline at end of file From c4d67154431c94aa04c7f3e8f2fadf48fbdeed20 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Tue, 10 Feb 2026 20:59:54 +0800 Subject: [PATCH 021/175] chore: squash merge customizations from develop-old-0.1.77 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 定制文档: CLAUDE.md, AGENTS.md - UI定制: 微信客服按钮, 首页改造, 移除GitHub链接 - 部署运维: docker-compose.yml, 压测脚本 - CI/gitignore 小改动 --- .github/workflows/backend-ci.yml | 2 + .gitignore | 1 + AGENTS.md | 723 ++++++++++++++++++ CLAUDE.md | 723 
++++++++++++++++++ deploy/docker-compose.yml | 41 +- frontend/public/wechat-qr.jpg | Bin 0 -> 151392 bytes .../components/common/WechatServiceButton.vue | 104 +++ frontend/src/components/layout/AppHeader.vue | 17 - frontend/src/views/HomeView.vue | 173 ++++- stress_test_gemini_session.sh | 127 +++ 10 files changed, 1832 insertions(+), 79 deletions(-) create mode 100644 AGENTS.md create mode 100644 CLAUDE.md create mode 100644 frontend/public/wechat-qr.jpg create mode 100644 frontend/src/components/common/WechatServiceButton.vue create mode 100644 stress_test_gemini_session.sh diff --git a/.github/workflows/backend-ci.yml b/.github/workflows/backend-ci.yml index 2596a18c..84575a96 100644 --- a/.github/workflows/backend-ci.yml +++ b/.github/workflows/backend-ci.yml @@ -17,6 +17,7 @@ jobs: go-version-file: backend/go.mod check-latest: false cache: true + cache-dependency-path: backend/go.sum - name: Verify Go version run: | go version | grep -q 'go1.25.7' @@ -36,6 +37,7 @@ jobs: go-version-file: backend/go.mod check-latest: false cache: true + cache-dependency-path: backend/go.sum - name: Verify Go version run: | go version | grep -q 'go1.25.7' diff --git a/.gitignore b/.gitignore index 48172982..2f2bfbdf 100644 --- a/.gitignore +++ b/.gitignore @@ -78,6 +78,7 @@ Desktop.ini # =================== tmp/ temp/ +logs/ *.tmp *.temp *.log diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 00000000..a7a3e34a --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,723 @@ +# Sub2API 开发说明 + +## 版本管理策略 + +### 版本号规则 + +我们在官方版本号后面添加自己的小版本号: + +- 官方版本:`v0.1.68` +- 我们的版本:`v0.1.68.1`、`v0.1.68.2`(递增) + +### 分支策略 + +| 分支 | 说明 | +|------|------| +| `main` | 我们的主分支,包含所有定制功能 | +| `release/custom-X.Y.Z` | 基于官方 `vX.Y.Z` 的发布分支 | +| `upstream/main` | 上游官方仓库 | + +--- + +## 发布流程(基于新官方版本) + +当官方发布新版本(如 `v0.1.69`)时: + +### 1. 
同步上游并创建发布分支 + +```bash +# 获取上游最新代码 +git fetch upstream --tags + +# 基于官方标签创建新的发布分支 +git checkout v0.1.69 -b release/custom-0.1.69 + +# 合并我们的 main 分支(包含所有定制功能) +git merge main --no-edit + +# 解决可能的冲突后继续 +``` + +### 2. 更新版本号并打标签 + +```bash +# 更新版本号文件 +echo "0.1.69.1" > backend/cmd/server/VERSION +git add backend/cmd/server/VERSION +git commit -m "chore: bump version to 0.1.69.1" + +# 打上我们自己的标签 +git tag v0.1.69.1 + +# 推送分支和标签 +git push origin release/custom-0.1.69 +git push origin v0.1.69.1 +``` + +### 3. 更新 main 分支 + +```bash +# 将发布分支合并回 main,保持 main 包含最新定制功能 +git checkout main +git merge release/custom-0.1.69 +git push origin main +``` + +--- + +## 热修复发布(在现有版本上修复) + +当需要在当前版本上发布修复时: + +```bash +# 在当前发布分支上修复 +git checkout release/custom-0.1.68 +# ... 进行修复 ... +git commit -m "fix: 修复描述" + +# 递增小版本号 +echo "0.1.68.2" > backend/cmd/server/VERSION +git add backend/cmd/server/VERSION +git commit -m "chore: bump version to 0.1.68.2" + +# 打标签并推送 +git tag v0.1.68.2 +git push origin release/custom-0.1.68 +git push origin v0.1.68.2 + +# 同步修复到 main +git checkout main +git cherry-pick +git push origin main +``` + +--- + +## 服务器部署流程 + +### 前置条件 + +- 本地已配置 SSH 别名 `clicodeplus` 连接到服务器 +- 服务器部署目录:`/root/sub2api`(正式)、`/root/sub2api-beta`(测试) +- 服务器使用 Docker Compose 部署 + +### 部署环境说明 + +| 环境 | 目录 | 端口 | 数据库 | 容器名 | +|------|------|------|--------|--------| +| 正式 | `/root/sub2api` | 8080 | `sub2api` | `sub2api` | +| Beta | `/root/sub2api-beta` | 8084 | `beta` | `sub2api-beta` | + +### 外部数据库 + +正式和 Beta 环境**共用外部 PostgreSQL 数据库**(非容器内数据库),配置在 `.env` 文件中: +- `DATABASE_HOST`:外部数据库地址 +- `DATABASE_SSLMODE`:SSL 模式(通常为 `require`) +- `POSTGRES_USER` / `POSTGRES_DB`:用户名和数据库名 + +#### 数据库操作命令 + +通过 SSH 在服务器上执行数据库操作: + +```bash +# 正式环境 - 查询迁移记录 +ssh clicodeplus "source /root/sub2api/deploy/.env && PGPASSWORD=\"\$POSTGRES_PASSWORD\" psql -h \$DATABASE_HOST -U \$POSTGRES_USER -d \$POSTGRES_DB -c 'SELECT * FROM schema_migrations ORDER BY applied_at DESC LIMIT 5;'" + +# Beta 环境 - 查询迁移记录 +ssh clicodeplus 
"source /root/sub2api-beta/deploy/.env && PGPASSWORD=\"\$POSTGRES_PASSWORD\" psql -h \$DATABASE_HOST -U \$POSTGRES_USER -d \$POSTGRES_DB -c 'SELECT * FROM schema_migrations ORDER BY applied_at DESC LIMIT 5;'" + +# Beta 环境 - 清除指定迁移记录(重新执行迁移) +ssh clicodeplus "source /root/sub2api-beta/deploy/.env && PGPASSWORD=\"\$POSTGRES_PASSWORD\" psql -h \$DATABASE_HOST -U \$POSTGRES_USER -d \$POSTGRES_DB -c \"DELETE FROM schema_migrations WHERE filename LIKE '%049%';\"" + +# Beta 环境 - 更新账号数据 +ssh clicodeplus "source /root/sub2api-beta/deploy/.env && PGPASSWORD=\"\$POSTGRES_PASSWORD\" psql -h \$DATABASE_HOST -U \$POSTGRES_USER -d \$POSTGRES_DB -c \"UPDATE accounts SET credentials = credentials - 'model_mapping' WHERE platform = 'antigravity';\"" +``` + +> **注意**:使用 `source .env` 加载环境变量,避免在命令行中暴露密码。 + +### 部署步骤 + +**重要:每次部署都必须递增版本号!** + +#### 0. 递增版本号(本地操作) + +每次部署前,先在本地递增小版本号: + +```bash +# 查看当前版本号 +cat backend/cmd/server/VERSION +# 假设当前是 0.1.69.1 + +# 递增版本号 +echo "0.1.69.2" > backend/cmd/server/VERSION +git add backend/cmd/server/VERSION +git commit -m "chore: bump version to 0.1.69.2" +git push origin release/custom-0.1.69 +``` + +#### 1. 服务器拉取代码 + +```bash +ssh clicodeplus "cd /root/sub2api && git fetch fork && git checkout -B release/custom-0.1.69 fork/release/custom-0.1.69" +``` + +#### 2. 服务器构建镜像 + +```bash +ssh clicodeplus "cd /root/sub2api && docker build --no-cache -t sub2api:latest -f Dockerfile ." +``` + +#### 3. 更新镜像标签并重启服务 + +```bash +ssh clicodeplus "docker tag sub2api:latest weishaw/sub2api:latest" +ssh clicodeplus "cd /root/sub2api/deploy && docker compose up -d --force-recreate sub2api" +``` + +#### 4. 
验证部署 + +```bash +# 查看启动日志 +ssh clicodeplus "docker logs sub2api --tail 20" + +# 确认版本号(必须与步骤 0 中设置的版本号一致) +ssh clicodeplus "cat /root/sub2api/backend/cmd/server/VERSION" + +# 检查容器状态 +ssh clicodeplus "docker ps | grep sub2api" +``` + +--- + +## Beta 并行部署(不影响现网) + +目标:在同一台服务器上并行启动一个 beta 实例(例如端口 `8084`),**严禁改动/重启**现网实例(默认目录 `/root/sub2api`)。 + +### 设计原则 + +- **新目录**:beta 使用独立目录,例如 `/root/sub2api-beta`。 +- **敏感信息只放 `.env`**:beta 的数据库密码、JWT_SECRET 等只写入 `/root/sub2api-beta/deploy/.env`,不要提交到 git。 +- **独立 Compose Project**:通过 `docker compose -p sub2api-beta ...` 启动,确保 network/volume 隔离。 +- **独立端口**:通过 `.env` 的 `SERVER_PORT` 映射宿主机端口(例如 `8084:8080`)。 + +### 前置检查 + +```bash +# 1) 确保 8084 未被占用 +ssh clicodeplus "ss -ltnp | grep :8084 || echo '8084 is free'" + +# 2) 确认现网容器还在(只读检查) +ssh clicodeplus "docker ps --format 'table {{.Names}}\t{{.Image}}\t{{.Ports}}' | sed -n '1,200p'" +``` + +### 首次部署步骤 + +```bash +# 0) 进入服务器 +ssh clicodeplus + +# 1) 克隆代码到新目录(示例使用你的 fork) +cd /root +git clone https://github.com/touwaeriol/sub2api.git sub2api-beta +cd /root/sub2api-beta +git checkout release/custom-0.1.71 + +# 2) 准备 beta 的 .env(敏感信息只写这里) +cd /root/sub2api-beta/deploy + +# 推荐:从现网 .env 复制,保证除 DB 名/用户/端口外完全一致 +cp -f /root/sub2api/deploy/.env ./.env + +# 仅修改以下三项(其他保持不变) +perl -pi -e 's/^SERVER_PORT=.*/SERVER_PORT=8084/' ./.env +perl -pi -e 's/^POSTGRES_USER=.*/POSTGRES_USER=beta/' ./.env +perl -pi -e 's/^POSTGRES_DB=.*/POSTGRES_DB=beta/' ./.env + +# 3) 写 compose override(避免与现网容器名冲突,镜像使用本地构建的 sub2api:beta) +cat > docker-compose.override.yml <<'YAML' +services: + sub2api: + image: sub2api:beta + container_name: sub2api-beta + redis: + container_name: sub2api-beta-redis +YAML + +# 4) 构建 beta 镜像(基于当前代码) +cd /root/sub2api-beta +docker build -t sub2api:beta -f Dockerfile . 
+ +# 5) 启动 beta(独立 project,确保不影响现网) +cd /root/sub2api-beta/deploy +docker compose -p sub2api-beta --env-file .env -f docker-compose.yml -f docker-compose.override.yml up -d + +# 6) 验证 beta +curl -fsS http://127.0.0.1:8084/health +docker logs sub2api-beta --tail 50 +``` + +### 数据库配置约定(beta) + +- 数据库地址/SSL/密码:与现网一致(从现网 `.env` 复制即可)。 +- 仅修改: + - `POSTGRES_USER=beta` + - `POSTGRES_DB=beta` + +注意:需要数据库侧已存在 `beta` 用户与 `beta` 数据库,并授予权限;否则容器会启动失败并不断重启。 + +### 更新 beta(拉代码 + 仅重建 beta 容器) + +```bash +ssh clicodeplus "set -e; cd /root/sub2api-beta && git fetch --all --tags && git checkout -f release/custom-0.1.71 && git reset --hard origin/release/custom-0.1.71" +ssh clicodeplus "cd /root/sub2api-beta && docker build -t sub2api:beta -f Dockerfile ." +ssh clicodeplus "cd /root/sub2api-beta/deploy && docker compose -p sub2api-beta --env-file .env -f docker-compose.yml -f docker-compose.override.yml up -d --no-deps --force-recreate sub2api" +ssh clicodeplus "curl -fsS http://127.0.0.1:8084/health" +``` + +### 停止/回滚 beta(只影响 beta) + +```bash +ssh clicodeplus "cd /root/sub2api-beta/deploy && docker compose -p sub2api-beta -f docker-compose.yml -f docker-compose.override.yml down" +``` + +--- + +## 服务器首次部署 + +### 1. 克隆代码并配置远程仓库 + +```bash +ssh clicodeplus +cd /root +git clone https://github.com/Wei-Shaw/sub2api.git +cd sub2api + +# 添加 fork 仓库 +git remote add fork https://github.com/touwaeriol/sub2api.git +``` + +### 2. 切换到定制分支并配置环境 + +```bash +git fetch fork +git checkout -B release/custom-0.1.69 fork/release/custom-0.1.69 + +cd deploy +cp .env.example .env +vim .env # 配置 DATABASE_URL, REDIS_URL, JWT_SECRET 等 +``` + +### 3. 构建并启动 + +```bash +cd /root/sub2api +docker build -t sub2api:latest -f Dockerfile . +docker tag sub2api:latest weishaw/sub2api:latest +cd deploy && docker compose up -d +``` + +### 6. 启动服务 + +```bash +# 进入 deploy 目录 +cd deploy + +# 启动所有服务(PostgreSQL、Redis、sub2api) +docker compose up -d + +# 查看服务状态 +docker compose ps +``` + +### 7. 
验证部署 + +```bash +# 查看应用日志 +docker logs sub2api --tail 50 + +# 检查健康状态 +curl http://localhost:8080/health + +# 确认版本号 +cat /root/sub2api/backend/cmd/server/VERSION +``` + +### 8. 常用运维命令 + +```bash +# 查看实时日志 +docker logs -f sub2api + +# 重启服务 +docker compose restart sub2api + +# 停止所有服务 +docker compose down + +# 停止并删除数据卷(慎用!会删除数据库数据) +docker compose down -v + +# 查看资源使用情况 +docker stats sub2api +``` + +--- + +## 定制功能说明 + +当前定制分支包含以下功能(相对于官方版本): + +### UI/UX 定制 + +| 功能 | 说明 | +|------|------| +| 首页优化 | 面向用户的价值主张设计 | +| 移除 GitHub 链接 | 用户菜单中不显示 GitHub 导航 | +| 微信客服按钮 | 首页悬浮微信客服入口 | +| 限流时间精确显示 | 账号限流时间显示精确到秒 | + +### Antigravity 平台增强 + +| 功能 | 说明 | +|------|------| +| Scope 级别限流 | 按配额域(claude/gemini_text/gemini_image)独立限流,避免整个账号被锁定 | +| 模型级别限流 | 按具体模型(如 claude-opus-4-5)独立限流,更精细的限流控制 | +| 限流预检查 | 调度时预检查账号/模型限流状态,避免选中已限流账号 | +| 秒级冷却时间 | 支持 429 响应的秒级精确冷却时间 | +| 身份注入优化 | 模型身份信息注入 + 静默边界防止身份泄露 | +| thoughtSignature 修复 | Gemini 3 函数调用 400 错误修复 | +| max_tokens 自动修正 | 自动修正 max_tokens <= budget_tokens 导致的 400 错误 | + +### 调度算法优化 + +| 功能 | 说明 | +|------|------| +| 分层过滤选择 | 调度算法从全排序改为分层过滤,提升性能 | +| LRU 随机选择 | 相同 LRU 时间时随机选择,避免账号集中 | +| 限流等待阈值配置化 | 可配置的限流等待阈值 | + +### 运维增强 + +| 功能 | 说明 | +|------|------| +| Scope 限流统计 | 运维界面展示 Antigravity 账号 scope 级别限流统计 | +| 账号限流状态显示 | 账号列表显示 scope 和模型级别限流状态 | +| 清除限流按钮增强 | 有 scope/模型限流时也显示清除限流按钮 | + +### 其他修复 + +| 功能 | 说明 | +|------|------| +| .gitattributes | 确保迁移文件使用 LF 换行符(解决 Windows 下 SQL 摘要不一致) | +| 部署配置优化 | DATABASE_HOST 和 DATABASE_SSLMODE 可通过 .env 配置 | + +--- + +## 注意事项 + +1. **前端必须打包进镜像**:使用 `docker build` 在服务器上构建,Dockerfile 会自动编译前端并 embed 到后端二进制中 + +2. **镜像标签**:docker-compose.yml 使用 `weishaw/sub2api:latest`,本地构建后需要 `docker tag` 覆盖 + +3. **Windows 换行符问题**:已通过 `.gitattributes` 解决,确保 `*.sql` 文件始终使用 LF + +4. **版本号管理**:每次发布必须更新 `backend/cmd/server/VERSION` 并打标签 + +5. 
**合并冲突**:合并上游新版本时,重点关注以下文件可能的冲突: + - `backend/internal/service/antigravity_gateway_service.go` + - `backend/internal/service/gateway_service.go` + - `backend/internal/pkg/antigravity/request_transformer.go` + +--- + +## Go 代码规范 + +### 1. 函数设计 + +#### 单一职责原则 +- **函数行数**:单个函数常规不应超过 **30 行**,超过时应拆分为子函数。若某段逻辑确实不可拆分(如复杂的状态机、协议解析等),可以例外,但需添加注释说明原因 +- **嵌套层级**:避免超过 3 层嵌套,使用 early return 减少嵌套 + +```go +// ❌ 不推荐:深层嵌套 +func process(data []Item) { + for _, item := range data { + if item.Valid { + if item.Type == "A" { + if item.Status == "active" { + // 业务逻辑... + } + } + } + } +} + +// ✅ 推荐:early return +func process(data []Item) { + for _, item := range data { + if !item.Valid { + continue + } + if item.Type != "A" { + continue + } + if item.Status != "active" { + continue + } + // 业务逻辑... + } +} +``` + +#### 复杂逻辑提取 +将复杂的条件判断或处理逻辑提取为独立函数: + +```go +// ❌ 不推荐:内联复杂逻辑 +if resp.StatusCode == 429 || resp.StatusCode == 503 { + // 80+ 行处理逻辑... +} + +// ✅ 推荐:提取为独立函数 +result := handleRateLimitResponse(resp, params) +switch result.action { +case actionRetry: + continue +case actionBreak: + return result.resp, nil +} +``` + +### 2. 重复代码消除 + +#### 配置获取模式 +将重复的配置获取逻辑提取为方法: + +```go +// ❌ 不推荐:重复代码 +logBody := s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBody +maxBytes := 2048 +if s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes > 0 { + maxBytes = s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes +} + +// ✅ 推荐:提取为方法 +func (s *Service) getLogConfig() (logBody bool, maxBytes int) { + maxBytes = 2048 + if s.settingService == nil || s.settingService.cfg == nil { + return false, maxBytes + } + cfg := s.settingService.cfg.Gateway + if cfg.LogUpstreamErrorBodyMaxBytes > 0 { + maxBytes = cfg.LogUpstreamErrorBodyMaxBytes + } + return cfg.LogUpstreamErrorBody, maxBytes +} +``` + +### 3. 
常量管理 + +#### 避免魔法数字 +所有硬编码的数值都应定义为常量: + +```go +// ❌ 不推荐 +if retryDelay >= 10*time.Second { + resetAt := time.Now().Add(30 * time.Second) +} + +// ✅ 推荐 +const ( + rateLimitThreshold = 10 * time.Second + defaultRateLimitDuration = 30 * time.Second +) + +if retryDelay >= rateLimitThreshold { + resetAt := time.Now().Add(defaultRateLimitDuration) +} +``` + +#### 注释引用常量名 +在注释中引用常量名而非硬编码值: + +```go +// ❌ 不推荐 +// < 10s: 等待后重试 + +// ✅ 推荐 +// < rateLimitThreshold: 等待后重试 +``` + +### 4. 错误处理 + +#### 使用结构化日志 +优先使用 `slog` 进行结构化日志记录: + +```go +// ❌ 不推荐 +log.Printf("%s status=%d model_rate_limit_failed model=%s error=%v", prefix, statusCode, modelName, err) + +// ✅ 推荐 +slog.Error("failed to set model rate limit", + "prefix", prefix, + "status_code", statusCode, + "model", modelName, + "error", err, +) +``` + +### 5. 测试规范 + +#### Mock 函数签名同步 +修改函数签名时,必须同步更新所有测试中的 mock 函数: + +```go +// 如果修改了 handleError 签名 +handleError func(..., groupID int64, sessionHash string) *Result + +// 必须同步更新测试中的 mock +handleError: func(..., groupID int64, sessionHash string) *Result { + return nil +}, +``` + +#### 测试构建标签 +统一使用测试构建标签: + +```go +//go:build unit + +package service +``` + +### 6. 时间格式解析 + +#### 使用标准库 +优先使用 `time.ParseDuration`,支持所有 Go duration 格式: + +```go +// ❌ 不推荐:手动限制格式 +if !strings.HasSuffix(delay, "s") || strings.Contains(delay, "m") { + continue +} + +// ✅ 推荐:使用标准库 +dur, err := time.ParseDuration(delay) // 支持 "0.5s", "4m50s", "1h30m" 等 +``` + +### 7. 接口设计 + +#### 接口隔离原则 +定义最小化接口,只包含必需的方法: + +```go +// ❌ 不推荐:使用过于宽泛的接口 +type AccountRepository interface { + // 20+ 个方法... +} + +// ✅ 推荐:定义最小化接口 +type ModelRateLimiter interface { + SetModelRateLimit(ctx context.Context, id int64, modelKey string, resetAt time.Time) error +} +``` + +### 8. 并发安全 + +#### 共享数据保护 +访问可能被并发修改的数据时,确保线程安全: + +```go +// 如果 Account.Extra 可能被并发修改 +// 需要使用互斥锁或原子操作保护读取 +func (a *Account) GetRateLimitRemainingTime(model string) time.Duration { + a.mu.RLock() + defer a.mu.RUnlock() + // 读取 Extra 字段... +} +``` + +### 9. 
命名规范 + +#### 一致的命名风格 +- 常量使用 camelCase:`rateLimitThreshold` +- 类型使用 PascalCase:`AntigravityQuotaScope` +- 同一概念使用统一命名:`Threshold` 或 `Limit`,不要混用 + +```go +// ❌ 不推荐:命名不一致 +antigravitySmartRetryMinWait // 使用 Min +antigravityRateLimitThreshold // 使用 Threshold + +// ✅ 推荐:统一风格 +antigravityMinRetryWait +antigravityRateLimitThreshold +``` + +### 10. 代码审查清单 + +在提交代码前,检查以下项目: + +- [ ] 函数是否超过 30 行?(不可拆分的逻辑除外,需注释说明) +- [ ] 嵌套是否超过 3 层? +- [ ] 是否有重复代码可以提取? +- [ ] 是否使用了魔法数字? +- [ ] Mock 函数签名是否与实际函数一致? +- [ ] 测试是否覆盖了新增逻辑? +- [ ] 日志是否包含足够的上下文信息? +- [ ] 是否考虑了并发安全? + +--- + +## CI 检查与发布门禁 + +### GitHub Actions 检查项 + +本项目有 4 个 CI 任务,**任何代码推送或发布前都必须全部通过**: + +| Workflow | Job | 说明 | 本地验证命令 | +|----------|-----|------|-------------| +| CI | `test` | 单元测试 + 集成测试 | `cd backend && make test-unit && make test-integration` | +| CI | `golangci-lint` | Go 代码静态检查(golangci-lint v2.7) | `cd backend && golangci-lint run --timeout=5m` | +| Security Scan | `backend-security` | govulncheck + gosec 安全扫描 | `cd backend && govulncheck ./... && gosec -severity high -confidence high ./...` | +| Security Scan | `frontend-security` | pnpm audit 前端依赖安全检查 | `cd frontend && pnpm audit --prod --audit-level=high` | + +### 向上游提交 PR + +PR 目标是上游官方仓库,**只包含通用功能改动**(bug fix、新功能、性能优化等)。 + +**以下文件禁止出现在 PR 中**(属于我们 fork 的定制化内容): +- `CLAUDE.md`、`AGENTS.md` — 我们的开发文档 +- `backend/cmd/server/VERSION` — 我们的版本号文件 +- UI 定制改动(GitHub 链接移除、微信客服按钮、首页定制等) +- 部署配置(`deploy/` 目录下的定制修改) + +**PR 流程**: +1. 从 `develop` 创建功能分支,只包含要提交给上游的改动 +2. 推送分支后,**等待 4 个 CI job 全部通过** +3. 确认通过后再创建 PR +4. 使用 `gh run list --repo touwaeriol/sub2api --branch ` 检查状态 + +### 自有分支推送(develop / main) + +推送到我们自己的 `develop` 或 `main` 分支时,包含所有改动(定制化 + 通用功能)。 + +**推送流程**: +1. 本地运行 `cd backend && make test-unit` 确保单元测试通过 +2. 本地运行 `cd backend && gofmt -l ./...` 确保格式正确 +3. 推送后确认 CI 和 Security Scan 两个 workflow 的 4 个 job 全部绿色 ✅ +4. 任何 job 失败必须立即修复,**禁止在 CI 未通过的状态下继续后续操作** + +### 发布版本 + +1. 确保 `main` 分支最新提交的 4 个 CI job 全部通过 +2. 递增 `backend/cmd/server/VERSION`,提交并推送 +3. 
打 tag 推送后,确认 tag 触发的 3 个 workflow(CI、Security Scan、Release)全部通过 +4. **Release workflow 失败时禁止部署** — 必须先修复问题,删除旧 tag,重新打 tag +5. 使用 `gh run list --repo touwaeriol/sub2api --limit 10` 确认状态 + +### 常见 CI 失败原因及修复 +- **gofmt**:struct 字段对齐不一致 → 运行 `gofmt -w ` 修复 +- **golangci-lint**:未使用的变量/导入 → 删除或使用 `_` 忽略 +- **test 失败**:mock 函数签名不一致 → 同步更新 mock +- **gosec**:安全漏洞 → 根据提示修复或添加例外 diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 00000000..a7a3e34a --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,723 @@ +# Sub2API 开发说明 + +## 版本管理策略 + +### 版本号规则 + +我们在官方版本号后面添加自己的小版本号: + +- 官方版本:`v0.1.68` +- 我们的版本:`v0.1.68.1`、`v0.1.68.2`(递增) + +### 分支策略 + +| 分支 | 说明 | +|------|------| +| `main` | 我们的主分支,包含所有定制功能 | +| `release/custom-X.Y.Z` | 基于官方 `vX.Y.Z` 的发布分支 | +| `upstream/main` | 上游官方仓库 | + +--- + +## 发布流程(基于新官方版本) + +当官方发布新版本(如 `v0.1.69`)时: + +### 1. 同步上游并创建发布分支 + +```bash +# 获取上游最新代码 +git fetch upstream --tags + +# 基于官方标签创建新的发布分支 +git checkout v0.1.69 -b release/custom-0.1.69 + +# 合并我们的 main 分支(包含所有定制功能) +git merge main --no-edit + +# 解决可能的冲突后继续 +``` + +### 2. 更新版本号并打标签 + +```bash +# 更新版本号文件 +echo "0.1.69.1" > backend/cmd/server/VERSION +git add backend/cmd/server/VERSION +git commit -m "chore: bump version to 0.1.69.1" + +# 打上我们自己的标签 +git tag v0.1.69.1 + +# 推送分支和标签 +git push origin release/custom-0.1.69 +git push origin v0.1.69.1 +``` + +### 3. 更新 main 分支 + +```bash +# 将发布分支合并回 main,保持 main 包含最新定制功能 +git checkout main +git merge release/custom-0.1.69 +git push origin main +``` + +--- + +## 热修复发布(在现有版本上修复) + +当需要在当前版本上发布修复时: + +```bash +# 在当前发布分支上修复 +git checkout release/custom-0.1.68 +# ... 进行修复 ... 
+git commit -m "fix: 修复描述"
+
+# 递增小版本号
+echo "0.1.68.2" > backend/cmd/server/VERSION
+git add backend/cmd/server/VERSION
+git commit -m "chore: bump version to 0.1.68.2"
+
+# 打标签并推送
+git tag v0.1.68.2
+git push origin release/custom-0.1.68
+git push origin v0.1.68.2
+
+# 同步修复到 main
+git checkout main
+git cherry-pick <commit-hash>
+git push origin main
+```
+
+---
+
+## 服务器部署流程
+
+### 前置条件
+
+- 本地已配置 SSH 别名 `clicodeplus` 连接到服务器
+- 服务器部署目录:`/root/sub2api`(正式)、`/root/sub2api-beta`(测试)
+- 服务器使用 Docker Compose 部署
+
+### 部署环境说明
+
+| 环境 | 目录 | 端口 | 数据库 | 容器名 |
+|------|------|------|--------|--------|
+| 正式 | `/root/sub2api` | 8080 | `sub2api` | `sub2api` |
+| Beta | `/root/sub2api-beta` | 8084 | `beta` | `sub2api-beta` |
+
+### 外部数据库
+
+正式和 Beta 环境**共用外部 PostgreSQL 数据库**(非容器内数据库),配置在 `.env` 文件中:
+- `DATABASE_HOST`:外部数据库地址
+- `DATABASE_SSLMODE`:SSL 模式(通常为 `require`)
+- `POSTGRES_USER` / `POSTGRES_DB`:用户名和数据库名
+
+#### 数据库操作命令
+
+通过 SSH 在服务器上执行数据库操作:
+
+```bash
+# 正式环境 - 查询迁移记录
+ssh clicodeplus "source /root/sub2api/deploy/.env && PGPASSWORD=\"\$POSTGRES_PASSWORD\" psql -h \$DATABASE_HOST -U \$POSTGRES_USER -d \$POSTGRES_DB -c 'SELECT * FROM schema_migrations ORDER BY applied_at DESC LIMIT 5;'"
+
+# Beta 环境 - 查询迁移记录
+ssh clicodeplus "source /root/sub2api-beta/deploy/.env && PGPASSWORD=\"\$POSTGRES_PASSWORD\" psql -h \$DATABASE_HOST -U \$POSTGRES_USER -d \$POSTGRES_DB -c 'SELECT * FROM schema_migrations ORDER BY applied_at DESC LIMIT 5;'"
+
+# Beta 环境 - 清除指定迁移记录(重新执行迁移)
+ssh clicodeplus "source /root/sub2api-beta/deploy/.env && PGPASSWORD=\"\$POSTGRES_PASSWORD\" psql -h \$DATABASE_HOST -U \$POSTGRES_USER -d \$POSTGRES_DB -c \"DELETE FROM schema_migrations WHERE filename LIKE '%049%';\""
+
+# Beta 环境 - 更新账号数据
+ssh clicodeplus "source /root/sub2api-beta/deploy/.env && PGPASSWORD=\"\$POSTGRES_PASSWORD\" psql -h \$DATABASE_HOST -U \$POSTGRES_USER -d \$POSTGRES_DB -c \"UPDATE accounts SET credentials = credentials - 'model_mapping' WHERE platform = 'antigravity';\""
+```
+
+> **注意**:使用 
`source .env` 加载环境变量,避免在命令行中暴露密码。 + +### 部署步骤 + +**重要:每次部署都必须递增版本号!** + +#### 0. 递增版本号(本地操作) + +每次部署前,先在本地递增小版本号: + +```bash +# 查看当前版本号 +cat backend/cmd/server/VERSION +# 假设当前是 0.1.69.1 + +# 递增版本号 +echo "0.1.69.2" > backend/cmd/server/VERSION +git add backend/cmd/server/VERSION +git commit -m "chore: bump version to 0.1.69.2" +git push origin release/custom-0.1.69 +``` + +#### 1. 服务器拉取代码 + +```bash +ssh clicodeplus "cd /root/sub2api && git fetch fork && git checkout -B release/custom-0.1.69 fork/release/custom-0.1.69" +``` + +#### 2. 服务器构建镜像 + +```bash +ssh clicodeplus "cd /root/sub2api && docker build --no-cache -t sub2api:latest -f Dockerfile ." +``` + +#### 3. 更新镜像标签并重启服务 + +```bash +ssh clicodeplus "docker tag sub2api:latest weishaw/sub2api:latest" +ssh clicodeplus "cd /root/sub2api/deploy && docker compose up -d --force-recreate sub2api" +``` + +#### 4. 验证部署 + +```bash +# 查看启动日志 +ssh clicodeplus "docker logs sub2api --tail 20" + +# 确认版本号(必须与步骤 0 中设置的版本号一致) +ssh clicodeplus "cat /root/sub2api/backend/cmd/server/VERSION" + +# 检查容器状态 +ssh clicodeplus "docker ps | grep sub2api" +``` + +--- + +## Beta 并行部署(不影响现网) + +目标:在同一台服务器上并行启动一个 beta 实例(例如端口 `8084`),**严禁改动/重启**现网实例(默认目录 `/root/sub2api`)。 + +### 设计原则 + +- **新目录**:beta 使用独立目录,例如 `/root/sub2api-beta`。 +- **敏感信息只放 `.env`**:beta 的数据库密码、JWT_SECRET 等只写入 `/root/sub2api-beta/deploy/.env`,不要提交到 git。 +- **独立 Compose Project**:通过 `docker compose -p sub2api-beta ...` 启动,确保 network/volume 隔离。 +- **独立端口**:通过 `.env` 的 `SERVER_PORT` 映射宿主机端口(例如 `8084:8080`)。 + +### 前置检查 + +```bash +# 1) 确保 8084 未被占用 +ssh clicodeplus "ss -ltnp | grep :8084 || echo '8084 is free'" + +# 2) 确认现网容器还在(只读检查) +ssh clicodeplus "docker ps --format 'table {{.Names}}\t{{.Image}}\t{{.Ports}}' | sed -n '1,200p'" +``` + +### 首次部署步骤 + +```bash +# 0) 进入服务器 +ssh clicodeplus + +# 1) 克隆代码到新目录(示例使用你的 fork) +cd /root +git clone https://github.com/touwaeriol/sub2api.git sub2api-beta +cd /root/sub2api-beta +git checkout release/custom-0.1.71 + +# 2) 准备 beta 的 
.env(敏感信息只写这里) +cd /root/sub2api-beta/deploy + +# 推荐:从现网 .env 复制,保证除 DB 名/用户/端口外完全一致 +cp -f /root/sub2api/deploy/.env ./.env + +# 仅修改以下三项(其他保持不变) +perl -pi -e 's/^SERVER_PORT=.*/SERVER_PORT=8084/' ./.env +perl -pi -e 's/^POSTGRES_USER=.*/POSTGRES_USER=beta/' ./.env +perl -pi -e 's/^POSTGRES_DB=.*/POSTGRES_DB=beta/' ./.env + +# 3) 写 compose override(避免与现网容器名冲突,镜像使用本地构建的 sub2api:beta) +cat > docker-compose.override.yml <<'YAML' +services: + sub2api: + image: sub2api:beta + container_name: sub2api-beta + redis: + container_name: sub2api-beta-redis +YAML + +# 4) 构建 beta 镜像(基于当前代码) +cd /root/sub2api-beta +docker build -t sub2api:beta -f Dockerfile . + +# 5) 启动 beta(独立 project,确保不影响现网) +cd /root/sub2api-beta/deploy +docker compose -p sub2api-beta --env-file .env -f docker-compose.yml -f docker-compose.override.yml up -d + +# 6) 验证 beta +curl -fsS http://127.0.0.1:8084/health +docker logs sub2api-beta --tail 50 +``` + +### 数据库配置约定(beta) + +- 数据库地址/SSL/密码:与现网一致(从现网 `.env` 复制即可)。 +- 仅修改: + - `POSTGRES_USER=beta` + - `POSTGRES_DB=beta` + +注意:需要数据库侧已存在 `beta` 用户与 `beta` 数据库,并授予权限;否则容器会启动失败并不断重启。 + +### 更新 beta(拉代码 + 仅重建 beta 容器) + +```bash +ssh clicodeplus "set -e; cd /root/sub2api-beta && git fetch --all --tags && git checkout -f release/custom-0.1.71 && git reset --hard origin/release/custom-0.1.71" +ssh clicodeplus "cd /root/sub2api-beta && docker build -t sub2api:beta -f Dockerfile ." +ssh clicodeplus "cd /root/sub2api-beta/deploy && docker compose -p sub2api-beta --env-file .env -f docker-compose.yml -f docker-compose.override.yml up -d --no-deps --force-recreate sub2api" +ssh clicodeplus "curl -fsS http://127.0.0.1:8084/health" +``` + +### 停止/回滚 beta(只影响 beta) + +```bash +ssh clicodeplus "cd /root/sub2api-beta/deploy && docker compose -p sub2api-beta -f docker-compose.yml -f docker-compose.override.yml down" +``` + +--- + +## 服务器首次部署 + +### 1. 
克隆代码并配置远程仓库 + +```bash +ssh clicodeplus +cd /root +git clone https://github.com/Wei-Shaw/sub2api.git +cd sub2api + +# 添加 fork 仓库 +git remote add fork https://github.com/touwaeriol/sub2api.git +``` + +### 2. 切换到定制分支并配置环境 + +```bash +git fetch fork +git checkout -B release/custom-0.1.69 fork/release/custom-0.1.69 + +cd deploy +cp .env.example .env +vim .env # 配置 DATABASE_URL, REDIS_URL, JWT_SECRET 等 +``` + +### 3. 构建并启动 + +```bash +cd /root/sub2api +docker build -t sub2api:latest -f Dockerfile . +docker tag sub2api:latest weishaw/sub2api:latest +cd deploy && docker compose up -d +``` + +### 6. 启动服务 + +```bash +# 进入 deploy 目录 +cd deploy + +# 启动所有服务(PostgreSQL、Redis、sub2api) +docker compose up -d + +# 查看服务状态 +docker compose ps +``` + +### 7. 验证部署 + +```bash +# 查看应用日志 +docker logs sub2api --tail 50 + +# 检查健康状态 +curl http://localhost:8080/health + +# 确认版本号 +cat /root/sub2api/backend/cmd/server/VERSION +``` + +### 8. 常用运维命令 + +```bash +# 查看实时日志 +docker logs -f sub2api + +# 重启服务 +docker compose restart sub2api + +# 停止所有服务 +docker compose down + +# 停止并删除数据卷(慎用!会删除数据库数据) +docker compose down -v + +# 查看资源使用情况 +docker stats sub2api +``` + +--- + +## 定制功能说明 + +当前定制分支包含以下功能(相对于官方版本): + +### UI/UX 定制 + +| 功能 | 说明 | +|------|------| +| 首页优化 | 面向用户的价值主张设计 | +| 移除 GitHub 链接 | 用户菜单中不显示 GitHub 导航 | +| 微信客服按钮 | 首页悬浮微信客服入口 | +| 限流时间精确显示 | 账号限流时间显示精确到秒 | + +### Antigravity 平台增强 + +| 功能 | 说明 | +|------|------| +| Scope 级别限流 | 按配额域(claude/gemini_text/gemini_image)独立限流,避免整个账号被锁定 | +| 模型级别限流 | 按具体模型(如 claude-opus-4-5)独立限流,更精细的限流控制 | +| 限流预检查 | 调度时预检查账号/模型限流状态,避免选中已限流账号 | +| 秒级冷却时间 | 支持 429 响应的秒级精确冷却时间 | +| 身份注入优化 | 模型身份信息注入 + 静默边界防止身份泄露 | +| thoughtSignature 修复 | Gemini 3 函数调用 400 错误修复 | +| max_tokens 自动修正 | 自动修正 max_tokens <= budget_tokens 导致的 400 错误 | + +### 调度算法优化 + +| 功能 | 说明 | +|------|------| +| 分层过滤选择 | 调度算法从全排序改为分层过滤,提升性能 | +| LRU 随机选择 | 相同 LRU 时间时随机选择,避免账号集中 | +| 限流等待阈值配置化 | 可配置的限流等待阈值 | + +### 运维增强 + +| 功能 | 说明 | +|------|------| +| Scope 限流统计 | 运维界面展示 Antigravity 账号 scope 级别限流统计 | 
+| 账号限流状态显示 | 账号列表显示 scope 和模型级别限流状态 | +| 清除限流按钮增强 | 有 scope/模型限流时也显示清除限流按钮 | + +### 其他修复 + +| 功能 | 说明 | +|------|------| +| .gitattributes | 确保迁移文件使用 LF 换行符(解决 Windows 下 SQL 摘要不一致) | +| 部署配置优化 | DATABASE_HOST 和 DATABASE_SSLMODE 可通过 .env 配置 | + +--- + +## 注意事项 + +1. **前端必须打包进镜像**:使用 `docker build` 在服务器上构建,Dockerfile 会自动编译前端并 embed 到后端二进制中 + +2. **镜像标签**:docker-compose.yml 使用 `weishaw/sub2api:latest`,本地构建后需要 `docker tag` 覆盖 + +3. **Windows 换行符问题**:已通过 `.gitattributes` 解决,确保 `*.sql` 文件始终使用 LF + +4. **版本号管理**:每次发布必须更新 `backend/cmd/server/VERSION` 并打标签 + +5. **合并冲突**:合并上游新版本时,重点关注以下文件可能的冲突: + - `backend/internal/service/antigravity_gateway_service.go` + - `backend/internal/service/gateway_service.go` + - `backend/internal/pkg/antigravity/request_transformer.go` + +--- + +## Go 代码规范 + +### 1. 函数设计 + +#### 单一职责原则 +- **函数行数**:单个函数常规不应超过 **30 行**,超过时应拆分为子函数。若某段逻辑确实不可拆分(如复杂的状态机、协议解析等),可以例外,但需添加注释说明原因 +- **嵌套层级**:避免超过 3 层嵌套,使用 early return 减少嵌套 + +```go +// ❌ 不推荐:深层嵌套 +func process(data []Item) { + for _, item := range data { + if item.Valid { + if item.Type == "A" { + if item.Status == "active" { + // 业务逻辑... + } + } + } + } +} + +// ✅ 推荐:early return +func process(data []Item) { + for _, item := range data { + if !item.Valid { + continue + } + if item.Type != "A" { + continue + } + if item.Status != "active" { + continue + } + // 业务逻辑... + } +} +``` + +#### 复杂逻辑提取 +将复杂的条件判断或处理逻辑提取为独立函数: + +```go +// ❌ 不推荐:内联复杂逻辑 +if resp.StatusCode == 429 || resp.StatusCode == 503 { + // 80+ 行处理逻辑... +} + +// ✅ 推荐:提取为独立函数 +result := handleRateLimitResponse(resp, params) +switch result.action { +case actionRetry: + continue +case actionBreak: + return result.resp, nil +} +``` + +### 2. 
重复代码消除 + +#### 配置获取模式 +将重复的配置获取逻辑提取为方法: + +```go +// ❌ 不推荐:重复代码 +logBody := s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBody +maxBytes := 2048 +if s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes > 0 { + maxBytes = s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes +} + +// ✅ 推荐:提取为方法 +func (s *Service) getLogConfig() (logBody bool, maxBytes int) { + maxBytes = 2048 + if s.settingService == nil || s.settingService.cfg == nil { + return false, maxBytes + } + cfg := s.settingService.cfg.Gateway + if cfg.LogUpstreamErrorBodyMaxBytes > 0 { + maxBytes = cfg.LogUpstreamErrorBodyMaxBytes + } + return cfg.LogUpstreamErrorBody, maxBytes +} +``` + +### 3. 常量管理 + +#### 避免魔法数字 +所有硬编码的数值都应定义为常量: + +```go +// ❌ 不推荐 +if retryDelay >= 10*time.Second { + resetAt := time.Now().Add(30 * time.Second) +} + +// ✅ 推荐 +const ( + rateLimitThreshold = 10 * time.Second + defaultRateLimitDuration = 30 * time.Second +) + +if retryDelay >= rateLimitThreshold { + resetAt := time.Now().Add(defaultRateLimitDuration) +} +``` + +#### 注释引用常量名 +在注释中引用常量名而非硬编码值: + +```go +// ❌ 不推荐 +// < 10s: 等待后重试 + +// ✅ 推荐 +// < rateLimitThreshold: 等待后重试 +``` + +### 4. 错误处理 + +#### 使用结构化日志 +优先使用 `slog` 进行结构化日志记录: + +```go +// ❌ 不推荐 +log.Printf("%s status=%d model_rate_limit_failed model=%s error=%v", prefix, statusCode, modelName, err) + +// ✅ 推荐 +slog.Error("failed to set model rate limit", + "prefix", prefix, + "status_code", statusCode, + "model", modelName, + "error", err, +) +``` + +### 5. 测试规范 + +#### Mock 函数签名同步 +修改函数签名时,必须同步更新所有测试中的 mock 函数: + +```go +// 如果修改了 handleError 签名 +handleError func(..., groupID int64, sessionHash string) *Result + +// 必须同步更新测试中的 mock +handleError: func(..., groupID int64, sessionHash string) *Result { + return nil +}, +``` + +#### 测试构建标签 +统一使用测试构建标签: + +```go +//go:build unit + +package service +``` + +### 6. 
时间格式解析 + +#### 使用标准库 +优先使用 `time.ParseDuration`,支持所有 Go duration 格式: + +```go +// ❌ 不推荐:手动限制格式 +if !strings.HasSuffix(delay, "s") || strings.Contains(delay, "m") { + continue +} + +// ✅ 推荐:使用标准库 +dur, err := time.ParseDuration(delay) // 支持 "0.5s", "4m50s", "1h30m" 等 +``` + +### 7. 接口设计 + +#### 接口隔离原则 +定义最小化接口,只包含必需的方法: + +```go +// ❌ 不推荐:使用过于宽泛的接口 +type AccountRepository interface { + // 20+ 个方法... +} + +// ✅ 推荐:定义最小化接口 +type ModelRateLimiter interface { + SetModelRateLimit(ctx context.Context, id int64, modelKey string, resetAt time.Time) error +} +``` + +### 8. 并发安全 + +#### 共享数据保护 +访问可能被并发修改的数据时,确保线程安全: + +```go +// 如果 Account.Extra 可能被并发修改 +// 需要使用互斥锁或原子操作保护读取 +func (a *Account) GetRateLimitRemainingTime(model string) time.Duration { + a.mu.RLock() + defer a.mu.RUnlock() + // 读取 Extra 字段... +} +``` + +### 9. 命名规范 + +#### 一致的命名风格 +- 常量使用 camelCase:`rateLimitThreshold` +- 类型使用 PascalCase:`AntigravityQuotaScope` +- 同一概念使用统一命名:`Threshold` 或 `Limit`,不要混用 + +```go +// ❌ 不推荐:命名不一致 +antigravitySmartRetryMinWait // 使用 Min +antigravityRateLimitThreshold // 使用 Threshold + +// ✅ 推荐:统一风格 +antigravityMinRetryWait +antigravityRateLimitThreshold +``` + +### 10. 代码审查清单 + +在提交代码前,检查以下项目: + +- [ ] 函数是否超过 30 行?(不可拆分的逻辑除外,需注释说明) +- [ ] 嵌套是否超过 3 层? +- [ ] 是否有重复代码可以提取? +- [ ] 是否使用了魔法数字? +- [ ] Mock 函数签名是否与实际函数一致? +- [ ] 测试是否覆盖了新增逻辑? +- [ ] 日志是否包含足够的上下文信息? +- [ ] 是否考虑了并发安全? + +--- + +## CI 检查与发布门禁 + +### GitHub Actions 检查项 + +本项目有 4 个 CI 任务,**任何代码推送或发布前都必须全部通过**: + +| Workflow | Job | 说明 | 本地验证命令 | +|----------|-----|------|-------------| +| CI | `test` | 单元测试 + 集成测试 | `cd backend && make test-unit && make test-integration` | +| CI | `golangci-lint` | Go 代码静态检查(golangci-lint v2.7) | `cd backend && golangci-lint run --timeout=5m` | +| Security Scan | `backend-security` | govulncheck + gosec 安全扫描 | `cd backend && govulncheck ./... 
&& gosec -severity high -confidence high ./...` |
+| Security Scan | `frontend-security` | pnpm audit 前端依赖安全检查 | `cd frontend && pnpm audit --prod --audit-level=high` |
+
+### 向上游提交 PR
+
+PR 目标是上游官方仓库,**只包含通用功能改动**(bug fix、新功能、性能优化等)。
+
+**以下文件禁止出现在 PR 中**(属于我们 fork 的定制化内容):
+- `CLAUDE.md`、`AGENTS.md` — 我们的开发文档
+- `backend/cmd/server/VERSION` — 我们的版本号文件
+- UI 定制改动(GitHub 链接移除、微信客服按钮、首页定制等)
+- 部署配置(`deploy/` 目录下的定制修改)
+
+**PR 流程**:
+1. 从 `develop` 创建功能分支,只包含要提交给上游的改动
+2. 推送分支后,**等待 4 个 CI job 全部通过**
+3. 确认通过后再创建 PR
+4. 使用 `gh run list --repo touwaeriol/sub2api --branch <分支名>` 检查状态
+
+### 自有分支推送(develop / main)
+
+推送到我们自己的 `develop` 或 `main` 分支时,包含所有改动(定制化 + 通用功能)。
+
+**推送流程**:
+1. 本地运行 `cd backend && make test-unit` 确保单元测试通过
+2. 本地运行 `cd backend && gofmt -l ./...` 确保格式正确
+3. 推送后确认 CI 和 Security Scan 两个 workflow 的 4 个 job 全部绿色 ✅
+4. 任何 job 失败必须立即修复,**禁止在 CI 未通过的状态下继续后续操作**
+
+### 发布版本
+
+1. 确保 `main` 分支最新提交的 4 个 CI job 全部通过
+2. 递增 `backend/cmd/server/VERSION`,提交并推送
+3. 打 tag 推送后,确认 tag 触发的 3 个 workflow(CI、Security Scan、Release)全部通过
+4. **Release workflow 失败时禁止部署** — 必须先修复问题,删除旧 tag,重新打 tag
+5. 
使用 `gh run list --repo touwaeriol/sub2api --limit 10` 确认状态 + +### 常见 CI 失败原因及修复 +- **gofmt**:struct 字段对齐不一致 → 运行 `gofmt -w ` 修复 +- **golangci-lint**:未使用的变量/导入 → 删除或使用 `_` 忽略 +- **test 失败**:mock 函数签名不一致 → 同步更新 mock +- **gosec**:安全漏洞 → 根据提示修复或添加例外 diff --git a/deploy/docker-compose.yml b/deploy/docker-compose.yml index 033731ac..f1d19f84 100644 --- a/deploy/docker-compose.yml +++ b/deploy/docker-compose.yml @@ -47,13 +47,15 @@ services: # ======================================================================= # Database Configuration (PostgreSQL) + # Default: uses local postgres container + # External DB: set DATABASE_HOST and DATABASE_SSLMODE in .env # ======================================================================= - - DATABASE_HOST=postgres - - DATABASE_PORT=5432 + - DATABASE_HOST=${DATABASE_HOST:-postgres} + - DATABASE_PORT=${DATABASE_PORT:-5432} - DATABASE_USER=${POSTGRES_USER:-sub2api} - DATABASE_PASSWORD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD is required} - DATABASE_DBNAME=${POSTGRES_DB:-sub2api} - - DATABASE_SSLMODE=disable + - DATABASE_SSLMODE=${DATABASE_SSLMODE:-disable} # ======================================================================= # Redis Configuration @@ -128,8 +130,6 @@ services: # Examples: http://host:port, socks5://host:port - UPDATE_PROXY_URL=${UPDATE_PROXY_URL:-} depends_on: - postgres: - condition: service_healthy redis: condition: service_healthy networks: @@ -141,35 +141,6 @@ services: retries: 3 start_period: 30s - # =========================================================================== - # PostgreSQL Database - # =========================================================================== - postgres: - image: postgres:18-alpine - container_name: sub2api-postgres - restart: unless-stopped - ulimits: - nofile: - soft: 100000 - hard: 100000 - volumes: - - postgres_data:/var/lib/postgresql/data - environment: - - POSTGRES_USER=${POSTGRES_USER:-sub2api} - - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD is required} 
- - POSTGRES_DB=${POSTGRES_DB:-sub2api} - - TZ=${TZ:-Asia/Shanghai} - networks: - - sub2api-network - healthcheck: - test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-sub2api} -d ${POSTGRES_DB:-sub2api}"] - interval: 10s - timeout: 5s - retries: 5 - start_period: 10s - # 注意:不暴露端口到宿主机,应用通过内部网络连接 - # 如需调试,可临时添加:ports: ["127.0.0.1:5433:5432"] - # =========================================================================== # Redis Cache # =========================================================================== @@ -209,8 +180,6 @@ services: volumes: sub2api_data: driver: local - postgres_data: - driver: local redis_data: driver: local diff --git a/frontend/public/wechat-qr.jpg b/frontend/public/wechat-qr.jpg new file mode 100644 index 0000000000000000000000000000000000000000..659068d835e86ccd58f5a429f270cce802590ec9 GIT binary patch literal 151392 zcmeFZcT`jDx-U8*Gy#e9PLLu9B7&gwfJzr>(n}Cfnt*_GFbIOu1q1{!AVpA`^xk{# zz4zWbgyc-W^{utnUT2&!?!I^bao0Xy7%yWcg!#6o{GQ+Qyc0W$odvEb$tlVKI5+@+ z1O5Qm3E&|>gik<7fKNn7KzN0S=n5$%DJcmFDIGZl86^`PGczL{BLj^67ANdF&vgdI z8$w(>xA_GG1z0%miQT&+cI%G7o!>XXA-Zyfl$ey3l$7=kD4x58!D- zaM%D`avTUb4z>}1f#bx-`Qrur`NF}4;DG}sxexc|ZfQtix;Nn5>@$vA$Z+nA( z2k^-8DXt615KyW=BV==+5_}u+jfnk0ULCbY-!8{pQ%Bz`#5A;Y^bDLgxVUff2npZ2 zFCr@T@R6*Xyn>>V=2If_VC^G6(bWH5$ zxcKCh)U@yE89#pJ7Zes1mz0*3*EcjaHMg|3wf7GU4h@ftj*ZXFFDx!CudJ@E@9iHP z9vz>Yo}K@m7Y+dV+qA&nf1B99%!?eH7cL$i1ds6dyl`+`!3H76!@n*-Kp~?}_{@Ql zP4F!d)q{v{d39IV?`rH)n>zLp({KpQaqj(|+8;Cfe>O4S|5r2n$He|)UgH2M1P9za z2sr=;PR_ZLeTe^Rf33k^GVqrS{3Qc_$-rMS@RtnyB?JHSW#B3m18@NQFwH)g!xg36 zcUym0sJ(;3f5r0x#e1Yhxs&-VguLwEl zGPj}7U2QkFE$FPBbwM-+Ev~qh^(IFwAD5e-KZ&E#QN3txbb$8Dtq%(3n%QN>{T2`XDzY88y z)Z|GP7ARuuS;X?!YKg`t&TScJnNs8+Wgm8g_%Pu;b&8{Gc< ztX#7+zHD-4w8@=dwM|@3)@SMpfek9VhE94)H*#qX#w|k+OVj{#Zb2D8(e0;TqYJ@fjA_cxtH46{`B2 zYd5QgKA?1f+Li9O^@@Fc$@R1K^bfa&*Xhg-O>NSlj9u(vUsztnTeq(#FETgDPvzjb*}PGJTh8sm$u3#rg?5u$H;BJ$PD$R{0>?}VnzHY#^SWJw#)Q$szGW5`&5Wui4VNF@?zbEU7+yhmy@|@aR~(g_a=x3R?)keFw_ajrbW`rr*>-nllXM0$5q 
z;4x<+X$!MBxwgF>yYk0r)109uMYEr>mz(^Lhcz;+lV5AcK)<^EEROfD zU?PpQmXzC9Xl8%dCDQuHsaQd!ayd;)?VA~)tAkqYT*{jK{_xhSA^S1rtBC80m@ zsfc-dQ^U0#-X`WUE-SYFcn#EHWz+QcNmT)3&sLLnGD8dY1Tcg{YA>W2P`BcHt(YBN32A0VKBT(VID)i z*uYejC1g94;Ra6!ijGYF8@0|c0tseFJlIWVT0051=2JL3A0 zmD-iumEut+P&)ThGTfP65I1;ZL(9yv_nXVC%h8V!y^<0_NpIdOcb|%>NKq3L6WA>d z#MBSDa}&;K!Am60aFZ!3dY^t6p>^DRV5&YitP%I^4RujqpfmxG@|O&BHPpBzTNBeP zdh0e_+0J-QT{;{2^3_f8c;#z>4!jdow}VSa5*DbxIkCADnsh7xrs=+e_Bc9#=hJ+H zT|qI;^VX`k_OCqWd&h`kv)R1~skombZx~TmI^-!8yJ=sDo(PW<#*U-q0@= zf}JTjP^Go=#)V9-vGlSWpdu}pokB&xbjMDo5F=t1ze{nRwv8` zek;oN-o8rXpaZ*G{DBoXc%oY ziv{RMHyn)G9FkYa>rPAwVM2~LgAtK*2W$Nm9f{0V*}LZpP5NFHNec8s20uG`9=J6vs?HZI)auCTVi-eM%CWZsbA_(Xx8h0uSc|BXi8Z#>|V)-d|v~}w{#O{4BNB2{d}LS zM5SIxna#lG3}R@kNTqZi$sTIe{!qMvE-<7P&>5W;>b|#7Uy|+^oTk;&hD!Au95%j2 zp3Jy>5ERv-v=Nn?EsC+<(GhPfw*F>DBY6)`^!3^MvnCbhRQ2zx^SC+gkNa(3v>Fb? z<30!w*Uoc#SwW@29_iws`e-U4jAuXNfy|=j8#O4C)(!D%jagM<8U+zUZq)2dTN5qW zGC~<&O76Fd4RAbc?Rxlg9saEU_V;khNN(L6ZwIa0!kntnWOK$g3#!+ql-{hKecv_Cwg`tJss$As{$17*VbpnBV#0AiPK~^$owq|` zYpufhHG_6#+cVDa^JWd=mHuBbEP@_1>q6nFO`vKH{HdC#YWA9AEYR}x?vsl)yqI72 z^S2Ke56z5sa;#m(r6OE2ri(}UOs$wMaC$vazLl`q=+Ks+YSwiWXANOTaObD;NCBbL z*_*SwX?tr;fHM}L!*D;GTjlC|E&3?FGBaP1YR?*qIAswdn^HA#yGl7nkoNPGV8!rj zTDayX_hXk9jJ}+xctQ6vGo18jp>o%?E?APcQAZ3_^h%g};d52Ole-avLS+|?EK?#q zUQpcy07sgI|3b>Gb4Isq&PdL7VB&|3;H92;%c(QV*kM-g*rzl7;|j4;s7jrMU`u_!L(veKdQkGVjgUQPAF*}+)OL@nF;GJAbI!BU zAlB+guf6*NB6!CuYtk@fS++KY%9mN8tvrrIM+B+4T$f+6b2RHpV(+%y4D!tXl_J3N zGs!$dQm`=$USa%8ObxeBmI_e+AK26^lX?lGEyb?pL{olvVicg)9713IzO_;%!?{#x7l)t{*D@II+ z<*A0!Kz_`mE6bRq4*-2;Q1}OS!_^U3fZ@0yo0>(1g9Q&7?-a-_!R${IndL`Bwv1#s z0gE*Tt(f3+nRKh+4SO5htx(!u8-x3b5|N(6{#5_N1@P;{)XgaS-SGB=0bI1J2z3Ni zRwSOnmv(bfCa1HZBD0my)2)JuFq_5aO5H8x>ECRZu7CRY1gd1-PZWmJa9`N?{B;0Z z_nfOMUw=N=9!o6K7vm+p*zivlRNlrKtNfOsV+o&?qe7TG1|6n0XWKq{Q0f++XQyUg z53*gK^nG%3!!hhpBE}Aj-R^+Yu23>e)Ih z^YTM$@yOA;HgVN8WmR!Yr4ezT52Bi$=IWzZ?#Sb^c+r5-R=NzodT4se-x z5Knf-535%aFk3H%sF|?v-Vjy#HD74PW|Yoa+cta^|!VXLMP-?F4PkP?yxS7K%EWOsiSu zIO&Qd7>04D2pbP?ll>zsUS7_xH 
z6P%Dx%>GcrjP0`y)shenIh{v^`3210I)|43C{wM)FYHLrEJ%z5|4{=z{ZY2d^T=Bv zFUl&cP(nnGHgb&LiFELSmaD&CLi7AVSX4Pje3gpj6An{O#5Z?3p0KPI+>GgU?Hk=j zC9e{iM;rb6b5!u0r2CXAqD$Ed?1Usm`O?gb8KbXsgHC7Ihkg!u27$R+zpoGbXeC(K z{WIC@K%n*_Q}guciY#nL__>>tLcsCVa{1(iIq%YT?JAk72QWQwWZ?ZkQDeq^9}9d4 zfgEOCK2&ux+U6QsDG8J3AbVA{SC?Jkmh5}yV{&uKFS@L7dSojfd?N!3^r`@Ak>kT! z6I>tYKOJ*mfxV}JHnw!csOu&vNqFE}EeR*Lm!x+Lu|VnJ0e?Viy?@U|xb)Sc0hQsM zf{iG}lAOEENQN~}9nm>B|CU&66eibW6AQTVg;fvr9VZbjg0gd}r5>p&&5NI%T=L~Y zQ#tCy;7X(jIMQ>olEW>7?QaV_v(ysTym=|a)srVxU)^Bq^=X#Mj&!)Rgj_WqHi$DH zmTfI{WF+?i@Ngy9A6v4NB%W^R<_+^{KCLSKq(t-V-F5etl_vv7pI?5DFkaTnHuyvq zXQKLY+2_`&%T+PuxEsF?8XE6-)eCfcOGSUn(i>kVVvUHlFnO#pg9V24SYU&5Mh`Y# zef0jRrnz(?i4+?eI{Z|PtJ@R%%5TXYA&qSIxhu@QJsNf%HK}cY^1;BsVzVT z%&I|Ff7`Nu=O(0P30bn?c5OBEQGZks_un1-{6<~tTe_-fC!rAjFE708y^ zGjicSrg|%^HK?u|EDQ4|sNsr@uu~s)I929N#P@k?vK~n5Mj@iQeM0udwCzy)#pGIF z)-*bJr(4-VAn|L3D%VYkI`2%a8wjVXey%$hRVf<60^~;T17;(6En?K3wvC<>4+L>C zzR_A3Pk;Ra#@*7ZeOpJ|OsQF}hClXu=d{nrr=J!(&bi8aZ!Hghyh#{IFhF;m?imRk z%5x$=*pGbv28QY4rS)r;1`C-$#$+6uo8cFi4i>{KqPxgn!D_ngI zH(y%742R77vu(Fh!^ZLD%z5bc-C5G(fy_C?84gG-#>dUpoDnEv0i$q?Z;`!BK}^S! 
zYliq!r6@AFs}=@pVod>uc@^*5x!~~WKWu|_Q-=LeS{`UthFERS&XJ+_^~#Eh3i(|b zlrKj|AyKDTK+I2Jz>Y6xrM&W!@~s9KfBK>%K5TUDM(T);U|g)I!bB42N1&?RKG&7q zZuBmd#;TSCAvjwn=nh5LLxNre+CWxqt{3>aS9{sX#IU071{M9R`}w3hCbF5;dEC`` z#cw=o?cR&_dU4n5agK^x3Clh=S9M(aWBpAG45G1s9z%xVLJbx$D6$19r>%Qi~}II7#U4h#GqO!6Xh zR3fV>mHnG!jS|q%=gdeKxv^zu=AMAM7D<=~nH(&Cl5YEAKwh{vehfK86S95&IwyRy z7z-TK^It4#OLi5fK7U=}+xkdxw243(%(V>S=rqvAd|zKuxZK>Q@M8{8H6F^%F(%== zr!)25K0+!TO(@jG_}EBll=t0TT-{QHaqUslU9AC*n#vLvhhuY5%O1qOhZ@22;SGM$ zI-FZC%IbIr7jg(!Z=Rfx8@g%0xg(a&tUIO@vTwdVyD{9qZb3lz$=zBUcF?6Wh!$mT zkcjDbzl8f4G(w@0X5sFJG^^Geo3=zo(LzS_d{ja~^j-E7jo*F| z-iV+iB*->koreKm%W_-nO-ER5N`$}bMf-dpSr8VAcG0Dm^YJ78arMD$g zVdm9Tw{_+=w|J1S)zEF+)2==w6`oTz+6S@BrFK3Q+`#I4Bncn$dBVH`TDShjVS=E z@;GIyPQHvQs`#p0=So8h;r*_%)VvtkWWeW#YDfYJmW@J?f_t%1o{f5&Hn|@Esh&~U zvZKA_5Hg7Zj)WH--}LK?iE8!Qe|URy+Xl?*QKgz6@`d?HUg)?k)G(}}i26b?8B1~dzYKmbEg!-& z-iZ%;Dbqq}PnuWMbVZF%&AxUa|H(2I=o3^U?8O2U)!b`RkeKQ5WqN*luCwYM5}-A4 z`sHEUtjEEcAp$Lg3J0^omnWAPrFo<^^o)BCIV#0}d1a$51F{8uZw&88yQix-q}%&? 
zx=aWcW|6ccmQOkpj*PGuIZ53)UHNuu(@4wk*hPmSc!D{yjU+=W>NE|;TwqU&9(&NsciYHA8#O|Z32aaVt5psGu>7Rv2bjf1kXu#X?3*7F zaz~GH&aUjRyJ|Ck$PKVrM~=P6C}v`T(Fcf2d~V3_-w6xe(9`feO66VVPmVJIWo2LU zsIwkJO1CEu)i*YpT(k~cx1M1EcA7veV4RO!eT~zHY*1W-9rDj0XBKk?=1LkbxmqF( z>#V3^TwNBBW6LIb0~Z@%Ni5KGmZH5WkDbjTEWk;#fvJlGH@Gzevd;7WA$NYasLQzL zLI3R{6^&AvaZQly;)W8wYP*d@#F55X2ClS(`DAqfOpF7LW>wO z?6r)$sf*zQYP%#!Q%872UEOg`PPOLcm!mJi&_J4Fa;Ay(R~=LtinZebvL<2{H$za3 zBSVG%T{%>oT}GIaEm#$0WEZ&G)6B5tJ7X2ku|JVlUy(0gF-d5maIz*}FL`6c=rk~8 zZQQqDqJ#HthR9SNeXUeuMwfRim3kfVLtMN^bXw{@_<$E!j!A4WPiRk8gTZS6E4=#t zcn`fK$w$sUhNF$V-&NhTge>%&YQnD}SB;{u04}H?^Pm#?-`yj`_>aMNZ@@6Os~eJi zq$K|3!|;Fkr;SZ9?*F(I5`6_MF9}Y;Q1vvz&S}j-TIe(KwETiB2sl0Xmk;4{kI^z< zU7CyP-~K80*J=_p{P(9I#qkZy)&DVwKl+3J#~}acZ}w70sk(0`?bi?=M^SlTkdPxO zli$RkXBgD_8$$WbAOmOcsd10K!2B^|bO{*|V_zKyi)iz-q@c}mJixmK9fMg$H(WE(a zkg!)uvnYpz*~8p8*lbI9jGX%1@k=DdWP=|T2p<`IbAON2m}v!adVKCS&xf;85a8iH0(h zukf}@CWUI!0xFfvW~^HLltg{6I?(c5BH}Xli955&8;RLka{>v-{>kil(18)*vHu>f z_3?^fLBSVQzQF!*wn*C}4Fh`SS00D08^*bN;qzB^S%U43hriFCpOzbJV*zb{hZwt+ zj9KR+Dx8BI^v&E4pjo_+!K(wMX2f+DF$WJU3}-Od&_BwzDn5Vp;*o_Oi_2^C+HnQI zf+K1b5an5;;19_Q%J%hg;R)$a}NtRpsQCFRL_>u zkh9{*z1KKv$O6#CY#G$Wl>2X26_aFMekcxlNH;{{*m;FsboK7rFvnLC2lMyqYG~RP zuF4(A6&O9~KX)xx)OZ^5!-XS-_s$~apgFl;T`NoauJXKOOZdy2xxp=M-V2KFifnX2 zvZgMxlpoz#!@Qlk2}!_8vas>tFY}pG;tj_!edBrx!<{1vMvD=;@9pS-vRN`W`UMey z35LEI{sDe?y9R^*@b_Bx%@BDpJI{s_evkEbU^=0AJVHTNHy%vNephW)qlNZDj74@kJqoqEUe3z}JT)aEOQI|c%j}^CF4PEh{>XSb!YC_W zRO9VdtbZTdUWwH5r8@zt%?McYFxymzZL5+kpu{#ja2vq5uOUv!X3$1}x2mzs-o;#Z z-uzMCU?E=#rQVMZ;}q0ibypnoSRGa!THXwGc}bpbWm`+}KM!FbQwyw_8I~&CI@!gsoI58#KHsfbSXkYhf z+ju+h{fVHoR9tekK{zP-J`tq9tn7S3Yo%*{9d@su^VVTl{hC-9@`tlcBk6Llm8X85 zOclU;V}hZ=Df(k^K)GsG{h&I_OhH>%e6X_<7dcsTUV1#6XXog#ONdZ67WhM!;Aabe zm!%^~>NzYxBz1Qkb#qVOQvWiGafdv#5EO5Ao*0w5&x_QAc&}MWnHD|$|0RoS=-Q-8 z4~4kttEk&{b?z(h%{~d@cy3C9%*RC~pir}ryyR+KED+$omySN@ui0e=3w76|n7v*P z7pc0DAPw8dtz}TRs%u+>3UT&b-(!eQ z`*(DTgVhI`zt~jL+nL9R;&Ly}X$5lizPdXYRioc)W@BI;`LxN@stw!+c2EeslLP9=&Q6MD4Qp79{4se 
z)GlYlVKvql1+lr7M9{`Hiqpovs>F?h&*!$%j0irNh7P6l9V+W~&1%J19O4E-Sm?GHf2?wU@#kVkDhKao%!j?jM6rU6YOE=7^u9Ps`6v~AdBW#n5v6pw7jD? zH$*#VPZVi&mEQari@!G;uC%={!A^Y~Xc)LsK=e?9!~#eSAIa*VJ?N>GBn;z!fdwwk zKYI1$q5!XIpT&)tma+=bw3IO zYO4q^-&FI%2+`0JT+S7G#GWogI()xLZy~@G`k9c+ius9XOA=QR;K2#<#Vo29YRI&Ux_cab@7(*05J`ekAgXKB5f9!BBln@ZJD1L-7)7m0_=UK# zZI(e-j`c4iUAc?204j4U!e^-3lbx`Hk#w}$E_Gt~>qkW>Ip6&Wr}0iPb|wq%N|)+%yG1q&o|+RnWqh`X>?3G)I!{!>@P*k z)3chqW<{k8t`xn3?RLe3MYB zaBfC5h1dP}wG7Vn9S+CYa+?^;bB-ImPnxtO1tJwUB0Wt@XIRj9Vk?0;&MK6eLG8RP zf?cHRJ%PHBO$wW;B9d1`7I1x}9CLrvynvqF*hh|_YtD&v$gu$FSQfJPO$+8y($nIt z7w5CtDqKmf$AW?Zf(rZh9*0TeSCx|ya6?i+n-6Kl0@jE#F729QO7Mh+YzN}@$OQaa zCHwOQ1es#$Okv2TDXLwjFWA0|QBY zvVa?W%K{6qVgWKNFro+EO{YgPb2B#xgP~_3FsgGMf?JxYNKnaaBgQ7q&O{LTnT7UBO}R z_=HRSVOx|n{NV;@+)?GQfSP?Zv@jNUrfh%7P_#C)+f`n3Nw|sJ85ofPYl3I*x_LGZ zmlv1(X!2zKa^+hPzWL=U>km^;NVoXMyUnt{74`p|?SgS*`e*i^-Tq zBy;=?rBbjpSE3Qz4fRSByXS`Qcv3gD2*2ZS4aP|3g&q44pUEGCa5VB8{L-Ti9qMg_ z00&UB{}kNTTwQoX5|m%kGU*76As2U5(ShT%mwsEINz@o_&9VoJe_FqO134zpKFadF z{9Sue8H~70sucfj?KrNdXSl9NNp-hz{b!@|1S+NXBnfJiTq%!XP?pC6N%+5Ms^63t z@^6%w8exgLF{q_~T4Zm~B1eY*X^eL+(4TO&26aCCon`ui^hx=^w<$DqvzSraTU(SnKUKp=YmyN*8*)(U|e`gK90mFNg$4)??D(ES&8zIioT3Tn`PS^kE3Ff84ghrGBuoC1yX%78pqwE*HK@j3e(A^OWm5PT8@0sRirETXOpJG4eq z$`1U2Mdt2?6}Mc&8GN6-(Za)L@i8HGjtxf}J$I#UcZ=0_32g9>*;l+4F%}(&=Sgl@ zw{@5zVd^8bvD<|%&kZ=Nl3P-LQCS!?FC8~d-zxy&NO013_MhcHBlm)10h12K5K)kMog-S5PV$39h>u+C7Ue?jwKdEw73kx^7X#U>fj6eC*kw6FYA=@w zMVNhRkJ=zi^0m-2UJu$~k1% z-aEf91VH_sLlnLbEt;Y@?~v`=F|H^1fZrHd+r(+g4GvN;pd!-~8zG^vK zvnvG(VinhM7DzC4fHB4|BxcZKF9^yJ&81#w7tBeu1%^MoYN6r|DUrh&g{xc;wJx#anOS_~X`+!Y%jO9l_XmSWdtUS#LYvbIu@~i>3qJR+A2hGz)fuob4%2 zdEA9F{hayUEl=Hec2NFK<9^~iyGW1b3|IV_Ty^wibj_k7XeBn#pr=etg3=DZ;c%gVhx*;V7-M!MbJu7j z-;^P&a(b;sI1XBB;f@*#H<;3}Fb-W=`G+`(JRb$qvX<(*Mv|{)An?UcmawB{)k}t> z$v;gceF|7moV!I0%7hh$M}V^M`_9(~d< zM>j6aqb(9l9e8l*{HBe$_I+RE;*;K*O&Dkjl4FW3A&_~d{D-sfYaJxz5+2Vxc(2!K zu$dAG?*X|bZ#cj(RAP_+(ihB*XzDb2uL6H_qW@spFCNa3=Bw-uNJr{exD*){Fhx3g 
zjx-$|+t=luabiG(nyZfP0$hZtTh8O)Q}Fh*j2!r28)glxdEXTV(E~FW8(e~3&-Eb| zC<(vhim9&2{Rp?%IpZydT~;F3B#MqM|C90kXG8p7z4n5D28GadmlC7Y?T-QB?G2?1 z;%dp$E8qYu9)R!@aY@PP?Jte*7QTf5gA@#Cs1X{-RqCywd3X`%C(dD)-in}~P$TUA z=f0_Hb}umUJ6FM6kj4lo@!2NYpo8BzgH_)D4XzNE423wym;b#H5(vDUwiXa1?D-{u zuT5m?2r#$dd+RNy6V8(9^;;eWdMJ8{i~&a`=4>8bXd{JGK($yBwCt9b)*P5x;S;)a zRjDZR+6p2mU395!tM@}C47aW!@U2FiMrZ1rE$-Gy+-4Cq6EQb&BoaQ!{$mFmQR2G< z?-xE58(E7&3 zxcrh_zd&#BZG7Bz91_FqtydK_R|ln2O*^A zI7XLLI7_hIo;M^Qog1Hv!2Bk(AJvYPdrx@@fVBTk#tj zmA>6m3IuwKDIf2=TE-bAc^nC^cyJUbH9Xy{9qSh4?0!erQU?)PYrG?1aCZw z9-cNJj8v#Q(~IuC$v$ZJ(pv@a7UNEO{r5-5boTCrWQqA9Y2-1InpP<8USZzoqJg5Rd&hJ8+Pt2ni;h6s+0&^0 zY6dhb2Mv zB@Hw3y0dHVE5x|bF+;R|%{rEmMwElrVUmt2^3^@>snMMg`{80QcQ9y?*(;y8T|)YH zmLs)SM7~*S`<2_ysZKYnV5qC9q{)bf`^D?9HLf6Y&9`hHjC`b09DksBH+}-(kSuEO zu|SWC4oG%^?L*E?-5ZL2FZxvEi`6`lkg|xyXg@`EmQ88;+F6HQ9T{1`eBGG6Pw2~A z`%D`#?{}Rj(-x=K5qTO4t)%kwcoyTC$a?!Z_&LLC1ey*+piMfhH57I^{1TqIZO(VQ z*k-!uEJUwswP%2g2h@6b6|$3NQhQB?@T0s(W7*#Ofb84POUm|WqDc1yVkAPX2>O6n6`6ZiP)pA1GHSY<4%rg(-3g;ha#% z23Owqc4jV${UDV2C6J7QJJ;0JY?bPsinWT6n)HDRcMx}-s|zbn20cequW2>0BEx+h zIi7aMR2Erp`n}kvRUorSp44ggU2r;B;MGOhCn3@nhn*N8(E~Vwo~od^z#vvG6r|I^ zrQg%Cc)UjznrSE`_9X8alIQku*!U8sNg0IWri6Kz&0NTc>TdD6r|O4l7EdeV8=j}8 zrlG#-3A{ItStAaUpg-TRyV*AwU+=qjns1q%{mevrZCN+u5$f&4TU5fxm+$~M;fEeH zX9RL~eg@48Jk{3h$z7BM^jBKFSZ-mVm7tCFSDfFTxt)9^HkR~<}q!9VnrT3(& zhGAE|L=h@)I!2tjW{n<*rn$41R6P&WU1ZzTkVY#C;cx{@F`)&@_o$be3$mNO8+T&N zi#IZ4$%a{(HlIZ#tn6i7@CP7>FcUy#cI=ib z%A>!dECdzV6Jksq6;Cprew4xU6~%K&f>v$W&KLQL@Ga;jhdxN=u2mB!jxVk8bB(2) zAw3iiZku+Gr@^0G3IppIyaC(_C4h&qBGZ%6qO__sXqu?mSTBJsr8?Ix5D57hHy&d#3?IX=e81B@ygB?OD?%ri7(h=*$^z92=0uH^3;p`)3wk8TlV1 ze&tcJ-*6Cc5nBh}=dwf|LqR3(Q#KF|olQKWEQDVWf*5}_Eju1q`B&5xeLT~cc*cd| z1o6624<^X^G}4FSpPvj=-E|7GI2<(%VBhQTxZIG;%^5$$zA_q&rHoU~;(<+TkYyWG z24R;LpDg@55D6kNA1Q+0gk8;LV$EXCD)M*_3#9Ikj2!I~{wpB+Z+i?8)aUmh#cd2O zvQ4kIW+oO3JZeKc(`l*s&x}C^kV}4<3D)HxdO@hau6l`ERbDB747&daBnT68f55?< z_*wlwuRsX~hHSl@tGOTsf%fW97V*p}NW8G01c31JrVSQI(e{y=`90k>ke>wEO>nZK z5BM+fH(H8Zb 
zpNC@s>E9eWc()IyQ#O(_^;<5yy2e%#r8Cx~3AZvj9X4^`{$BP72>fjQO>pN&O0S|eqjr_a6`c5@NZ`xbWm=&3xxg>0@=!G z#)?i(PDCtVKth(6x1Jiq^mc@PVTf zVw52*S_t-!&O3g0o?^u?{UysveLdcJ(n(LDqOq(NQtdPVYT51&Z8@G=UlG>+p#mbN7%@i)78X-gTN@-G*=dw2Odjc~=gMC^&r-5Ij2?wC7$ znSMaf5tY`6(sZ$HSA?l|0Un9I*-J&cZ-VWe7-fa--I3>bc}M-7HIdZpaFZad9=lGc zA3JI0%~sZv^j=(4G>E#itQ?&sX?DR>{&QVp_S;fMO9drTrS!SUHW8(CSM&t1+}aKQkZm`+>z}3X=Bu)=pPeuLM4It97mvwI(ru*( zaHT40aGRJ952&QgkbU;EO(OQz7j%VN!{-*;F?RY*$xH}_fwm5D(4z(B+@AlgivcUm zvXI25QIGdtgwv#_OuGM45M`!^IzJm7E~QJt&!Jt2&N@Mo2)Ayrx~E=YjFyQidLd_a zNPGpZw4IRz_2KFMY9o1cNK^c(@}P@VVL>M5g(N_I+_tOZPak|g;I*k@+l~6$$i4B% zmIem0xYxG}r=ThYrE1bKBnk_!poq_aH5u zC@QSdPk*E-liuE&xYhr-U65UqB!jEh?l~;f+sW8zx21U+e?*uu6Gp5npJqfwA%rkf zb`lH;2~G5*-?`itAMOcz!?wyov`?uUqaE+G-+=qx3+c8Gz(N7%LY=6@amqRijNW?9 zDOnBL$OT2R7Zbi9d243XskM!fD)Oe>7(0B6^n}Ux3VNR#vY7bcxP%5JF_Kc@KlgE_ zbI#Bt+TvFF>Qi6I%!J&sA{g0fd{b@d(4mnO#^f7b?7bhn_;??2Ocdxj7N$WZtS6u2 zKc_B4pn+g45SRoNIkiYbB#s?u7F=-5SXLHR-jV&a{Yk>&Mc<5M=5nldXe&FO$HX_Nz<>K`oB3BG@sT+Eikn^gviKK%=$iHjc|vNQUUOLb z=-UVlw{-3PLrLQ83j=c+_2x%E+Zj9Y%uF;-0@uR3eWd7fQs!(6PmtsilFWswyqsvk zUor`7r7yTfxj8O$et|RWJq4Z1gHMQ4F3^uLtiFM{fjEBn$2|V8-QifWDC8@D_38%Z znlCdZ&$k^CSjZ$Dq?u^944ylHu?k0~$ zo@vGPW^fH|z4X57l*p3J$UJ`srYneQqO6nV-g*+z_Pxt~XJPW1O<|XXV%FN8WmlG>E5+Id`RAUBr7_S` z3~!)#yODEAXzFpvr(i}PA4Ks_an{drNiMVaZgUQJ!JV0&^giTROO{Ggg+-sg6kPgw4<6rpLYab=d49U!H${arQ41)i|~cg*kr>VbW5X-O-8cN zhUuMG&Qq^i*vp{(_zTK;WN;;H?$kEB(#W1BWm#%C6JuEBPj6^LeL=H zpFO!GR{PT+;nr+^u@RG>XG{+qzT)*xk?2e>wsR8acoHslGQL#JfSsY>i_*89ZRMn6Giq>6eTH3_AS{`l*-O1$sR(sVJuPhEtISwWSL~kPL?6EN60#4 zXT~xHW8UXE>iyZi-*cUFuJgUl`ThRrGFKNf^LoAR?fJML_v4y8Tj4Y^gKKj&?Lyd;h8QrIKNO9fFpk%o;2k=t~q$a|F1Zs&` z&uPTop`W1Od5;ber23nGO!+DF?*$<7>wlDp>5Q%K()O>{gO-u*z}>$fxrzJzq+FUw z+y50P5iCgWe~0M&j^g1X^<>q37`ZNsLXsTK?;=YQ&uq7*vWLoF%tsh%JPwsKf2Vy} zUm$$f3P@QfsAaPB-kKC)Z4zo7=NM$mxNu`wqlp(AKzDeX1x`B6BMd-U1gI4oj?@#A zAdM|-g=@rXa-frRt*#6dQJ_U4j{3qh4I~S<&?LxPN)%_{x)VAM4l#T!^_xOV2qKjv zkZDu@Z{Ew1qyQe_UK5IN61-P$K6q4z)x%Kp`T&2Yb|jyx?hdS{N2+*EcO1K`4+#0+ 
z6tz>HuovBKUY%FD@Zn>7hlK!JgWMd%&qtGjSH}HE*NSE9zjU^g4*ds*0y%-u@NK8G z)9li=w&0+h#xwD!nL^EhiyG-BE0Y2ATGS7Y4QLPa+5LAUzI0S9s4CP>is#XVRRd}zsRuoQffLcnygY89y8=@%(pyZZ;!+Br zuf&Do5#ZT@s^S`@gHCyN5}T-|C>lEsb$D~dLH>Qy!a4R8#LMr3$7=g zrO%W|(=zD(bhKlt&~RKp&`4G?<_MHaG=R5u9Rwh=OCAs_+v%$KZ>WeMrgdg`u+?cCs3UGgoHv1pPJHAy~e? z(p)OLIoJq>ti}x^*YX1kUYg$pfXRqXqt|%trL=q-)+isAtj9-A`)hqmun#^Ag(=yQHRTT?oKVW5e7)h!{p~KDyrsqXDhFAO}Z~48gQIfLbR?PyuE8Ld}U)gvR{NUAK0>{&O{Y!g6$OTR{AC@MzzAJ9(>ch-x@!bsy=MpiA zRfUo9Zn-127kMO0f_@3!XSwnD_Kj4kxxmyTjhHl$?qLV6P)|AqD*WaiXyqT{LTzoP z?H=;>@kA_dOvxL68E32Xv4xei=YB20&|7szT*J<>O{QsQ*1=d7T_L4^x_5vuVFL&g zYyad+a19NlmAXGKL!Gc@JtZSiG4l47Cy&92bNYHmMMD(`SK5{ql6IEJ$0=PjH0Ywy zPLOUvTooaoi4_?>R=~-a^`cfp0E{n#*xh`w!_iBmpVVPi##0aJ4~XX%%5>i{BD-0WarB#98Z)%-O@5C_0@;=We08n*9dUqknec(WaWO)cv24q2$iM6v z@i_8Ks)}v?lN#s_{gIln>f#|9o;Pcec5`$%#W3Z$CXQ}0M2lizbo}t8OfbHWlG%k4 zKoE^`Jcg=wA-zKj0k>&FoU*N|Elir1Bs^AUPg)%8Q6p*5rAK5eKG0| zDUGMf_X(p-$6X9Kj8Wr73&epPv3KYgA;pkMqP-ja3@ z+C?Y5-)q&L1wRC8U!spkk_{r zM+6$`P7lc4OIl6L(a9a@W>zWx&SA*(FGkfJ|0lbY{lle-$nA=F{4`?Q0hL^~$?btz z?wvYj*F@kb$|y9fjkTpa8_fPHYob~t;St}3BTo)&>csLUxz`~g7ZPv_5$^WTvuajLHWQdJ18uDIU}Gw5C0Y#)vlTJjYBiN4LP z!SB6+;9Z(vKt9M^)HQ5RpD+y)cr@r#if^lI;LVllHsOLlP~vi90pKx6e2Vi1An_r9 zcOGO*N;}j7M6^a^J@8@kUgHVebB_{oPz3OWgLrn3c)NF5L3j9fw4popf9suwhU@9- zXA+4ha;^G?#5y?y*Wa?+nx5cnElKG7`R2)_wZ)z2BNrQ7yPQOQsL(|%)7`R4*(Sp#R(e``x3{r416?WR3{ zK~}JTK{RL4==p!8J^#OXO*6?sDa7jkofvu_zt*6$BP4Tb4}|^sH$ux7AVz-^0{-Kt z+^0ez6v3NaEFqBUbvn%wK&N>C~UV zq+5N>inb@=2i3(+{pR&oR0Nj)j#}oq){++D?nAZWLs^+AB1r$S9-Z2@A|a{b!UPzLfeu^1lpa(_ocK?L!?)B=sk*bOy-$%ST5z+3(c(^_w9Y!QedL`&gQu3LT{Vi z`S*|2V$|^#SDEj>}MTg5+SG_cG0ErOLSqCV+OEd8d?>*&TxHLIz0L7kn; z4oIQYkYriGeJ^&PU$$Ib{z|1J%7A_6tA^C|hj@bRH zlkJeQ0SBG6DWw|@{L7XWL%bH;Phs}@n5IH+3}sFePCf&^pdN+p3fbjyedpizTNetj z8zZJWbPjpgni_2x-K}m9;61W&mN#D4oAC%gf#-A4>!DcRfwXmP2h{S}Q{=D&KdE{^ z!LtB-X!ss!tA_~1w&P(Th2kl8H$%H*TaPzgyx6@rt#qH4AP`RAw+c_%sToqiO^|QE z{}n+V1DNl>x-|{F#cL%kxRm~zg|5|}#Jgbgz>Au^f^a=vUx_IJLd^HZ9annpx(#_R 
zcML#}`MuWqVEclRb;{|mbUE3*>+S9Z#-mF~_gE7|uMVH{A08HKkdjyP!X|Bx5P3$) z3gF^B^e1R)A(tr0`X=ng)hJ~xRS4ICJN|o^|4nxUd`kgf`u|3EoIRjB!vCf_ezQ0r zguwnLggo{;dJ{%@*MB86P+qf~{0p)OS`My%K`tcxeOLsnLcq%V|C&{J_sxxz!u5}n zfw$7>M=s@iW-ca-M5c+UxY$o$cYVmH{zR7NxjUDB4;j9nQ7X{S=Xl)Ew9!k-**@Sp z(&86wO!%%#l+jL6te?Vr5SO0gtVkQ*Q8;!KMSMO(j{Pr)+!lgF2bfWHhJYXXzv1u5 z(@)PecSP}+W|x&ZT4fnYU;bk9%c4hP(mC0c3o8&8YuA0=I-T}EyBO$X%JiY~4Whd- zzd2_XW*@J_Gj?!HbU_>^sY(VRIrHoidN6W>G3bO?=948dxdL31tX)!FS7HY19yx}S z6|64GDRrcN8Mz1}J@Zp&Kr0g4arheVcgoy_!vVX^H3^^${N85FuD2?~z|`L}6l{&) zBw1O0812qoq|Y#I6Cc5Xh5rZR~!H`tpnD4_em@X~bhN@cv5h2Y1ZWaWvDcXsnRw zwGPu!XO{=XgUgV@GnMv2f{;fRxvgqaZS7lM0XMj$Ml+8at~B6wcIM=QDBzyRWj_UPOWfaYdyE|nCit{ z%t)phLr*)cvWXY-NiLtm>9C?)z|4c$B1U$(gL7s*uMtLM{drbKC3EeqXh2=17X)GX z1?N;?HzEGwH2!vJ<>7z>G$E$b`Q%G&Z4Gt?nZUM{$gPDfP>;ZU&n%LeSUH{r=#G1r ziE)-wy?atl%Mc%gXI2&HL_(fFvKXwh9#XVQJC4aWxfJ4NrI1$h+H6`k z)SmwW8+HvN>3s5M#t>M?2*)zxuxf4%o53Q?*YZfg878!nq-d;8-l`WM z-f%FHyM0AO1rxAZ*Dp$5P6@fQ6p<;h0x-*xxT!ZdrJA(10fnjVOaI3%y@&PR#jT&CiP)3cxd$?jr0sX4=-u0*)97x7 z%ec@jcSL2NPtxV`(C-~D9_tnuL|>+kYgp8fW^1g(Cr%yO4wtYlOJQ8S^6YEztz#Vy zw?0T&@}5YuNS@2Z$ZaC)NMmcuxe+QN4@?@=FST8>1AP0eNv+Xb9NM)k++9&~jCLUq|kQXb3ak>r7RFCr8-%XJS1zG3*L& zKxWbJ04}B=O01Q$$TPCu&b(P#{ANW@5UVM9@?my|Xk4r}1LU%_v;aVJ{&9sIlq*#D z^6v9m5RdR@d|IG`o872uz+{OkZ}_a?tI3btB@12hb{|2*OLjUZ@I@q2_*_B--H6A` zspE?GMK64$s(oqEA2alWAkj#&!3RwQkT^zy-!eG5%S{goMB|w(E$ZRTYU9wG>t5!5 z9yuhjAu!Nd6c4B@$G{0xUlX@9saKZmQQmYHe`vPg*&uV_*TVcnPkyyS_huk;o&u?_ zvC>>n(j`X#Wc4%&kZDvXe|87gkA;O@lCyblu%w@X zO&v-L@sT(Pvn(6vR>v^kbA3ol=S=HU^zW5(rr5u;s=ax0|C2j41TqPc*A(D`z*X-c zK&AF&jxxZgLXVmhhm%yj*BaJ1YpC|Cc%HFnJglXfOLk6G;ufUo zwWjdCE+bv0ejtsf>{ggP&1kEYXmitFS(<-!@{0H>DjSI{L@uMO>e=Q(HZ#Ex9OdGN zdf-eXb;;)>_PnKpp0mw^FW-qjy|(Ipj~1IuJ&a**6_pw4%)v@V{fL!Q<^{w}N+f_R zlH~W~m*xyA$YV7n0gcA4g$}1Uz%FSNw*d)y9vgt&s&YUbg_Q(K$yxPH9c8mIY`<&mc(<0*#2 zw`Fmf*qEN3G;sUDJnt1v0Q-S+82%(UH?Yi1K;(S#Jvwv-9NfOe{aje#q)(?V4Lgmb zyG=dguy`dVQdHCjls*5YfI9En_qA!-wX?%-@Z;nz4uJGgl=GE*#gt;}CXO7Pq!{|J 
zva-BfS9tZ|&di;dmp@LlaZS$oL0V&Y|48WU>&}14kAh#fdtwKS~`j*VW*_gJ` zaUXb$8}OK1P=5r2`ePl`AKak+*m)fKzsE1-M-yZ4zl3#e#NEQZI(3}$xsdQrOh@eH zrn*9QyEYf}!kxGf+wHZb%DDVyBVX*Z?G3el$AwU#sZ|})bnI(bY}fO|0DBXe-X0N`oH3zvTVA8v9lP!k=$F`+kBO`CuzMn6l6mP!#}dN z7lS&`CEBYt?JFp&y)dUeP&ha}ID1v&xf^>MNqL3`lZ4;iPZqz{^5Uh)y%YT%+JZE5 zQgy`L(7p8<@20;XkQ({-i-he&nV{C}lfpHlH$$0KMxV*}d18EnYl9C)0Tm4QbLwZZ zS^#KHtltzs3v{y(PBo_m?W#xdS0pWJUPSZm`1T(S=!S0}mSaj)&zZIh2fk_F3or{s zA7G`iepmj2JnANbr~==2sUSbq@4;q2Rb;H8$@Vy<72B_KOm0KraaA;A-JFz;O{e*| zyCpIBWcwrp+xJlc#KgA1X~jJ{Q`G$X!tN`Ib@eIgE{=!B3I>PnTV zG_MLlGO79qx$j9^yO@5v$kDP=qdk+Q%n~4C**=3EOu!IadkWXgljihls)D%0zVv(neCl(Zyi%*`^0H1AZ|xNZ!f46eum!O$)yPA%6MO zdWZm#%e)=59^8geO_K8yo0rD{gpgGJ`+@YnANE z4F$3v5w*epnnqyVz$N^Yd1&>B=xe zTsne}H~0edDWli`L*%bowgY(xXj7d$7GriA0Q2$PmkND9cl#H&fly1#=lPrQoxYQM;Fnwnmx&VYqaS)%xy&Q2}q0 zz=g4Cr^EUP=OhTx!>#DW6q@u(2PF1C(+S=Pl2~vEs3cg{_V;f5#OX8&(|p+_$rRV7 z-Ax$%6)fF*t#yaZS0-ju9wx5}@hYT1aY$!;H~B_+JrDu20OqENuoy}rsSxGgd35NR z-pYy7uD)=+`)J@CmQ#zSRf(tix&C%H)HY__};Ax72ty-zy}|j1L1TF zMoZMXg?sXX@lCeC*G$(>X|-b5L*148zzljq&%ecd=&eQ72+}-17zB?p-Bku53X6_f zTK(pDfbC*U|GH$$1)7LwiAS9-S54-Tk~FfdAk*g3y}8Y)n18%m5r zJV0s?_1_T}t)D#MUUtl!{P~5-<^t$qPU9=Jn6`bH!8@ z39C#%k6A!X;>;v(!J7y(C~jTUkgf6P-RL@ zgmV-U!f;r_`hEdByIkybP5re?3oM$@;zlWX^*i5?_RD>;vR4gKT0&-$_Gqvl_Xq}WNBZXE+h^)i> zNgmh`!?D+g?z3NX>CvB{$W8p-zZ8#mY?0RkI^%dk*|}`j4WFb{W9tgnNdB1k`|3*2 z3SF=;$8JT27&Xn{2#+KweG)oP?)xmo$HmSTxC5HiGIYN2TRF?yg|OPW1c@0D#SE_ZOnkfbd5EomlhbfjW!pUUUbe9uh>0um4Q)a+y; z!*Kh?T2!DZ!5aY(Tizi2^>wE8u6 z11DwfrDf2?9LfGwhTB#jkdO}FcDX+OC<|iD*l;3}NL?q4F0MO*;YZPz3TEl486Zvv z!$cLN(isoZ=6_pTwF_G~U4p zcj^y`ny5c`YF;t^)FS*$boy39H&38HLWE=2m|zsCiqp>R3};yj(N(AQwCl&G!&2R9 zjrNSbUy|U~U05wDLLg)7b$y`efSd_&lKSBEQ_E52mLs3)MSE^;Ms_x?0GNe#f@Gw6 zr0OGAe$kJOv~Fy<)-;Rs-7Iz}T$fcd{E2hEL{+9Kgl5BukKZC=#A~Zt-V^=WFzkwS zyFj7ODNo1C`|&mJgYU;J&$nFV@+YLbMBQ}R{8jff{g@)-;?M|Bi5&{KblCvo(JJpP zAnOzf>Q;o?P$TP=njvR(X7R{X2*soO>bC#K`1;iUO0oZc@S0)8&Y*vj`>LtBepO`O 
z$mv9PIUfiY5NK;J(jlKx1Qq-)Tijm|D&RBILXrFV!--8*ywJdBJ3RGgkrPUAdk;N!L&x)nDBg&5Z`z!oUYx z-fS5kAo7NxxJV~o)K4KRbX91QV-MAE=xxI|_hZ?T_r}aRy2$MGW4jhlek`wSKWErE`ssYIT-Y58f@x`q3dfSjc@n7Rc@x#Te?K~ zY)x%aZ9{`p><7QjK#x-%%$$uuZ8aKL^8(+r}7v zlSzz}k6gaT^(aVJsr#C9GMB3aII%A`EGzKcc>C7QKr7}4sokI+&i;0~uAD7lq{n|* z?iFZe0qU;{h4hvYsz{;(+=-V(71zIS?mONL_zQBQDspwu^3d6vlVoVLPt=*ZYNnLnth=U6oU`hdZQ zHR!hG*6~KUMAMnSX7thSgtgWwJ|3S<39)Dwh`l8V7!JAXA`a3_CZi?-C0Q?<)_DVo zLnkMoa*!Q?rsA1YRNp!H9w^1$3*$1cJ=ByvOPgksRkU%E3S&LNAjkR6_V%L8<3I-e zXDe0b8DbTh!XME+6qV(29&j;@_@hBU0qx9;@w}w73TK?LI-iaicbF14wjIdadGHDSk8eJ7mDoyR18{X4|R6gO3OGU_-k!0?_ zR2+HM6s23+g+;xx-hFz?lr(|kkB>PkF2(^1?Wj$ab*7%%M>=*4zs z_sK``IdHeBN)fuvoO=c(0krpmQe5Vcsk^R$F0jQV3G)R=T`Ugp_6%MJiO%Fo{Gse`>Ntl94pfir$hKc zsLwD!=pZa-g%85a{NdIR(af_ zVgvW;1o6RvQyNoo%`&f^3CXgAi9QmJ+90h2ttI$5|By6!DlOrHLl;0;!TsOw=}2QK<_h3>4p5h*n`t z&w~VR(S-9n*KaydoG^5(@CbSO%=6hC50EC><@h`9e;N#M2snL8{0puE)#6TIM22#v3c&N{RLT=08DzJ9HPDNN!al;;3Ev#O+CV6N4ac5NR>_+(J*jnV){CE5iM(r^%uW0q*=%a830*_G-^gWh<+5*NS zAT(5SdOrxDR zd20-VX4xo_l}&hjHl$r3PyCCE65R9apvA3cuNnzVr#g)t<7XY_}`wN9P_E2QzdMXQu9* z@c+5FuqXs2pOW-boPVJ6J4mCC?Ps$`HMhTtD|2`NOh#fBWDLm{a;m>={+J)s%_t}4+cvzmbK(J?%ZIn`D&y= zmkMlN{ThrH=b2xIMSUu%_7J6+>nsB$i>hTtU|M^-a=(z2JL zzqyGpwf462Y|PUO((W?8uM^F4Fv#w_`8*#=%hU)?Qa?vlE8i1Qx8iOOitv?`MFp&% zXqSi3#QON1tJEQyOuzt2`GE4?Ig|>@Z@-{JxFWdWA&o?rI+{tp8>mf81d*Jlx>;|@ zDZsyc;nBs z?v4zqm-Sb)yPRh;Xi}T6*^Tw!_7F&y*h*n(K;9;1zxSXjbeOGhxVDQ*9#t2-7PGNY z+_>mL!?m%X#J$fl3Gj-(LDc3&6V#+R<+k+JaUiev%X#P2lsoSytTH2))SNm4<*qu3 z!iofGe!;_J6f&|%%7cC6?^AoXepXyzmhwmiv+v#kihR+Ybd~ZE`yhHFhXxd+h;!LQ ztL766SCcFjgGXl)?j@foNNWxZOmUjpuY9R`|Pvs_MWn829ocgiZ|A4RR~f2=5?t;x9Gg{U7r?rKDp)@He9|UMVhCX z%a^6kU&<{k!kWk#oT|`f0*%>P#ZjdEP9f3N;WEw~g1)#pJUjm3}#B zj=J!)&xh5=%~xMe8Mb=bPfo&2+@@>R$Q(xCK7+~^cad_R{r4`-j|38nmJC@}#fc87 zV>xmnuUih^S#%P0?hoH$d9inGfg>of*?V*;>~ZAK9j0~%!P!Uj975*10n*%FAmddb zFB5a#6Sr0!Vt+1dn(|>(zHqGCAy$%G{;>REzG!vY zrx%iMYl#&&tF5kH``{njysJL}COf!sFEs0V$lTT=zIimpW37>9=UVT&hBt7zO7Zbk 
zsF=q8OWCRF1U~`RCe+b@X-nbJ+qG#I9Wq~?j#ON`Lh*rmh4>~^lNMm!^%N_T=?>Td z({-1Kd6|nc>QQAcw>b6vevWmx&@;tce6sxYtNLw}3b66Ibg;96MjO8xclLWS%CgG8 zyxY1DYTdY3n9L9$i_PULdzvaqT*~(NZT#Mg??-13j6ZOVcSqa#=k5&94|si5aT_*^ zFd%nVoy*Sfpfdkn$kS&WmiSrY=Yw+5Ty}1CmBwF{K#$Z|ahKH!999}BE#{9N`psVM zp{@pQx6VDd$n2!%Ydu=^aak2&-wT#+$YBT}y=BK-rsi**8`R1Ze$qF6PuqZ3oz{O~ z^5ep2YCzqc^-)E)`U`ugR-SYHq|@)d<=cL8@IQQCc~D*cq&I$t+FL&+ZA7oTBKX{& zkGu_iMLOH-b__917hdI45C`Bb&NnaFj&NucbT)6&z(X3iV$4$Z?ph~qGcR~GUo9eB z?G--(mI)8P6^a?4S?ZT_D@ttm3&QP+K0ypPc5Y4EN4UsW!_u=UzkOJS;Y5%3q&wPJ z8ZbtV6oVC_s0#$tu%kx3BMZXz*Ko^oy+Zr`n4k^A=pddP8aY?2ysFRnBrs}j&r z;Kr#pjhJr5mwa=`sVQ_?YL74wq>cQHjT@=67n~S#{8~3a48r=zHte1s5cvz@EfgDV z{9>eElAR{7p<94UKM}yiH-`VkK&2dqN7V;J6wVqVMn|eg7CQKTcB-*n?ybrNk)I|t zCtV_E@8*0`xS1G7(@6V%nlpnFMv7f`k?|fjV-uVa`fAmuDq6WkT6de1j18|QdX^v$ zZ$M9A&-zJm`FF{Nr+#1X1Cn#J>oW%)EU;XhY0H|hTbD0i(qj3HFOuDW>X#_%nE!x( zveo;iGJ$4zRmfRkPk~4UnFTWrJj3q&)TG@^V^ZwbL@Jg?w6oB!E|GI!#Ry0)O<*j(L&|4N(>(zugMO@jg$L0TqWbw?b3BzsHF{N zB~R1m&W}Dzw(18Z`u*9zAf;wer~;${pLT_JLTQz8UcA@K?_6RQd973Z74nD7ZPH4E z6J1aYl2s=O&(<&Nr>#&CV5Ze-cKyh^)B7S#sq8*pzbq4eyes8oLuIkD-@iM-mzE<= zg}>Aaef5B-{{R=JUjNhXtT4-vzDZpNjE5%6k$vlDk&^Pt3^QSNg~^-H!DE^LLc&y||oHGgB$^X2*mn z=G_d;n~lOeLs1j!wejx*S@YhPPfRAUDvFC*#4Gk}gZim8X*D8uzN)PC?FS=GJNGA} zk0M^E4DoLAMk;eLwM~Cewv*_@H`40xf4Y2K)g%GPFK~bJ;VvTgjA*A7j2SMBT(63# zffYYVm^srPnXT5}>Jpf$E}=U?i0&Vs7Ju7R!KBtj(^hesaLH3@L@eqC7>_F&29+j!TXQbEoQaa#S>p(BP1KD zq-qCBF80Ni6TbeidASs&Br56*^jO_o^`C=>Ny!3b2U*};Ref+hn`=Gi;#&6GbNwPm z*Zj@Q$bR>+_YnhsYruvBiQJ0r#|tQo%QTt3muSrTD=)ui2C4k_WS98lHefwaPJ8%oEl7! zY0Y!Cx zrYVw{c~KbUf`EmPugnJxF+@OlW_RcT1f}AL+_e;*g^y zvoXtEx>hg=+$s7Yr_O7xU!crWpCn$GxKA$$C$dsP^|qH#JCYvXaRYBw&luyJqmU?! 
z>7b30a*Np$7-JKt$ivpUrZM~SE%;KGh7?7QjdyqIf!Zihu2@Qn+xw|UI^#$v4)yOT zR^JWRG#&XC3mO;7$Epnw#-!E}O_jtb|5`8%sC^j2<<$cXMEdr+(Tjx0M9h9Kh#8%T zPZ&3`d~`cB2Rgviwt~q;%dSuCfq;MQ`v(MU^Vo2jE}b z=HynA!pf`5AMn*1;clO*yACN#EN#ylFzIYPtOxE-4wmIVb9?;^0|T0=88zerq?Y3RU@1bQKm`T1 z!zNL`yQQ$|^M6P|B!BJ43fINST(@TD9pAK9^jkTYtxt62rV zc1qK3vD2F4+qi~EVCM+f#Q}DXCQrt0@9o>H6{m%9S^B+H7M%4q3QqA<4lZSK27Rbzu0^Fr|ordQ!!DFw!GCA%@- z(;n1oMnpr)sluxZ9CJ4#Vdt|<^QA+sny){bahXV!pQoAenjoTpw$R!O#Xr(Q6=0bH zQW?G>qWF8k(U2BybaF229X|D~ccI}i(XKBZt0RW|n)I*C8&3-HXmV>q*yWO`soV)4 z0fQ&-`k!H?rDie2ZKN;JrNHBhtgyQBsC-M(>!jX~g=b2vz_OD^ltk##h_5H!RZs*_ zyC*mXodc^f{H_Awd)Ur-KsorKfY?7GwCIIf^Q&jrnqXK;G#amG9%iC9R<$=Yo%wk` zEH=xJ4#KTD_nywD)sOYRu{~IVq74{T`RB&9rTOltdWJI48L=y`ns||MYBa;x#^Fz`w^LIT3<-*DOOuCit(KTn-<1nNPS}Kf-+-I!W?_Fa21qq;2h7KIREY^hj z^5*w90ZX;uU~!ID+CTR!XaWYKenn(FK%!`nuv`zJCKpV;7JOta<*~O8I-|3WPRR=Obh$X7e}1j+6zdYlnxjmFt0Q+CInkAP`yLlS5&%_~1;> zcf-fDy9icQhPnpW3#MTwEgwU9w?SmVJ%vjAUO2W-kER&o!DCrO9Yf8Gx=rj7KxpYX zhMYy1d5ATf0s;XYX-6gy>T}~wGFlg{Z#4H69frhIKk+G9F+1Xp)4_Z+X+P;D`1$(7 zcLu9xjuKZTFH0#*!fhcNi@T?=lB8(ZQJ|U24eyK_`{b@4EY|KF(E&`b z!X^C~<`$i0!x^-=DaG zI}Ght$^OIG>i7@FR@VfF0yFXLeSAu5N%|Bs9It^S$U{1MHU z2%qzN;5gY9mk-p0cPMNxvs;vV3M_I{1Q^r7?xeK|TGA<>$@;_Xb{|R~t`iWYnn$^I zq;FHl^&~Sow|bobwY{4Bd7-|ov;ZFQkKZo|+q+KX8T0urE=pS{pCQN^`a=hvNjLJv z>A?c(liP@uwX5cR*vJPW(H zE((Jwo-PtfFMJ`{KIvLQPgYFrlFre<8s3hOfuhUONdJ|C}JccwrzEzEttnR-V zL3q4_oSbqg3u)kURB7zK6RD)2TLcUiSZXa$w*VJ;{S`=rij+hc_hj7N5iH!Y40x&;$+u4TST9inK^~7tfPoHbOR>V(p+f3mYe*GbUx#x4lbF@b`g0njmXS*g&aXxXmif6NGv#cvX}-t01sT0vKK83H$=W65KPhQ1747+S9^*8tS<%w%4MWH&kz#wj-FBMxY0vgj$Qq_Mf$2U_l|krdozRkRU`Gw1=@l?O=@x zhoP+TTd5Uyp%Ut^v+cS{a4-!P*X8UV4RZht6>8Zu;xj+mgOuQYPxwW<<0iHD4*m3{-jSV4G zz_@(sqwsmN+m9h_+gBV+Qgaxz75vg{x5xAwPDvu5^;7hbyQiNDXGT0>XZ$k1#7paT z^u|gzKckb^ZD<>dM8z(#ejMIa?SiwIhDZS>kWY8tFHjXml`cX(*VzJNUnC_^0&q}6bc)+w_(W$4N zR=gMdl)Kb4+gfGkq8^|VC@WsiU-`Y_b)E{0WFl?ANTw^A1a$;k_7Icx%}Y_-DlS%e zBLzl5=^NbBY!N-l4Af5#FMuU?BXznfW@CGTF4NqOmwcOiQ^Q#s*@7#_ATJ*~%O&U`8t9x>G_wclOnMsh4JwS(%Jg!)nuN3I7zPLFQw 
zV?X9fkq|N{HlK5T`LsCkSW;9xpZ3ZejW@Nj#r$PdVy)ZvnF&DLCB<3SA1p=Mr1}MB zM^eSezkn_N-leCC+rt)>M61u;CNrT#l^zG-fnk3_$dl+gf~6jn5+8(S zU`rsqRi|KKmnN>gnlY;E(k^26IHOk@N+Ntt18Q{ zZkI=%`Aomf{j$w+j+f~N=;PR7t9B&`cY&9>m;oz(1@3?#P14|B5Np(N%@5kUXH$}8 ze3!2!UP+9zUTwb2F7G5`#V&9A3RMaZYYy#G&i+J`aEFFk{XLl7jnlmi{mF9}PpfbN z*AiLj=*WNM+4Gk(ysyk#3?8|$L!td27mU9*l4t=vTMN{6m6GJh*lNl&X6V5`{6qwT z)se*hVRV7xZ0Rl6-ok@rjEA4H+*gK5v-59m|K7z2#VYR1{n?+l_V+%HFi^30tv?x{ z+DdDLhe-0smDIktTV_y3FI>nU9e#bjd8L6rTJ11KUM$r~N(WBz91;g3K4uGKcxh>9 z-&eJA3pe&>o=h(vPX*Hf*isphWmtMVlau0rRd(ao>>UF@ajBHy$CB~jIbqmdV0aSs^DE*OV+RkNRg zytge{TUhk=BttWXQ?0A5a+39@s1Huzl>xbjd?9k3wWh59aXAA+4CVx%=r&at7aYm4 zPNNF(3B#tnj3&A?=#8HX=rZ=tZN$2d?p$;u;@` z-+)pS&6hL+sL8UvA>?mEtBEw&a*CB=ZVjynKPs<}7rtM3J5!zS@=?r1Wp=Q2k~IX$JKjT=p0TR})6<$(e1HP!zS%QDN@GeEU9 zzC#UVf+nXcRRlnm=>NGu(`^A%kU|KLQQhjkvTB0_f>cE3UE`wTtw#g0HM^8~1H_V3 zU(d}hZcPk3l5YqP?FWK{*c)>jI&crnw4X-5*_s&PY-U~@g1s9aR?b;%KH|dUc{Orb zg(_}{Cg*1IdNv2&IKy8kqeGPwY87YCo|x!tM*SM=NT_Pn^F+OoAF zs$@c5xjGr!BrWejv%!LeQ@kHcj>+hdX+Uv$%Jjv7IMM_RH3Y&S*d=w6YSZxxEWD~B z6lus4gf<#YsL{LHH!C0HXO*+MuNL^5b=iSv1>a#a{ny! 
zmEe{{eGH*$u4%Z&P&lgxW5e*tt|=h|;qEll_=Tjw+GhiS;{j;7=aNg1uX%RGT`j zQK9-gbaoE1TBU|=oVlc_i%nPFJ>g5276_b&ntACbpnh*fwLuUWCP=2%zTeZ~(rqzed0M~a}7h&1WFiF5=BJwbX&kP;wd zeGj_!+IxR<&dmAEocZSa!&$lpP2Tsp+jHO7eO-#z1`Ip1V|=p1@DIcy7LJ9T?7nx4iofoE*ZdNuKeoz|p`8iIOxLUroN$T8-VlnBCTOXHL~d&N_k{s0%y?t- zP@GvrBc<#Eqnj)TKgXF}k1Ho@E{3@TU|P8Gv4n7Q%F|mz&7WJtUGmGnqYjW-%?I}a z;GI}^q#rM?|ASX{r`9`M@Hp{7P)x&kk)ef9#ku*!*D4CJ;We^6vb0ryG|6k?p8k6J-`pXmXls%-f{5RXaRpga>aH2_$V@=z%Pj5uD@)fP_;*Ht)gY^O z<=P;4)&CU$gZ|mW0ReJY%TzW8THZvD+rTZim3}Za;qa|D*b%$w-{q9ErHkWTCNI?W z$>yF=wsxe`{xEQKu^_1t6xq|F9nTh#GXK2qtol=uxp6*lAH@h4CuE4DP zfQu(QS=c_><@q*5{zQ%VPm2Cms{X(7UxW~#EYvRs2*UmNl?j32?Iml=RGIjPVoou5 z{-aob3PeWpf+SY&gc|VNkoSSA9@H25OE4w^{gl`K&fMLaCb@uj^slniRWCGH@YB?C z27B?1Eh|{`1rX!bJMR0bRE})BYo{OU+!DWCkFI~m425!`R z)KA+!*Cu7>hcQ2ui}55X?%s=*RmFuj~}uWdwU1{oMB>M$J+{(eZ(dehlhepABk`}NmqCVYBVIf z62kN1hJm#}r1#r~JlDq`TwFAqGtlpygH#xB=y`r_UaaVBY5p2o&*by-G`6SWguMun zmMv0Z?`|2{ZdMX<0^9pvgfMGm4E3oscjND3!KkmS!M&uDxx)9^nar_jr*<3pjvzjl zoYznh=L9|1E(pYB4wL3pNC-0n)=UB_Tz1G2U>s0l9Bp8G_dYQ{mHI|>?9TN^(}7gJ zZk)8jxm;(w9oyqL)=qWBG&w-Cps0T;F$R0;Wk3KrrN#z@PDKf`lkHCHtn#XIWW1JHz(c@8 zhrMV3UDGrup2U#=7SW&>`Z*Qs#j@h!7jup#>I`bs&aCA^ukS_l_K;d}TA~#!d0w1W z)2BVshB%aCs|lRVE9Cfj_kB70Z(q+0d3`J9gAjjW8PfOCF@&anVkVj6^LwsZtZeD%@0M0Rv9cFKQU9 zk0ySUWY(yWeqv+4sxdS3#gb%H0sTQRt&nA7!iis1I>N$oaDaX&Bo=qEj=;1l#YmL2 z;7{Avo<{RN{yjN!^+$c_>LpqJ1if1RBOikT(LYN)r*u6_aA5W(xE)~j!MGnU=q^Pp zT8i^^Nd(pKkutP$7s{eJ1@RI(%O}+PAiAGgDu8`L76KUYGL<<0LQ|n>j{ch)aZ2bj z4?iZB>$|9sC@m&nf`|D zi}EM~4f%JL_dj|c|9BAVnvIZJRNc32>{b$};n`pLEfB`&XIo+er>LB9)%7YxRH@a_ zuH_$^TdMtduSd>DBHMDX)Z_~8m!qAqT?tzr91j3nFsX23=N$t*j>seI-OWr4*@GuP zrLjJJ)p`7#H}6_+4|#^C%pe>22V5y;WE6EKpV}cNE6;wX)4B5VhG3Fq&)`HzwgXTo zXThsr6%nzLZ=a8gZI^C4<6frQu z@!GYgr%P8wDL6M$RfXIb<8Qm~66SA=GUG8}_PBq2Wj>jNPaTTcnieNo&2k<~?Dwtr zN8*~za8am!3;o*W6tm`PiR;?}<4c;k(QaoyazKc>T}~C_AU-Yu{hr)Uy|XUHBr0Du zxk;-SiymAsM=$g<54P8dmL7kJwT{)w(nxVg@Ia&=xQhF>%L(kCef zzldlo!%X}n#KPXaG1iGw=DhDOs3 zGbWHI=V`2NI+=9XUENmuzdHV6k*R{xxwK611%lyBh 
ziDtlNXe4f8o|Aq1)mD|6umQWDr`_{pwhQ8f-2~GQ(^n~%$$w`XD1q%G$TnVoXPYkk zozrZy)!_QEn;YT#jHaL?xm?Z036rBa9q0rVanceL6dZ!c!f|)-4?wyUs{lP_z;Otu z$oJ;L@pSptSu~E+Hxsmi+(A*C0s3p^S1|%9C>XiMLalVGqunb}HcA0zHDu@_zJ(+hdT+bm{R<<2 z7J<|rX8W(KqKwyf^kSG7QQ`(!rS#)k`(0)`fiPCowSYZc{fKE7qGe9Ype%7{B^Wk& zV;?N|r^Jm5$5lzi7CJPUy3W@z4&45{=#zDW9CM0v{QdrH;dl~dyqP}~+#+k*vO}x0 zi0f*b(+&}PY;QU~3%wnY`NDsuck#8j*2cA1*A%S8d}ec0)vpqpqtwwY2l6A$1PP&@ z7D$OKs|>5+7gmytqfM}3-~C_CSv*eQ^kN@undLSUM;c~narA#ljaBHSfV@@?ef$`z zo1bM{0_ZNt$D6G#6@C}HI3m?q@!4aBlfK2$x|P5+6{eynF(8-V{qc1`zpM9fdN#fK zQ)=l~n?ONvnY0{2AZutg`R#HKzl>d+Kx{^D$J3Oyce2w4=|2pF0N7c6VxoR2-Wk>B zotLIe|6(fdabMkzI;ZQ`WZn~sR-okMCkxbodK4KGiik`_9BY6)IC$=aukrbLEkon%dOOvQ2HNO7!dc@-O1M zyz{=;)^)Q2uY*lK)QnZTk9w+A?hJ?CrobB=NpJd>I&aU;wP#Mp50a3&zNWPX0U>)= z95jam!$DbARAA@@;R|1DTq+B0t8cMU@_*=Q-MF!61=xY) z|F8q?T`FtHtn@xWKL|*q?dk1e%9M!EHQB~2LkbTSMvkTOj#J& zyz=aCjnAf!xGN}ojm|96>M?f@EwRuzD$U}WZ&N{EVpA2dL7OUEou@OR-@gn)14hgx zCnQa#L>aUid|4fMS?GI77?WPP5O0(7`qiRIkI=s%Dht~<9R>YU2L*S*-wvt|xTj&! zdnXS%*ux?|zF>eEryJB8eDO%ooO$$*a45BfATQUIj=|t$f(VvmUC!gZotT(1`lscq zub;0%glW}h$)FBdhd-Ma(h%^K=iu%J83ggjsF~Bx86}C`+e)=6HvM6|`qLspe{tgNI;`T&gS8s^Rd(d$q~>HK+hcLb$6&0w+0M(Iy3D1e0EEg&I*kh> z6d2^!>7!EJR%7b!OwO(`UY&LQMVA5gb6i~X29G8>^q6hgO9$SGHNx$z>}PdK`#L!- za`f+d{jEI;@d@RrFm1CYjG}H?tJ176-uW5|DGa=E-)fzHvdEryB9rI}2rl*7ZebUU<*kV`j=a0Saa)Ik z)QO_i^tG1)rw3aIT@vX8htc>t3d6==ky{jCvcRukNCKHp11TP`4q zN=O25c0C@ERi5Rg%_JWQRJ!rI%fEfv<-h#9%m1l(eVY}30n;4i9{$~(b?aSSyuPI^ zP@?Q6dWH1vBJ+oo!2Vz2fItl3N+@i{(OeMpMLbVivck(?mwx6*H|bfvO}q`#zwUEr5ACC&Au zBICKXn4gu&BPEd>5VVly00k;he?l^o<169E*akRfd7zw9MlXs3$|=!kzF#>s?7}2K z0GSd9Am;)pZ1Cl(?Zt8$t7Df&te1oJX*lCQJ$vjMEH^|t_PbePdh9jP*0WKHGGjgG zbfY;c4(}5vcUG9ayVIO!tPD`^Ot?+erwshasgKpLqt3V_yV`FT`{oag!9yuC3*8Yu zttyjaZHmDk@x|F}&oeU(sZ%2KOa?F7~mIMa$t)p%?XGw`<+eeaIVk7E# zlHk?l;|wN)6T`|BV5l;ya1ZpKw!t7d0}!>em7hxGsM^7B(luc0xo^**^BG72*h#MN zMGo*!vGbyNCM1b4fR7`&7}K|)u^%(^1xw8=L7D!ohCTn%dApr-&7ST%+-mY_I9p7n zyRX4#EVc9mvHs5HKp|qIRZ15#8#esBo#K8wH^qtK(P}u#gT9N$+eb8r~eQXFY0KXO2 
z9x3n2?Lxx?NO9hIxKJ3*LO=XrbC|&SfGMWdSDeK2mwFV?N_!JtQ>}G+F>VE)#`9d+ zW#ai?y>wIR2#Fpd?|AwzDj`o`j}?@WMkU z{!?90g#&S*H#E_bS-fp)vru0XiyD8JM)^=&i-XMYK@ZrTM<`4*QQ%Zf#>L!a$M_l#)K|JpCS2_Lm7Tsn5e;V;nqcyj-yLYfHE(*%~m`_Iz~#;=>D{K z+w|0FsI^OqoZXV;3=|Kz>pBv6-MasVr>CQZ-2qDtok=TeU!QQOKKCLz(R)?erAkX2^)?cpt@&wV zLPCORp@4i*5ESiyqqcri8nq#c1J2_Z7Pg8}B>pISV=f_r*Vt$O>Pyr)^dzs_`LWTv zXk=|O&VYv;1R{qhuzw3e+ky($m5pIAJ!<1QI!-@~++#1yTis7HNO!a-yj5=8%Bq>< zbA}{xpbiTMnx_|ompgoJN!;q>mc8=SK%MKxna>ovIW$ON+QH6Q&c-@z+bic*r6qct z;?;aP>Uo}#3seAtf{3 zLS|asIsR~nEa_Sk+MnhrkS{+Kc9n$Tq2Zt$EcFEjRt*fyGL22eE#W$KS>fv&oaX>z z553O=ThKpe@sq>BTv76qdod0);zx$R1ghCXuZBJ)8t7tU8-R0;`^U8_hlU+ZtD=m= z@_(^dk{bkt7AlBAdI#^&RuWC&L3T^H_qP>JxZa(*!Bj%m>d4;7_=1T}ojB-+!8L62 zOO_CnKcl}R3Y=;YNk9X`scyo(h3tc}<~RBqK30)#G`Ig)U*i|2+ntV{oUtlw3YP^7 zBsv3FNBthM++KRQmkmP7BEnj^VmSnmWEO+! z4}7JL3GI31umEn1&e%cPEl+yWJHg7=f3|-{m{Jo(W3zK|Xa`QaXH8t#ZG2n+F934< zHlyRnhT~4KldEtIRr*}V;KbowU6RV{Da~`ToDRGp12Ti3a-grfZy(j{zW|R7bNmYe zdjOY1fL`k1e=1Bft=8Gcl%-nGWQJZo({PrKP94H^24cc>sYE`G)$94VH}7xt+~I3l z^rz~%L9L|BJ$$p-R;W1|m}$5DGZdG=5=u2RQuxQrk7LNBBvFN$s>1NpRKuv|g~+Rq zW?r5BNT$V)fiT7WXF5Dht%1_TOK@ZjK?4ZAwxYqLPtl*Mz$;2^T)@jM)4YwKIpJDR z^;Pq$MD*+Yc?0PV7!A&~gPnPhsgJ3uQnc0~OaE1e*2ZSbpe_)DMD7hvS`cnu+&&5Z z2^#O^hL1pw=$;3lLYm<}O1DEztiPpfN4H?D2Uq91odqt|YKLYl5`|7TKqNAJPrx$c zU@UA$e{cI}lZOy_8>QnPhem{Pb7Bj2rRyqPyIt8?za?*-`?6WjyUdAR2Y45WxnMAW zEBo;GL4uCW2FOhpu7is=tQ5cjlA9c)4m83lIiNHv)kg+N=OU3aPBK5^JS?UYd!?4L zuHalTZR;JP^ze3^}j=?1PGz|S!XOj z2L3O1&7oNnme(5JEw#Mu7Na|Om(IewD)Zu023lUL%Ly(tME1V~#{O;u|1SWu|0n;S zgz>QtsB8qt3s@N|?~?Jv7A8-$`fTivG)Slwnku-QZ&jzeM#|msw29}r)l+SHtzN-0 z+bteW6CKa_eh=g%`=jO2Ext%8HnA=N2-L-P;UFgWbzX2uAvM%Og@TIDfs<}1@<V+v%RG^XrzPJEq8{8=IsmVzymCI=d9vX; ziNypt8-nUUYOA^GS<>j5vD8}Y`Knl!i1$d3aM9Ni>yem`5a0~~TV8l?_ZJCu?NW&q zU6jbqM;;0C@riLCO7!9m7<8}?3}bD~Zw0DyK6#rN^;6iMJ4B?LJAZt8qxG4qVC5iU0U01(N-slAs@PtYQMhgztaPPh-0fyQs#0rw_<~f7c@aGMyj52Rj&4&Au*uz`V9VOnT8MKoM6aDVHg&P%C?v4xgy55_Mu^j11)|nH; zDlDXN=3Y5ao7^BZh|?UW*!yanNtu?t$m`yp7q+5bdsG&ch|4qa^H{*R7!pA(VO#N~ 
z0cTdGYRk}nC52;Ct+%Zwx|_R&VHXiAKXj&;Bcfz$zP2)HA&bQ@R`W-(Fi9(^o76$6 zAItm6L4W@i(%nOj(S*x-e5Q~KziNll^WtVsZIOd?L} zBMjaaIw5vuk>H+h7(A=f?RMiab-kV)x5hg}+WM+qDAFLOnaI7*pyI8mS+|!x>XPSe z?sF{G+lehX&~z=a&bJ9#%d<&lc}_i@Mu2?jHkYyVvu8pr^C4ZAht)?u=U#r=4IxU< z0~Cb00pe>c&6r$2<;%61`Hjp^7Y;JMQd4)s%Zy0&0KHPrE+St^eATmiUN;C*l|E3; z{2Zki4!)1jRJh$W;wF1WqEWnL-Ioen(88gprB`MZ#f6KMq{W&{DX|^&6k9a@II+^Bcd0SA!D8R&b8QW#snA)0UW2LsYo%^q5HD4*20{u51XZ=+ zU2DHo;tWgstmNCzYbN?xhE%;*dlLWS1Me~WR&f{(ZR_lbsW*)rpcH(QuTxH@`#qH{ zR`Cr3xD|BYT3p=GF&f<}6LEY}SD8AW#RBDwFE=pYqTnkqq)6q;vQ&}P3dZto`^G?S{W}fThcJt3Gnyu=!BM3q^4_ZkY42mTa<)UAWb*xb; ze*Tt+Exm$#+O@{>>iL!tzlqIeo>9S*o!Wc9ApR99+z-qTjxU+%j@XXBx4Mok5f@R5 z9ojHr?!NOn#|qg%-}r0)?&n(d2}g5EW~4qa_qe1~?F6FjXrPv=kb)BZc3`eB@v}Dm zed6XXfo1;8k3Sz;?n5AoFJ0>!U%EBZmu%Vjc_O+!soJ*T3OO=ei#Alk(5DnqU_d@H zM9Z2hzE{wyDlS)oRt0O3R)n~XO0-ye|<)u zSM}3{zKn0c{++jzAsGjV%-o2jB#Q{PnE~RWUa4JJmyS5jk7`Ji&?}v8DboMdPCl24 zMA*`!m6siI*o*x38pR@Q9E~)bZnSKPHE)yF~mIp zI|4mC-1ZK@5nk*J0!L^Ejvz)FRj#g4)N>(F=IoKx{*$^i7|Dxk3T3@C>-@L7FI>H6 zLNc15H{N8`?=N+H_0jZ=dEY~;m*`j&HtPZi^c(s;VCU#AdB3M2gW0sUtGn=^!Z&j* ze*145x?22uQnW(8QzlUyAM70#ynZhXMvH%1qbLdhw5&4_fWCS;TNnU;HdkG>2qctj zEu+6$%70CN9qOKKkGG4Kb$3Zx}tsNNg|L`J|xvU|FVvx2%W|w_@$6P4(IF0hTOpOMT6uXK$Bt zm<}@k07w1?kT3a1MF*sM&5=s7j*nYh zOh-z!ZOG>mlixS;IDAX8WxI3)SyN&*9kju+v=7=#bPN{V;9%sZFIh0+1cif|kKm@h zw=<}xxs#qhPuxHa_V3hirGJZ-wwNGcLnr{_arMPQ+gEL=z7Ef|3X_sxE_*j91ePvi4&?o(%F!svkeS>pag$;4c=&Dr9 z=*!(j({rf^E`;St(x#vz`5B2&TQBW_Eaz z%G~j&Z@;o42+7raRX^42G~S+q(_8k6jm^Gy#^yP?T1mb(Rl88>qTcyPQ}G7VgOM^D zW>4v(q{gZ@^rm`zf$?1hsj`9@tNCHE|O=CgRiKYHDng*%8$YZ7olw@UwzgDWl2U>syO<;Q;g z=)`qPtVOzL`eQ&2`w^;1<0Q*oOiW40##NQ7!Jt>-+uEr_cMku4-&LLmFw;{Op#M8K zAHZcCu{%gFi<){6O?Xe$INLTs!iXYNgx2Hh+<*?>&Ei#~VdMl~s_%6IxBZ|<90bLCMQ!W7Q z+FLkzg1=PuF_z~q2xr+`3qg9g!Tx>KR+LCK-~-O(W=pWi z#OgkHv1@yvsC@$9!ApTa!?~sncEqT>lW2cB_tL-|qppC3zYeV@3o{ob(^t_#vE%n{ zEbTy(*(haY2B75JUl2*hB0GH!S3HBW5J&{X9le$TXoP{q5uNI<5ksR1GDnJ)(NaniSElx zyvU6WBpNBXf0uX|aO)aycuBMkbY{2vs9sx4v|!dF$eX;mOQx6-Cds9=>R>0SbA4n% 
zqS{)NPs22t^C~1(rZn=l@o#_1`(R@nP<$@@L3wp!go&5-D+4u}Xe(-{jFc+j+#nTv zs45RWPYuKCINVcVDzOulT(_(q-F#dXBvF0If+8w05pZFA*U?ZJuF@_@e>h@yr9l4& zH)@jqajhWalY)y^>22V+ZJ>85+}6A+Y7P90hy|>A8gKq=r4!NTQ_!hyvn(kA4qu58Z_mmBYsO)st#WW5mfBICl=8QVyENr=?Bs+0LFZn+m?JLtT7INV6u)_wOe<)lcUp z0w2f&KJ}A*4bZ`!$)WC-P#NB>McPYfekzYou~lsRtlr_?^^jJkRPOKBHTYf9OxW-m zpntciPFWJ!IKD$EHjS2olI(<*&+>}YXt>bTv_G_1J z#Y&z1NUj1|5i&Nu1j&EQdQOEGb7LDwqdid$@e}*@2rr#4!R<1B&jr4nmet;FOq=Oc zW56zuY2#E&CBj^u7roaGp|QPxmy{(r`CuPm?@j93%G=&4L}tJp%1!eO-p2A@pa0bu z@~&W|&LDmX{>-M@L&-s5utEiNw4F0^tk`4<#&(Noj4wt#JUw|1pMnnI3n8&z9(yk# z{up2u)2RV!{B3|^<)14@OYe`jAvsLk27bI#iL8~CfjoA3F<>K;L*1y?essN9Zt$ZG zGs~#Z2EFMNe+dgeS_H}_80Qm0IM)mesR9_%>1%gz)>fhT$OwTzoFJP<6Wk1#pJO9> z&I>1EHu+CS6q21QB)Ibv!~?3T)F2kdUMB71o|m1ia#lTlb-5&TTCso5eoCfhGf3@8 zC;8w)FGp}mv^XnwPGpK?CDBXJ^s{=|yka~Iu+wB$2m&Rmb+mB>x+AMTYA>Us>)AFp z6Rz{gf)f>&Gbmuqf&8HWmceDBn@LvWVyeO;-4?%Z>9gNpt%{wW3W=@nnaXhE&e$K! zo9O;6ZCnhbjmMQw0mh)~im6fp(Q+tD8S~(G5Guh*m<;1p7pAo5pyE^)f|xI=gmF^a z3CWPUwV+3uxD{KawROR@0KU7+DE zULU6oM9d+xKq18de_91k`voXtZKOt5c!5h{Cwjm&Z#%R}09-e*G^I^Rxf=O1-~7I8 znYXd32u3^J)Ev~)Gv0-=dDYy;#3mqPD=uqRRMB5ciEm_tM-Vp&R0HPuSKY@JhZ9YN z@eC6$Yj~AAJ~xLpguu?%+Wc$vbVRNI$Gg-i6XPFX4r1IX?cAEHZn(002Mgyv6u;u~ z<(I1mBRI8xBE>Naa_-DkL?gzeY=4 z;Fw`0Am^NYiWDURJyCSh&$-HG{F_7g`pBs)?h45!g4C-|6I=ae;z%UHKIW?yb&E)R z)+8jpXYrigFQx%pJ(}kwUbs{{f6o8&+7d)z?sHdJc{zdpQTR7)Qfu zecscZ@_)CLAxA6(cEQ#;@SO19kJ%`FXIw-|Q9$v2*V<)~{J3un4s9>HTwF)F_b|x- zz#P`RV~;#I@qNK_M3h51Mi9aa*&ZiKhZ%rq7ByX#>d*z0Bc;HDgzO{HA5 z`7%4)P&eI+Zl*e&m}0Q(=f&`O;Uz%vyT!w>o~9eKupXmfoGelJ3Nv{Zb$9vw zJ8T98CEQRv;~l<{2inxni^05>1hlC~8q*nx!l9uGQjF#O(D2G4;EM((mqLIqo&&zP z8Pq9uDTi7ar>Y@LfSDo^c9}eK?4mBPcdV5l#ko zBAa|WA^xtI_HHGLiERfvh0BHSlUyq`u^gK75|;+~p75d-PrSCxBjpTxlq;quV+Frt+UN7lxuHIHjkH|6Bf%GWl?msibifX#!;CGeqUi@zZ7 ztia2D6d0a}^Vq%j2zY^+&3Kwieb0eYrCML!g^y&A@?}9zh^#CxS2j|;t!t47#JuQDJ&xc6@*dn_BloquXTeoF7BP9*`qa+FV8`b z@8)OO$CQwTFsB>K(K{4^$^Y?9h^~P8$$Sny7e{^*D>a*vyBcq<_I!!$S!Sw)Py*{E zt{}AF7sYZTWz2|i^9e)*zWr;69F9S^95uyZ!tZ1DbI*tU1-T!Kev#7is_)!g*4X*I 
zF56|_Ifz&Fz>bNHpLDVOtxjzNc#h=2L-aNuniYI23Q@>CV((&i*~==i_$; z6$z5$GjAc}wNkqPu0^_bl-+tFuky*L?8@$!(!N}2`a|cW$Du)q(^hQUcteIK$C~CR zpXuQ5OiIhJOK+cDY&$^Gk_A+)XNh8GQx~?$R8jXo58ArKH-7$j?wPpQ;((8HFQCUu z1$DrwEVv=Ssu9Bo0(Eg=%Fu>;&ge`5+Zgu!w%M;s!oBYsYP=ruJLCr%xpe1{tT_?! z75#m5uumYcAWR6vxLqWu<2EHr9kn9|IJ(Ytick;SDp65s0V2M!;an9eRL5FgS4<{i z!Vq~##SH`Jx0YdNu1J5Iq&`O-zaAYw!P<*B2aZm#vw2te)QCX)CIV4YlmpFHw&EdK zM>Mcd=8Yd?o z8mH%^aT~H>jmX~HMv7^=Yroo66yk_t@-NqM+KA)b@`w#llt&|^ZeKyFKKRAbKUAvF zfS-D9hi;OX4OEKBt$mK&daVFLB4uPmy3S6+w=;*o>Yq>hY#+LgHaYiu^$|vVs`saI z9x)98(wN>xkQ?#4F~91k#ga}_@KN*N!Y%pSOZ6F#tz@|05beY) z2P&97^A>HGW&du5dK%jKeCIqb`uv-7?L)ygaz_^qq7Y*5YV>H<$}7vxNscF|hX}HX zv_KSl5qixhdD|mf&d=UWNj2?U5btUIx*Q$U74T*XioAJ4NV( zw}cA!_Ch+F8-jM#$894{G&BGDR&QoOJaK}=ROA?!(EC(xvVtd$@E4u)brH{FUhxUF zoOAMzG0r)Y8~Ib|JPxgcQHq{ODg2g-3p;Z>B8~G&gRHI7E_W%h)}$_Ecck4Ce~0Y7 zxO@AnKZR(gUmRMG$9DE=L$aCf&OL{LJ09`1bhjj0sD|d96MM^yPMf5|=)Vykf1pw+ z1ctaj2#`O2ZsN@iRGI@;i~fyv$;Mp;mzl`C|Ns8ff8xIW`SNpZuw6;(e}DP^FMp`m z*eb$J)Bw(RXS{N1GH@`((Va6UE;qq;;f1)r4#fXN%`X!(h$bsvkT2S-dh~v*GWqb1 zi_&s<;UzsnNQUb=?4C8UD!J&o9=&_W;yP2|%Ry91UIuhbS<&ckM zau^pqU3K{(@<4bS9cu1~X%B&i{Ekfd%~lAiwI)`KTVKgkx{YI*(#Y!?+Nkn}4qOpk z&SRdfsD@(#yPhGq7zXqa2#vnb_A4AnujO{_4>zbx`Wd_puhF$>1?rAYb1vQ`H&gRF zK?DpZ-1s=%nQL=(+9(fo+3LBW^YpwM>F##q2ghTrH82+5lskin|KgoxT3YYm-Q_iK ziR6cp&HLoMPMzlCCDSJZ(pigD?y43wD2GT0s4N zG8U{OYJ;1iIquG<&nBDNL_f8(jLcacBa;ea3wq?t=2vl^?XI?Smaatf<7^RJD&|)w^Q`puXCdm1&A~fM+F#yRoYoxj zS4oRxo(zQy281$f!zlQ&%^OEs;2xeGKNv*!QOWrYI z4#E1Dc6`!a$TX^Sxchcv(Vd^u=_|#P74;jNx}R{~XsY!ddkf6?2ad0;syZ*QIR_#w zm%u3_z@CC(=Qi0IvboJ(S#y+5%+W3Gf)?LRxDQ`Me6XcUfL);TYegsMKrx9S*hd`aDafs<#(?Ckj7%OlQNe%ZG4n>M1@cZWyuYV=;p`-ps5+@cuDZ55miVKt zo;vjm^yxJqv|WMD4)oI!?4at`?bEIs6P2PzfQT?^FBDl|bm^iV8{7Q;igU}UuQvl< zZ^pCqog%BI#_p1*1`L`rkKwP)_mY*Y*0PEt1C*R8~hTXi%MChtJMYejji>|7Vu-D_?&sZmy zdby3i=_7rdYNYh--@eDA3=lSSsKFzqDiEpCPiv?zm%t<=fXIuX!^w*)}zZ?I8WgwZz(e(}%2H%QT>oIrA$m*=Ws4FP<`oolx6 z-yrs9V5lJP)lGm)IvxYzioYNn2m_obrVUP{;qLjtod4)0^3ai!v*Q(%mESUJ5hL3r 
z(+fGQ05gHOS-#VL`2->MMGL|Yd(dKUrH?fu6z0GtrK)yIUHUvW;y7bFQ`Nuomg(M@ z{}qy93Fau6f!S-IUDMA2_^tRsyT*-Z!tXrbOz`oNkGhW8pwGQ-$ z7fmAGoRN*cetmcV!X=T0O&^?`=%jKNdh~+bw0aHb0c&xwrHmCl2-!^B%Ykwta+}29 zy$xO#gxVkQl@QbZ?2UDlXI;b<$K5YEBBTjweDy*H=Wj=6+r^do^27vi-$3z%@6WQ- z0Koz~iwOGyCk_G0AN4pkQui)da352W0b%!)^9JYohx5j^3ET-huyOd4l{p;U>01Lo z0P#CB?uKA8TJza|XIcGQw>z(*z9;(Zhv}R%J-rH9SFdFRLUX!rpK2Z8exWDuNu{1A zKqdB$`#zVKXJ>P8U3R3x!=t4j{}=Bu&`LFrP7m>qMMClHU zf}{ZA=QBe-lF(TA=p?ghacqfQ;N26k?E0%!W6=qlyN`?1HSWYVI?3sSk%pBcb7kbk zrV%nzohL~ zV@~SO(aUMr>nE8Fu!hja5jI~2$q?nRuaF%h!qJKCHuA7o6@32qcYC1YEyY1$NAldl zmQS)kL*{M|nckhOOeY`1k;;EG0u9@nl)!UucPQ`EpAG^_Va|qwSxPvnk{4N*n#LNG z{Vm69K1;QWPJLEzk*EK9{lgg-7btlKcPcM8B}^3$Y%S62)W*h?cQXvZiBWH(uN)o$ z|J%lA;~g3+TIZmp4T|#|2r4aa(KeWd`1GBe%ikiZs$-oXs~JP&-Hb^ahRPMqk@uI> zs+va@1Fz9FeAbNa%c3Nrg;wi`#%*k)JQ`lXUa&tj=B1m*NzYA6LPTdJIaK;qCRz+|rsJ?7gud?mC zX`Z&vJhY7>WFmP3?vkr$E}}mm$`m{$lL-_POF>9ki%|zNEQ$;5ULuT_yH1X8M<8vS-gWsht*GPfj`Cr!%TnT)M)Y>xH-{3a`-VThFEld|&b}}gz?#<0hWOjjY|(TW z+8J6DoxPs);I+z%2J4z@3;BRmslV`lll|G4O5-JJqMMhuPsdvk%D5R{y*6cH=^zEM zaTXSsb$wmp;Fn8RU;R`;HukWt`Ruihhb$@FTq!Jmt_wtbPdLrmE zPL!d_70&0VE;!OWvK~)p7&cbFXMR%}Y`6P*3RJKBORA9MClAt)aNBQgZymZL^>c4` zxeFF@kR|>dJ?^HB%WHUCu1zy6%snLY>=|iDR!&I99&79A5E;aL_~23uXaOz$j(yGl zO}OX2%yUMGQ*WMwV`8jq1OTe<3h^7kzpEFSSl?IS&+JGHT8$O=2NE0qp_W1dlO+6R zWl{da$^z3iO!l+EnJLHSSC6^X_sox#=It=c;vz@mn(W5mR`%Xf+M&Zz!jvV_&&lfI z6U+)L&fFB2vvgk%%78%GdJ&%qH1OW5V8VUk6SrjBvDpkl4%=(fxz%f3Mr6;D)z7>3 zl05F+@9FoaThL*?LhI$ztr2XPCNC5ERAX&+3BYgvFHQgdcPN5GI0}@OOd4n|-3g5* zIs43*c4Rl>3q*aN3spk<2i^=?NXs&t!USH7ha1G>Zsn6hkh3F7^j^VslyGr`wiMWr zR55X&QV;G1nyQ%{rzoX(lVYJZ_IXWWpCIkS%QLZ)gLl$4i@z#A=Q$J@1RZ8d_%0*D zkP%`?hYjzn$)dqn*2QMhD}GD7SjVEEZp2W}g^kVdIp&^iDBK5q#RNy5lh6y`v&{n) zR0%B5xaJj$bOOHfH&7lk2+SyQ5&J&frev1u{s_UX!fWyDOX^rgx!2^p8S~40e9?>E zP@fegz!i3~(0qIrRHiFGbD#kyIZEeW0Qv?l>048dQmE9l^{{7We;^s7U8l3AEgHSR z6GM66L%q&_K?0thmeMu9ArVgiBw_@h4H}dJm7;J&*em8(fm+vh>pM}KLscDyBE)s} zEtdl!d>Q+EHseZjAZynENVcza~ng1wD+?O`#K2Nl{Laf^JiJz~9iS={i$bT%@h7Rb6 
zLi`%DZZgSBjT|(QbA=eV87u~v*zy*m4 z-aWT_4V?I&_m^{WsvNHd2L5yaSYUo1DoL~l-8ev>m~@V(y$t(efI@ZK1+TxfAxUh# z3+ZWH^l0g3Q*y;}pvj|Hx{Q;`;`}80wAp6ATSPD&Y&@k(XxY)+3?w6$>76i zDCMJNNBkm!?;ld+-+K{|X&^n;j@~|ET>=fx(>cZN>53Ms8E&F=^}`x7w%wJp-%&7G zcS4A9i=^6hRB#=}VK@dLXMp|~jNK52z+Q8pM!A4!C}%e}WF+wF>2&P35dE1MAViM{ zU*CehQr_VO?$GK9JGxxFaP&Vy?Fhfafs?$*&<_8bq1JTIO_`>7G1tqH(3bq0gK!i1 zA2IPXKFVglBJicR1yGN<*ZaC>+1@gH)$vkftBA7uNxjT=w52=ZEwl^DbKMnnISjrU z3MSD0Q0xIgOQ3!?1c*oSA>xWiM)0!Oyep=+h_TY|EeS~dTlAOLq=goFR0kBH zxA(8aH7R^oV9U3`tbB}c{~#H%N$v84f<^veG`*+00=1hg`ezgjcNl)MfM1`3+dA%X zvxDPaJ_Pt@a0R6b`7Z-##PMMbJJM=mc!#Bqvo|j}e*bRi{?PgnT{fRJB$zQ6T0Y3C z#ALSJ!gDB`#xf2+p}Pq259$;>?f+cyQ^Pf*zGK4*ZS&?b#m2vSRvG2$`yaCgFMd); zTm@!emtADLFbBN+;FJ*%)Q>CUIW&VfQzcMWy^Dn(@3DZ+1x(dT8b#ApfvH~ZO!2t9 zA9YDE!N|g;Sol_c20iVN^LCuiAqms2XS?#@^?qMMBLd|7M>)`Ib+|rs?N&jmR9B!p zGo2c>?>UCd;v_+moL<4%%tAk_|A)2jjB0A_)(wJ06o@ndDFFdd5fG4G0v4)BQ9-(b zf`D|8P7tI@X9J2<=^_NA_uf>Bv_R;DUK46)+4sf0_jkYV+;PV^cii)X5&xK#wbp!R zedhDvyR+tW=~e~bM3?I`o2f_0QbQ00?z_z05N~U|TE~7Ov_*~IQ@vbyV^`y<#D4jV z?TfG~S@Ky(<8l|^oILZ}LZ=Ry7XP-;6*z0dT%el?v3e9}{fVG^21(YClyU-uFDA^E z<@#-#ds*4(GH(i4zvODx1W;!wX+E@fitb=jvN@9XQX2N+bFj=%(FT2qIj)*wZ+>OEj}PCbGG$h%S+HG;?pJ@v7vfBV=k z#9p8iyh&%xh!Lw3o{T1`iJ14*eIT572hs$!=gO8`O_6gvm7+w5I2g`murFAc0c*FfJVEoipH7{7OS}=948i~4ti<5$Fw)f!8 zmBSCU7$9C-KDXR<7UiydupoXo_4YccK|Z2{_Kv51-gf|o^db5UcSRfxDJh!r%Q?AY zu>5T~oCR7kCX7eegFYxY!0gBFU))5Li~SWJM=XNj@A^&0=M)vvx=h>~D;{R)z3jjw z{<6_=(N76=`R$$ z!qZ+{tmHUi#ryBv_wqkNAiE6v8rHGD7rIcn|724Czn>(fb|#y!o?7Hy%1{}VbaVWr za$Ri&s+~=?=63rR@>LiOLyw4{Ia9#n#DB&O1WHsdpK-5X`MKDxjvoueL-nZHFAw$6Enl#abZE|SB4B>Mq=g-lcrEZ#7&IGe0QbhwL_DWw2L$rFvPs zX!^XgP`&v|E^?oEI(miWzdV_$ncm?I!{#mKiGs9&ax??k=L6Q-Pk21-=d}CC5rJN^ z*#t9{AoYF@15dZ03DS7mwT?VqHg&5F_#`~}gkd>dD>2EhNttT=M4Ad8a~_ z6iDQ@{h2eU+bZU&E2b-Cr1x`g|6-Bbe0b=$TT2RTRNmL|2CY)1aMGRRhZmNg*A1qZ zGK$=PX91%zZx^cIcB)`?7zl-=gWH$_x6uYH)~CNhh7PB62iFnIF@)K((KhfZ6?zzY zNm8U`+J&p!gWcn;314iCiX?BHOT&2=gLSTsL+(VEmPWLA6y`@oX0!~DPnWu=G-s~_ zaNvjj0L*v7+oA7eZxVTB=-E9^xuRv>SP^SvF 
zy=I+j4{)jgO-6YSYA8JfxOa$`Y@W5ac)Z6u%nOlnyfvkjwy?)~uR@Cak`(ogBzQ?m z;J3V3+{SM&_d~`uE%q zwFc+)9-g~RvWLgDM}ppSjho$?5b`BcZ7PQ#i^t;0+VSXWiU&HbVr}MgJTH-!Wr%DJ z#xIdSh5KXG1lK(^)}i@_RlQEjxLN~b6-Z!IU@|JG!~)XSz<6XOxPVLcN&T%SZR znDsVaoQ_9*F`Vpj@XTp*F5gxQe7Q9*&>x4~B@0fu-lYw4JY!=O#6B z-P9E8Yhr&^Ru}tO8bEB#!-OPQsZ|O9OK<=lO1uZB0U<cZA(3ZD^bFAU zbs|%b(6v)q4J%h@mj@l{%|I_U6ydXc_0^9K&Y#Me@6cRkwODX!1g&cJ$RioI#1-xY z?b_nvu7gnWNb_~U@9b|H z+!tI&X|cOG;%j4>>(VYq6B8xpP3toY9xh{WfoEKMdM^<6u z8mZWS-LI^M|BEVB%>-bYdUfyO#{Qv+2PZp2$5(FRG~fNVKT?E#|7(|SvBd{N{4opt z^QtN=J6ySPt?UXbFj4I0k@!+Tgv?#H`bm9VcYK0Q{VzEdYw<(FO>5W7SGe=`|G1qS zrj(B9i0)jGM1_yUbH~P;$-~tNPSen!(gM@Ty-m$ek0Qt?`LpkzTP$C5f$S}LT;7o5 zAn>Z<>!bJhwa&}DC}lif<@R)!I|6RX!y^lK+vu4hG=-wtC}`kB=|{mwHwG+YRqzS*ZqhQxIr3Z78vHta9+)hCG9EhQ1w-5J5Aub$dm>U9lcOT7>HpJ7(3x!?OmmveUQ? zJ|F|-qouEN?339m$tvA(B-srRcl;CL=Hs%>xGs{vLPr?1q#ds+stA4LVJ+U3 zd$-6ev0~ksOS<`eEA_G$tPjZ1QiUPX zK#ZHWDn?D3>p~M9Yw-JnPkV=F9!|4-bHZJH!)qED{L9?_Gw?lS9?rWEMcn6gh)W9~o(`c8xcC#q3H!!en!B)EZH> zYC^?oUSC7FP+%ps*-~R&pueJ4^Da^O-Z$3b`&`VjD*G}i3%-)?dq;ckCN5ZV3=7)K zy55YwY1KrZ!pIvNsqgdYNNPz8-Pi5%#n|{xctSw*V~HJZN50XeG-_m<4{iUk(o9a@ z=6aw0*)wdR(9!Q^J9$V$6)!# z3<-WZZqSoVqs@?5kyuhj{c(tcVOQnhPuy6hBm8G-&GBE?jl%S# z;cq^i(R<2%uIZTi+#IQK1=E)#f}H5g%pTnf;e|E&yR1xR0fOq71V@a_Z}E2bl*^4~ z`(Ym;4d4D(&;I{&vPLp%Qs!(s$@=T#<2bdPoAS$B4kqHd<37R(3PCLi z7Hu|XmLFu9Z+A68i#oUu%~F9Fe3=)7INxw`SOd5WAlr7nS~bTyzf}2d-4zidx0gv^ zMufnVZHR+3)yH@J?u2o9p7VWe-zu^Q{{RHXjGS;Yg_~vsADgzWaH{4n-wQSA{A3y_ zm}SZ_i(t{V&J($o1;$!-*4k^P zspj`5B1CE=dJA5kOs-v};HKT4>&?(=f~QCNyA`9aCwWl~L% zt6?lU;Mp}&tkfaj?6`2`=zdG^F9|oZ;>pURhDwgb0chwB3nCzeqPRA21KqN>eETLF zbMgo|8}$VmqbKXhbthlrE26pkRI`l5m?=RNjoAHlEFQjq2lFl+QFAWsa#MPXw^w42 z5D^n?WOEvh@#F%4d++aQ5O0*qAf{m#2)2aDrNnz;2|ScOrX|G~hbVEnh~!+-+|R|w z_h;34TP|B$UEW>M!vvmV^QYBF)1*N7%9Z^@HZK#vT;whUB$;ZF#4EnihX+vXnr$kB z=;zMw-m26_cqz7Er))_Hz_Y^GW?2)j*fdq4J*85?@cAr zT2Bd*)zB9KyC-7O9lkUyBEnJB!oufzAMTLg8N4}7Q}(SkL@hn%7P5}o$b=Z0@d_PAY$Y+;!F zHhxSdEcCLx=b;1JqzHjh6WXg&+$GxT&>Xh0b_VQAdP7k4)}3C=PB>>mOt+Wt@caBr 
zS4O@W3U>Q93PY-PHdB&j>qHEOoll?-6SRgl`uqE75~BZvM6s#J`jVv#l4Jp0XIynX zJBTLD2plaa%YzG2&%155`0%HAB4Jk16TN3B*kvPGYeDlmc)c zKwxT^C?0e9nV+N{t1!lKT7ItI8sz-303nBZC2k;RGzd^)zzAYNm^s?{_Hyt;D%2C3wM!v)saBb+e{D+;XE8=k4p0U!IXh7v)4 zxgR;bv+qZ~|8Sf*${YsbT5JDu5g}d+BF%{I9aEs2r?r7Dh3tG(dTtve9v#{(w4=@7krW7 zG?vtsey;~C=St1Xak()*!1o02xh`>4hg~YK*4l19ssG}Mau|S;xd8Z1^X1`K*YuSN zDccO1&;`t07F4A~=PtvY`skCaW0t*>s|ecBD+j{}{!Zo}?aGj8(irI&`7`yle^Fik zlY##~WZnz4kFY2zH`Q4dZ{(BEe#^mXMVdLAjIgM*C|QxP?TA2MDSbZhqyh$T`@}#_ zaNNO!56^&pbqGLoISP4}x?1BHLxH9F$f%Zac7xJ%40&U=gBh>q?H!@0=}Da#4Dkz4j(JC^^H`7Fbw^t-SIGYcpD|AU+Ma?2dvJLYXM}EQ z$Frs3n#$B4$CS_a&&a)YCReDCOWqzx-03z`E=J#8c0SP=5?Fu9^ddI>Sc&5Zi2NzL zpzaVzSP*UD3~|iy2-y#BlQX)aUJ$u;`;fStW6)r{GY8*4Gyzn3fCPIAMtHK|O{yN+ zodZ$+{IwsL8fkGTxb}GM+TcPeh`F}w<+Mcs4E12 zmiyR-i#J2RPhrHNl(+0(n8L8b^_2v7M8G_54T*bq8>*)C>=J)tP>+GV-6w?cNC#8l zqgkGg=IB}4dQq|PFJD!eUVr=`C^0ebM{>9w_NEQum1(^KR-fu6xd<<9xfRvX!bRgk zm9)2%%A63E1d@|6yQ6J)BE!1AHAdw*S)$XO;V|Fkz9`Kcn@TJ_$!jh7&%QFTd!NIn zX4cd1nN#E`_B^(DwKdNyH+rSsh+$tNgr`5FZ_~i!R$<~5#48c+iR6>*gW!JRXm09=1HMcq7sK zVUb}8u!0KsT#~Eqns$0s{V^E-?WJkm-hn^aiDbK{c}?m?H>4V#J>|7a{qbm|wsKZ+<5guG#eUOq{%FHr{P3qEQPoyM9>Y_sY8_wr&YcK5Y1V zeLql^4o0<*ccyyEP_`!KWj#19_Y?$vXAQBx2W-bz0z7XYNyf(NV&|8%#>Lu7lrm+k z9@}kRllWSJU*EIXsE0u`MNfDbCtGHYG@0d$W!05clZD=1N_+DsWOWzCj}M<}%9u5n zA}F_gT6v3id}3jj1i9r^Tp?GvDA_BS2Kx~1{RtO9iKLQ|E6jSTU#P&kE81wepL#}z zIF5OL|ATT3WwenERf_Oa8&bJi54>^r;@Dk_J^!0pm<|y2MC{Myk~^jNnl(JoJo{G7 zkOH8m4xZ-W&x6e8VachVK;9*!PIn`M4w@r z8hyf>`-;~h9hr+8xs@X4tQV_~{5$B}JmA#hsf?nP{lV-OsY!HWOpC7Bk|^FI-MCPUS-V_3;Vs*?0%CcUPxPLACv9SX z)-#^s(C&~jwDdoiVI#_%z7d@6L#9Q?=8%nii!iH0*S+Llo4W{#5u>MLGWe%G{p0k^ zLg|LKTmyr9s1JLHeT{i}w#`PJmxSW<03O%djYp9@tJZrd=D^d2pWl$yX}?L`vfz<* zB>QTAe~R|C-THcRf%EjHlfbQ9cJcenvRz=)oor9E>PT~`0#7JJ>m5n6$$2_g=yh-? 
zXN5dkj&c-1L)oYxZdI$sQ%BcCkanxqu@!5-l8}x0t>vfJH$FJxf0Y_Z^F`~s{=B+Z(rMORLsk>7a!e{h6TrR@KQY&P(uBT+r7%S z5IO3pILM8EIb6h_^i;uiuUo`kru{~fQeIfNBexzMWws) z7f7z(eElw;G)jp~x6h>P{kP@nGk)@x=AFAUe9gzQ878aMb%md@##9_!?2pXu!$Y_M zwpeAF+Ik6rLEag3Q72WeEh@K68<^A+P~9@qxe&-T;2#SxZLd)Bg!5~u3_I#U%p#;4 zG6{tje%NiKey`)(a|c4w_1tX!3#Y1Dv!RNgC;rj zNSw(ch~VN^n-ViI?G>`6=x~})vUl+`{^6}LP#Lo@oVv&QZ%IsfwC`&V`Op%$9{mY1O2ae(n86PKdsvc4gN5g!GG#Q5q?UCBwg{eD zF^$n%3O7Dp^f(^vcIyp0CIKs6D#;ca&gJ}N0I%)cXEt^^CRpE#k+V`sGh&>>^|v zoa465hl$I_`77{Ilv2Zh7ljMYZjF3mKvT*T^6Y?V?rWf0y#m5S!bDh@nvl6!Oh;9= za{<4dVnbh-oH{39j92pc4ZS%xtsJoKOl`iBQdqJu5YKj#4 zCl3>=jU+t@Pu_JWI?fwSgQwqrG{u0c2)b=_*J$|l+U*)W0Ri5qvrkNZ#*UD5@xC>2 z?js2m0(@Otsp~fkh?iajKaJw&Y~1M-ogV_d{?eY1+%_!3a3Yv?muzrG(0_2O8D!1t zxVxNEDHk^=?ZH{(iZw?mXCWb=x+%ILy-}+cPopzv?`eoRh#QjdpiAs z>(1-ek18wN3IrbrHE}sC&IsH$m_d(#nr2&ptg7JXme!57 zr|^f9tuz1{e%(aZr+cFt*F*IuWPA4(8-YysyMY0?x-L5P47fQ3g1fbKzJX|2OBZ*u z_)(Jq*^4)F9e|kcw?riD%1Is*s-;H8BKhGh?~XWruUfv9O^k?A!>|uKY4(_Gi=Hw6 z;FqLmCKdW3HhiT8+c%H%2M_p=&Y(Z`%j>qeNw5zqrX%^`h{MuLL-g1ad`>ePqvntg z=c~Z>p@mBcQR6+0&&|SFMTBH4q3k&j7u2_0l2?s8nV)?b=>y~a^*wr%Y~4lvm&C=!?`;0<^sgz9Oqj zC+?uJW-LD5RNwU)XfoPfbRy{X^ciS4*he+d{kZv+hK5{CNCE=WKSb)`3X2hI0P@L@ zQJ)*{K=)eGROd;w@9t~T4sAEkk-a%s9@gc;0?ea{jFqNa{JXvKOD^`IteFCsHhTAy zx3o%`=ckWPpFg&TI%>#69bb`-HCQV>x1K4Zyi;O0=YePQ+BZ@08BAo)$Xz;onEujg z08opGV~iw)0x3$Pm$#Bq{PC!y^N4+etu(M9zvpdk^4{Z$pZs$P#QyAE6JzvuL`ily z(iMNBSF57@P02vK&Z(oHp!*zl7!N{p*AkVt;eSjx{(ai<|Mc&)U-u!7LmOhtJnMDC($h-N3j%(rbtn?kLcG5YBh~{4)yKEU1%zUc zB?(!%HvWcPvYcfAxtyjf4`8H0Uq4Kf;vah@V-_!;$-{?%_AsggPy%=FV zlVIH{e)&mu^409&DeivmW>FOq&e%A~4yV;?5UwpKp5xqW!{sRZ|@jM^U^d9}sh&5GU_=z!TA~d-k7< z`o?Ae1a|4;blpqm%wliVbw=lvm;=)@@;4}MI4Vo-G3}xrq9T1{0WrQF8aWjNsCSg) z*+DT(#cv62xfK9YFCz${y9kIIS{+{=SRIk1Dm7#AHbS3~xGcfEC%Zap1q4?ggGr+` zn3et|xO%3i=IEZ~WuF2E>l{nr6&<|TP?dr2d(I8{8*V$YE14#xaqqvs=MCJTp=c4u zG+|^NDiROSWdN~M?`7~gk^~hbeWulWE%i1Vh!c&YSGZ-+C=s42;IgN7wZ#ZkzF1PP~nh zKi=XiVc70nOZXGQdCHQ6mPRh}c*FjdxByZY&@6heS?d4ap52};xO%8As8ccsFmwi% 
zQV&x%`N-kl>(>SfFRe6F4Ea-V>5NOUh7zvUr-8L|{pyZ@Pxq=_j*LJ0z7I2}8(8%p z1cm(_!-OIB?UG20 z$YUZUFdlNkL%5ubRl@fYqhiu)%CkZd-z8QY9X)4ji2k7v<3DsS;2+J(qFRom^>|7= z$MV~sQ*!Q63W(E=$O=nzh{ob-W>0b=TvVL9N*4y~bO3k0`SvAExjkwUX#Fj>-rFQA z^jc5Kq>sFX+pWh&$#ig>u1+K8Atjm4@>u|eWI^s*wEP+WL2Ky-sJ>~p?!9K3{nv`vFD-%%^Tqz4oS#c)^{qAZ&SBa zoneRbyu@&ijcX9lh!Ko%o;qX6=b3ApXc=3hIIAi9>V@mk(eHRmIIiY1^@u5^TYw-cW^BZruV}H^;qiwvHK}M3~%DjUD zvafG(q(s%>-?ZTOQ~-r6o^Q53Zdxt(YYK(vg~b+`b7L~4T${8CtII?NaDH13dh3Sc`bX$ z6-c)`*9}v{eWWefS@2|sM%f<)vWHtUzkO{M)g$4IqGZKZ?P5U->6`8$Xu|#psric= z22hS0gI;4%GMX4}1=&j_SQ;P>FIj-lR(uM`E+Uwkang*mWAy;ct ziI;VHi2DuHZNaPEa|Ih0x~8>3ui z>)6MdgId2C3+`zKt1n*ToWo(Qy*?|og&jJu~aNEB0(U5GRF>c+2KmwI&k`%Cb!SWj1lDkY|0HO1v!!iCRhqg zP{lo$^!7kZ`QD0se{|Tkzpt?5KI%5Gxj#z&wmCom@%f|xi5+V}YywQ>wfsd4OPJT08)@` zE3``3t$&7+IdZuUCW1yW$y4}!6{ZCaSwnd zXW>|o&g%9eF4xn!#XiMnwO6DWP$^Q$^C^G67hW*>L>KA=Q6S}rw3lz>{Y+rP>uTYB za$GX4i@A)xn!8y9oGEPb{oaD+<=7tF>5yx$4TT&)Pt5{;$T$YfA`Qfb)k!(18>1%9 zBrqpZC;IHfQAp-n$*o(r=!+!GRwJOM9_Wl;^6;yxsia=k7I_*rIZv~5ESXP+2A!0c zqBZV3`Eb!&iG%%JgjJHsSuZ5l7dGe*%0@fuTzlCb*2GD4J;E*C$aKDxr=q-wKiDkg z;s(7#yyS`pHpzln^dUSlMO5!ewkI=<^3Bg9lo43jTvCiiOHC~96@kNwhNXe)RprPV zyKS#=vqF)r#et+ZC{m=~)Wu7R3TccT_zStWkm9IxP!l`{R^AMf=6B(-g1QhKN^JT$gnA*?~~ zC%U5i#8G7piqe)s4~uRy2Gt`75wgY;yF(RKMH3;|K#s`L{YNPoh0<77-GS26(@Zkc zz+TT}J$VgnT_d7 z?$qYJ&vP4m^P9EImR~NKyOF!w$({N$g^XG{{mb@kHaq_=M?bp6TgjyokzMYfB97% zz#-tJ;W`7S#jJ{vddiQuy?>?}6sYZ!jI+0QXnUsJ2OXD#;dK&bCoaosD5d@kX`Etl z;O3t&aR z^3+wRG~bdm`c5sm8fkp;ic|+kWkPg&>-l)wQ%mm$Ms0J)dzf7xQB8xOy6J<>&3Q+m(j{2nZ#$VuSyLIL1z8P~PDI!ZBd>wFg5!L5cO{iV2l8_AxeJXvkMz`=xE{(?c zt>IMbuRkG|R$Ei|C_zd7%SIkf`wOg<{vQ~~s+IrDcl)4YrKj+K?S^)ZcRp8#YAs#9 zE{QGYDda}2y}xp=PdL|*ZH+)cwMOo-&zr;Q>)vpD9T{x0BM3jf0CpEb8=ihC$n6NmqVoR#ssna7&Q z6rkt#k$r{j3&o0k>$GblFQ&ex{rY`~6Y&i0IOP&9BKh+VdjaEq;Q|h$w3xO^_N3Wr zl70XdMF|jgr`aC0V`Se|i?r|SJ1O5UIj(osW}4rN%dt>=Koxe!mxVj^8A%t{lGHWQW*_NeKJ>BD z?77`u*k_ZETF+y~JlgT=tYS}nThQsO_U>H31*x5)hSR-(4?2yy-u{3-0|DBy07IjL 
zdl{JFTKLXyQBx?lOo!U^F~8qW3d7`yzdTV-{S_}}ymS4n!NO7l#@z%Xk;8*TeOOrW?q|EX7@g#(UVZqcgM_^orw{=iaWqO;s=frFk@Bx;ceON?s=F!dJi3sdi<-#>ZwGKBM-kvHj^G?*3P*Yw!bWx}mxjA3!s zMCqyvtulm;L5D@;m^*NKr?aV15)K+F&7gV)`3uMd8!HhY9qU!AS65OiZ3toJst&lS zIU)a&QsiK$3CRCh5NvSPZRNz<1A@ChZuhV49AIk%wmLYYGRV)RGi@o)B|(JUW#HMb zMCakGKG%%gc-N}UBhMr&&XY|#`wA<53T~H<^ zB_S`@?K^Q>32pkwoz;Bd`iy4Yyx*6{5v`Y?GK;(tjh)b<)1g=wqlfU$_&HSUZsglZ z(c;04g3=R7QR!D>`?^}av=ofF(t2;9#cUDw%5x_cDiN>; z0cCe7&ph!-;q;5Dn5Xj?kx{J>-YxdbN0A-9tYY$sZNP?eo_DP3r(7nS%rfl;Q`fPI z$8)d8NL)FG9cN!03p?zCW_h{esB^I1SDMusu+{~VnO7z?5_(`25~lX&#`<=9wIaL_ zez47d=5^)&|xdW&Qimv7Gge{z$8noXmVntoS^{RE1X-0_BcO zTjIL?pWV2B;qU+H!2I_=p+Ur91kk=j{;hq9HF@Rr0LPXZ>C9>7A~H5E-)>-;kjmfL z!S^c&g}lQDm^@|@3qfD;U*e_mth|kg13nMNqXFbn2%p{W1>souiT^BU9H!I>&O`!1 zd2(=zV^V=xCmBxXqpi5^(zdr={Hgd5TRQ!QS%xmmn1I41qB|0FFz*;UGzQbP=sG)J zs9a%g3i~}kQ13kl zC4RX0iT^yf-agUlmoG0eUJJh{{z5paf1pIm4f*-0!CWOJ>E-S4o4-MJLGzsWY} zKw%EZHXa>cCpcMv0HKkrGY2%f;Zx%mwN(Ayd?|Dzm35coO?pO+Xa}!;eHZjXjI62^ z3rW>#R8q?^*8niKoo8e2!m#jFaEA_r|><9XTceOs>2^fOm<- z25{}fS7@l_=ZrSxJT(~A>-cuzRXo!lF>gNmLb~Rr&zE3)Z`Ff7?0<8YIp$o7J2sSC zV58}YlD4{cca8zZ&9!AXK3urXGRJ`Xj0y|n#x=tj?*K1_ER%6dspP_RGu{Lz>nq)K zaZu~LP6-+I`?iGMj!6!D_z#t@CX)V=YEOyEk75+*UASZ)YSckIEh2p6mNDv!BgdjP z>|ZozRVKc-B}LuAZaPD`{HR3koP6GSAHwYFHD&n^n(!b<x2hTbGTfrxq`_;KUns zPBR^E*N!jD>*lNn9O5> zi0?&X!7oC)f?p)tKqLXVFQai=nj5d;FBuLZv!CS4f%%Zx4=S7!0{9?qzgrsRLVybs zFU#*a1eEE)v9OW3muFXbV9oQdWAPB$pE>X+#C#&5s62g^MzrYJiwHi`u$Dd{v&iqh zvI$M8TUVC=d|Y!!M;B%L{8txg=dIbwCgn}TZWx`kwBLK_VWk5Od?}~#u zQ-2A<mei8WOZufEs~(*VC3mX|X5Y0M?wzfmg%;lO zK9>e_X5F%U1q!Ese!&0K=fUD$BtE+?n-GJ(+rK$qR2ng1t5=Ym5k2vqQiL=K?3=K; z_c9$`uGc%sMa*RY<9Y@tqr}swVMICvJqU7Lphfaqf!s54Z4AqAZ73BHoj)nMCRQ9W z8*k2kYkcH}VN&$C1-A$GOugeefK>4!JKGA8e~m7^*`+jJrS*|3g}N~BASqWc$=}+y zNAgY#yVdyv2EgyORn1bZB$9U=g%tLpnz<3ki~#e9wo`BPZtt@GAnpEe@Y_4{hI}! 
z6HGe(A`wB%h~RZkD62b&P_@%_ia{iqjC!71NfU+lSLK-T7W+M-X<`4&ohXBH_w~2y zCh&#&oZmz3hQEi}zOtrann1SIzdsbF3TYyMO!5jW7=&p?7Z9cs>*1)%--hNti}=UT zmi{C7{Jo1w`luywFMX!Ym`y{u@|8gPQNbY1IL$a*^)P=XvW=g&y2v zAsC@d54p_O!3PVbe)r`%DaeqGLH%{&Ek0L&)#^R7b(UVTyVAIQ(H_58!a3(Gs{}f& zO+F%C&l8M7u`NWF3GyTrwNJ6F2@w0oNEevT`oP;LFVk|F=h-S5ZaI$!UfQo)J8{4K z13I!d&Mzhi=}ID0^Q~XFy8Va=W#(p;oX`gf5=hFl)})T z$Zs0h;~K!g^@cP+ro!>zLyc_6dc%uU!U10JD1NF3;I9DL%JwYmh+-VLjo~yh;J^mj z5Ie%F2m)LXpx{g%dtJg`&6sUCGqxht!ku8M6p;CNtXANb@^Wf6w9L!ZM3>l&+e9&{fIaEq5ONK8 zyVCx;?B7WMrPh7WzHr-L=bD|cXpwrDVEK81MPvQy2ld=Deiq@h z4UYZQl>hM6dv=(yf6c<{z0zQ21X64&jXItjtz0)GAh=e>5dG4iN?$vR%T?#j!^iXX zo)_Jjo8ao`zf>G1E)XXXJ1P2yhyP<43E#wo!KlFxCwCcp1=@rd6{Z`XBQj(AbOeui z>p^QKJ^wyxJ)-qY`EGj?HVelkhL2 zR&o!gMAj-U2>e`RDUS`CCMs41;Z99N+OuW+MIZr8(lB@$S>w{Sisj7 z%e>Z)5aSxqHgz7(IILc4aVJACb^Rl;ffeC2?BMyalU&;zB)frrZDHU27h%!y(vM4i zOU{by(360fN^=8&AH-$0R3bG<5-#tcf)W{nZp%{9HD8^yX4Xqx7gGIpki*T|DYtj7 zqg=>@UboukySOVHQ{A1Oa@Uqqh0GvgqOHh)P2_DTWMB4CE9TOB`y#8up48`jg}V_8 z^IWD?TpYAuwea{x&N1&K?DVy-&{PO`nNa2lXm`xC2~Oe)D8@Rw>W%u8FFqwP$|;9&_1D|+WIFzA3z6hbig`u4Y{S(EKZhve_i4!=nH+b| zpC3zB=bbBJ)cTLt@MwH z8-FOKnB2PR5gpysuINosI0;tGPLpEnu_X@M;v-V)x2ad=kA#uB4X(m4e%K=9I8*yq4E1mvL!gprrvI8XHXdY)l{=e^ zZmy4N)zrm}2Lx@odq&b~RM?7y?Z2oj>$|#Y>KUk=TDn1N)?AF@`P_^=?w$Wd-Ft>L zwRL^JL6Cs-rt~17AXVu-pwdJH3q@%vT|fi`0t7-4PcdO4VRqmshU0~FPBg)-L$;f z&c$YP-HVAUB%f^2XT zwP}RX*`$muIGFr7lAZi^4!9SZg{IhfM2`Z@x9=e*{lK`nmWr#?Y|oVnq}W>Q8oc$#QIu#$=!Y*P_fsSH%yg1cke*_M_vMXl912` zX=0bM3EPqIIEhl*;z&#NxveN!+33Z69UiI6Q5g>`Z*KDXY#{wH?QYWFXNZVKMZszF zPf_T4b>2EdXbGx;jC84@@ZXoZI-(hx{5q%6F}t4ibbO(JPT(okZU3{W0+Q*b%n`h3st2GnCtB=&&kI@xG?-W(F@|QT@jGMctzz2K)p7JI| zPUZfp!g+|U2n8ZS6_vidWkN}f%tN)ELNMSPJ><_7s z=Gsl~aGB9|aanQH^)D5%T+`@vg>iQ&DQb!*8Xq$^YeUb!J{!_hsZy*{&%}Jl|D$ON zLN=FZ#cE;R?c*Wd3<5x3k#~I8yxnh`n($P}(a6>a&5Eu|;XB>T*TBsrD(}KnuxJ;2 z`Z9ujF3o8IwKTVBW<~jUT{3`U(ff=?yGjAgiLFa^e9C0)dk_;Rbzc8W0yi!o^i%!E zk-ME@dkgN=zqVQBt%H?f-cNj^O!yNp6fiH)TsW@^dl&FhC@a?aHOlrQF`rf!Iyd1P 
zOzB4K@-sOJ#$loLs6+PETAT+Ak7YW#ZfUN!$*@{%AfR!w#7?2Z*6{PeXJvbG*`6Ydu8ICls5w~f+ME17BszJ!hxB(4 z=YcycSne@?5D0k>Snl90^k=>Q*_x}Bwn>HQ)@<3&F4eo93XK+k2wVOzL^H)lH!{@Z$@@T9& zb%Ub+KXDoR5j4P=reUF$r*Y`qLUM0;AE!pVhBeSFN{vvyC~y*?{JYho=X zvZ@d&#G8fH2VKI2rrJ{o2IFsKRh!YBg&FQf35qsVW_GXK;uEx1?Qz}T+~K%DUtm?D)*sRHjC5`LTx;@>%KA+Ou_luOirK~Ug^ z>C2|gi}#ncydT35=!%UuL(PbV)e28H`C!R+sdoiqCh%F;e&}cl=d1IC>B4VYwkZA6 zdZA>9e(J(-ujjEi;l}4^RjvJ06Y9BHd7}mE%@rThYE~s=i^LzpYk)bqLFD3|8tAL2V;+o;D&VmZvU15lPa432VcCBM|bXy zjUmkRzJ@^)M4diJRYpBK`9vBpcJ=+g&1Ntz25|QXRc@Y*&L6+V&O`nhe-vZ@RU?=r zg0(n+2bG4)-o#rjAa#RmPY!Q&%`eO(r^ql#w>~SmoG91J z&E|q_^_d|vnFvLl;hVR|y%wFd9`=O>&^usINJd5PB-}h0&HyV~?d8`c) z?DteenZIkFY_y5}4wC%8E;anmrGnf|O$3L1A@dL^5kGQ;hI^Q5=zJtPn>CJXCB9Xy zB9kGBC^S4ajJ&)_;r*mU{2XDM@4+BX#|7SI9oY`-AdFFspP<%PeR)WU_%gL~t@?pr zyx8;`&33iB_Iz{p3f#1Zb0*+$Ljxpn$JF4keNt|cU<7eM3ig(s!!r~Nm%a&}KNS7q z!eK7+cAD(cu8K)#42GuoTDHZFQ)nvwnqR%8YeL37{YL$+4-*Z3nsrf}j3P^{I{M1v z(N~>jiZ8&GmMUkIAY5F3ZFBke^z*r0Anuul!EZ^c!xZ1ibmeg-+bMuiJ=wkeU>Bhx z@FsHf;fR=-G$@a_qiQrHOA$a}lC%hpi2-bM*Ctnq+!ks#`PV94q$rr=?Z%=^tp`r+ z`g=6pvkZS%Q}ofIl_%k&ZIbE5L;=3@YMZJ>>Rha)b5oS;1VBFL#v4XwbuKNY+Mz=24 zPC%Hc*03SiGAG2sOl=t9%ty@!xuu=W{U7!tdX0XN*nRb?;pXmcE$YgZqHQ3qy{qN| zzuC9@7#|A+DkhnD?PgA^$fwVD#W|<+SYlt(%QsDqKS0w#TzbJ+wn9eqP|BG}n%y?B z8Asbo0&6!jl5DlC@gj-9pmuWi8Icax$Xy#Q6K36h{<|314agp7GpA8{*V~U8_SQ7r zZpzPMWH`i{aH5M=BcB?UXRr1az73j~7;(JXC)^hMM8D#23Y+&6Z>`b4p1j<6wcq72 zi?UtVkc?+reIr?XE1O9xFMkg~{;j^-j3WP$SYS@vBKMfvcM+f%o&|740nF9o46#Cq z8sVjcdX{SAcX#bKG+o`s-2vEz;IFN&eoh=Pl7IExb z!Wq50&u+F};8woyQ2p>REZs-DDNi|#fp?|Xq5)TBr7ui-Dal(bXYo#mmA$Fy-KDZz zr<{BBj*nlDvNAo>Y;Uh4Cr{1!xX7uN>;7dqRX=p+dm2T`>2?9KgeNrQK?#UVhS0nv z*JmSY??J+U#;o{Au-36QVs%ujkxtm*@&~n?h@INMXo*AM$^WJ$V#JEf)cdZrvtGJj zXQyCvb0G1jA+-LMR&SBXt$g}Z=*+%mmQMaw<}=$r)S>7du;~g)l>r$z`4EtS z$D*Yu0U7w{b&%ppg7l3(H4&sJvA^u}1UDzL=Hc(H#P|l+ob&=_D6P($H`U>oPdNK- z)Z*#`G!45zg4}YxfT8xhsLjsD%-2HEHZPKT>ufz0jc@majNSQPOW4NM#6PDP;mR;M zW^w1Mi&Mv`mnvpO;J2-4(9~AGkZb=tO@seOCCy 
zwFf$K2c!$EH_e4&)Wi_3^{FqtI77xP%!`u|Yewcx%C7C~LZ9ajXQKkWpSB+5EIPHL zzz!zuv%hz+Xb1f2FEw(#wM%$*?#f&0Z4+0UfKoy)mOI}k~JsNFI<6|nw)n1YE1)34>Py#+rG8}nukrv9T+Kd|0=)n7P`587M0?^%kTkcqja zW+I{pIkKxp*EHha&Kf`wUdnz3Y-X*wDzV>OD5#Z%pbH{RWLU`VLX5Pz5JBAFS?wRN zCaB}zAv?4B-d@GcX+N5U#s~TQeOwf!?Sv`A2VgK3kGxh=QM5^{WgF9HDJ5(LHh%h& zd`m4e#LAeUun7-W(R+JVf$2{Yq1_cIeHbyRH7s*B{)zK7U7-_Mo(M2_L?IZ2uhnM7f?;F~cT1Sim*d?wv6 z2^iUve<8BBJLs~jI;FENUd>Mb8G6T13OM>?Qh&r;s65WB=5h59)1%B)0;-{2^)$iy zqmC3Pbc}pW@4@C#JvJrYtoV^!gE7r~_o-(F?pIi1Gr?$0i81`Sie5GXbtTahUxl_D*mQNzG7Rk9W=dV?d< zd5&e?6ULYQKb%alb9?g`%2D?Tg$u=-5JS^R0So8_K)~vT+a4*WxsqGzsYr5r%YV?h z8z%oz^A`9`1ks<5Xq+4hGGk~+EOf&coDzdZ}t6#=KQkR)dNdL=c=2=8p z(8I~7S!MMuDU?z#&!3{ke;7ypubLSDlkbr?!JQZm&OC)59)VYzIwoyp88af(;9L8G zAnkA{UQwCQV3#y~!^TrI_9cpGj~iWw?S|n3@vVS=1q!i{bHDGY)07Y)=^M@QQti&w zr1B#JP?rB&N@5Z~fgo@Cj39O!4?ImPkn6ueza=}S&lFcFby8hja2t#+cMFnZjx_OH zR)Z?KP6bO!$dTwTb!CNRMI{MZ-%WqT(N2ZbbGwB^MFk?xi;mXXfc_M_XYJuGi1Al2 z(%{z8w7_;mR#5mq_|nlMdAz6v?r2%5tkiDk(s;|~X4<;^Nl}}k%Tmw}K5DP(GZ`32 zogo1*&jVi-@&Hzxsh@S`zX{CPH)7bX%%)rVxp3|gNA~8;zS@@r{kjaSxCP0>IO?JO z0(`+fv3T`rP))yFRtnxR%wWUIkOAEz6)5jcAVDe0d;|CgzcJ|=&s*LI=Y_I)H&YXgPn!Y~8$A5O zBIl!44^(!m%e+=WkYvi~A?i0a`y8#j1ViZeXU6-D8wbL?K-Wr|sgJprNMwYug9dP3 z0cwi2mZ1L>=mi3}5uZ@pjx1viFCr$ggfkuB@?#fAW&^P`~ECsjg8MFod z@292CQ@#r1JLh-}j!1o#x91)p{ebwY6oaZ|AU=<&0v<5kbrXDj)w&gh%S05H6=W>shn4>YOrMO{L0NQ14cZYUFTo6++;iT)$kfx zN|{lMUIRkjva-Z5tW4A#cO};Jlh!-wg8s|$vAP=2t;RA~Ej}HYGgLtAlz1!hBTnsV z{6}`K)`z~cA2&jpA->sv9mJ@d8sJWn2T+7Jv9cTVVzd34eI(b|rs=GkIoU#6!pSQA zWwU+mgY9#_`V$z~G*yUzvtT6H&o^K*(G zZa(G1kHxLwkt>f^UyuBF&Ki#%GtP#vZF#p!LJX$iO!~R^v|zfmNoc9st>*4?3;^#o zQ&aSbvn$cMz;WC3QF6P7%f&{4KvX&ec~;X;J6a90V(k4ze_AWn@Qe3(Wv@8FqF`U< zrW24^#r7SH>Q}tqEY*2LMk8m1jlwyxaje=8(&ovew~JV;oj$P{dwi-AOC(wIHT9vu z9flpUdHw~Y%%t~?SVaBTpXmkL_)HXP(daL>Z=m9`C|KcOfsj_L)t~sHP2jV<1Xila zJy5czMuA8UtWU#+ou6vs|mcSNZJp+YDsJ4<^*1YL-j6$t9S z?4jK#8Fb-;rFJf&`R&(u#hCBX6<6GX7v8q2(8e6hd}~qY6CP3+5D~63ya>sJlGe6K zD~ty|mj`a-?g;#tHEB;3S-;sh4MMlg=GsK=gKUdO#+5NxtuLY3my(Qb6|&lrsDvT) 
z3JQ=dGOKIE??6Z;xD5U_5m2*CLjPlJE`SiNDdX>&Wh61Lysq)%O^E| zev+praSD~EfD@3g_d)?1b_GsLc#yU1Ehp4?7&;mElVxP2x=2SU%oq|(MbN%X*7gag z`x&aO-K4If*#{e|oKLQf-+_4vK>QMe)M185AUjLAnF-?M1^A8=_KA{-G;Z>86%HG; zQ^0@x-YL>FqRm>7L(GgrVd;&D{$I5oJ204~w+Fi23F-33zKvkw)WxrSjbRGTxU43^ zIiHlH|8ZnYD=w*2D?w*idy|g?vL_&brHDGP0;}{QX_cD&y-Ig8hDC{LD%ViMr9^jZ z&%GU|=Ip;97hObwz6CUkmQhgRxz^cH<>!T& zdM>xH3&{#WTi;m%zuy3~)i*~bZKZ4ff)oQ`G)ZU;$}K=4+`B_M@7?zWf3p1`Xl{Cz z2Up8VU_3aILY_a2LUD&HP#iunn6|hCY$E_tC=L9r2UN8?XEN!bM9wQss2$2a)Rqva zELlJsfgVo&-axQ&fUD1Ky^Pe%b9(R|0rj#Dj6_j6e9D z4&Y0UypK;tlNXmq3{g}LtOp?8$HBx6f$s|YA%xyzjbv3#2b(cCBww<63EVt?XB>^- z!(&0JvFJIYz6~s?kudycq??at&ZDx=BS58`&K@Y&&OgPQH65f@9mX5>&CsL+_Jx7) z@&!;CN)n$X?W+FPJF|k%ZH+ItD#dk&V`S5Qc?I{%U(`rwien8BQ^%YD(wQc%m`<49 zC|Y_Rti;P06Z|d!r=);u@`4@1c#!_;yxY?oBj<@Oc0;Ou*NYpL>xO=Lm} zD)CXX*}mb;aM&n<=89f&_}ZCr+k~O>P$xF>bI(_$RJ$MH=V7~%z~)P64G6CIHb8eA z3v?C?ONl$i~7mnZde1}inxZWdk1 zI6NeVY!;ox($BWOi`$Q_1!gb|$IrmfoRc9m8`Y?dPb3B&nh-%>dM0mNg?y$1&WZ36 zx2)&AXxb|FSWz3ota=!MkNHb~1QTfRL_}fKu-u(jh}qOBxf`5_>{AkOZ3-Qem0$=jz)u=$uK~5fIW^bAo$v$BtwYG+(KNKlZFcSa)L32eVN&ABuQ3;q z#rZc{Zh8D<)bfqJELzL9y|J1IWKg`|^yY;<=&;i0A-l8dgqJChS~0ACm^>7(vh|rF zNs}TmsC~-l`$7Qtxpw7ENibB2+PQ5A4WA(P9`;nEta7TQ@AtM=gsMd;r9AOQrA`OemtIHntjI|WE=(HGx0TYFL z`K~8XZRu}OKS^kK%>w5L$hB=$6rA;T-ILLgVKece9QSvcx*A)3Dzkzy74__t-vqIp zFp@aP*W@^)l^Ep+YN0i*vcpKZH6iv1oi!CE zPS`(vT@{Xe7}4sQfEXi{I4~qn=mIw&PYWQtVG_8 zT`>;_oAU+y@4fjI)R$3PMxH=Dyqa++F8Vx^;Y$DqerB?jmqY)S!ZZ1c86wMk^vvo~ zUWugjruxJOBaHnQ#365INW!|OWb}f}j{oO&cbMNtB0m{>`y8zeeQCv!II1lc2y(G2 zsKqQyn!!EDULI;h+2ZIr%CS#omVUangPC%4lEc})ujUzUugD3rnCx5d;{3$=C6UMM z3aZga0V_KsXwW5CO>Z3T!hZBXZ8cB)-<HVEWaQ=9w8wBV&a`HEEZIO41hnbmI4o8(;f8mWo&q4oY0 zO#QCnVbQA=9g#cCR6^|aUA|Es-wDk4ycvtzEpZN&yV$)qoPi<*{GViKpLfTXKr~vY z)S8%r=`kce-hSBbs0&MDKMl|4f*<7~V#mWL_eotlsz!E z_N|RH-oK8$$lmVEW+f=S!nT@5<%Q}8r zV8cv36BiZ49}Ic0sG+J>(^`-P?Bsy!8xkA@;HiIOGT-7+I4;vZq=|JdKA70<;%sa7 z(oEjYM^NkJopr+#3kg07?Cf3IJ**{U^I+zmLw~kS{P9jzR>-X{TuinKF;|5InJG1R 
zG#2`~=X|V4094e%#^0a$Fait`GlamXF)A#%=F5kJt5Oq{?EDdfky-)@f<=7Hy6ko2 zfT!Au>9tZux_}$6FWpkkc|O$rX`9T{+jKUI9D7aY^tvg?6Ue^cn?Q)#i+433(1z9n z+#iP!xU(eO-$urXY8TY3B;EDHVsh{ME3}Cr#9D!~5qkujx}VnQgM?#NC8s{)DNlX_{3WUFCtUWGes^S zaniXC;kr{dAl!*>qV6C0g9mGQX7gyqxN5r79ecIs+z}aL!H*OXa^>~0w-*~Bv^M*S zJeRtYQ4N#-b+Pl-{6<<^-QLs(V&bDC0Vh8Jb191|fmVSciT97suAtmul@N zVUH%nzYI8gemHaHG#9spji^qc*{M6UZXs=JK`~OujTv|g16xj1sj>_I(UIKHcl%PE zcbWjrm+K{#li+`m#1G}82F05-KqMyg*8T>9TW;e<>wEA5jhB{~%)j-Yk=7T0ki`Yk z_bAa*O6l%nUe!)N!54=VsKJc~m$?+3`?d>?t^wCS2WWUJA472Mz6Smk{U z@S>2lkEEzk$Tce;B~i^bHB{UGR#)P&keS1X)(pQ<48i;8o0QT*8AF#9?yCxjs#idn zX@6Ogcs@|A-fYM9RVnFan~;Jcg^k;rSalLI zbcL4*$mcHGmZ3MVcX~HUg*v;O(o$T%ftUsD-=7Yc82I!P1h{a;zb@RjeD!Ekz-m*j z$nPCK_<`@$^XsC?WEK~VuHC0)^gf|vjjos?A0w?m;SUuf+22kg6AO`#Hq(i@Zl1Ou z)0HN)jtBE~Vu~amRHTr2pdNuxRd$kYu_ccrhe;$bnN*B{g(^vBjM_(%x zp>j6INvcj9y)M9K0%bmClBWJe zTWTF(FF3KGkDYg1AO-p57bC-}kGd#l(AuMemimaQ+A4SUbIZKv<|{1La#yoL80|FngN`jXN9H_30(wpF27d z$HsF9%3<6gJO-9uc|^ZvHABQnRW`Oxq}Hb31T)SuyTGh2vD1lj)kMLfYdJ9EzS$oH zXG&}fj5oo6ASg|{$Jh)?Jw4E$gA27BYCO8LYy875k|#@#BbC*`@%7Hp-a1{N&e3}c zdv2UZYZ$_B)*vdsG}3XSlswl=^r4xf5u;Ad$FaOPe+GpeRnea{s)+5|IJ@Wvlu@}n zLs@+cB`k?l`V}Kz>xH~;*R@_jLxB2)W3&anee@nRc>rkBUX#G-by@B7<}Ihn-Qz0N zcjd$HzC5gd{W9;2*!#dN98_>YfL%@RZzLip8V3A+$<+MDc2?F#-|v+jP0YO#kbhd+ z$T&2fV@ocON}(S5b0Lb6K4Vs>ex;Vhnj3# z0E`|<6pw9DN-B&Fc-o&7mGI|0!0J~2D*KQ1OwGW3UAuBMyv3+G>qiPAe@)uEKoQ5XOG}k5+0STOL``q8inoh=f^<)b{LISEJW>5+7Tz#i z?Rl>3ZT3W~>qj%~x#tUQ5m(0n+9=CrkMEGabor?vw4)Z+H33Qo8J%22kmr)H)xE@1 zNTF(++FPGbD*#*VFJ?C)T=3#jzD?d~!9d+}>MamPW$tGHyB4otr?~>BS(9kVU9-qM9vvXTm3vtBf+_jjs?>K~wH)kcZbQl+Fo2!^ zu_#pAcZrw2}c7`@v+Mbp__-TFoWII!B)t zmebal)*4|w@>8+CAtd%K+YWv9>}Z-nNGB z2l5_aOiXMG1Fsxv=KUM{@m9&?)#rt-iv5^r$@k>dMfnI%;!$**2ACREY`qPT_9o@T_Q|b_D;c{>Fk-lcBtbeS z47I43OLPOt+5}P!M;|%mM18i;Xg<~Y_2X>5wpkV0pgP`=m7zeTS^vAT z%C9BFT;(WI^<3U}wO+0ZY2&spyr_H%a&WpP?)W8q&I$`$>fpyGa=?k!6}NCqDQndQ z2d=3C^=eFg1djI-=~Yj^An$wF`wjI? 
z9VQ+C>{xH{gE<#arxL)DZ9OjPNQdRbp1%dHo zvyv3L60nw#X)>(R4$><Q*t36s!yw=jRz!Idx!{Lk4pbIt!9&!~!xE1YF1G07xi8uWg z7GsbP^Rtdm4b}@GbwY_NLFnK>VN5zIr5`Zq9`vdwj@zOD-ZrZk6I^$PFbEkqC47DF^Dc26KH1v(@3El&?Rm#jo zqGze7r!_nDN`Bt)|2z1HEm3g&7Ojx z+m7stanBV;7oKk=y0kZiuU<75`N$5TuR8@P(!8iNyK&0myB}x{$SV~dy%5_dTU4EhX>VtY7jXl0C z216gVIbamjWyt}Jwl{*2Mjk-nB+aFEc1ChPd>b$o3Qz+Gr{g2cFSe&jusS%6qi^ZA zxpI@#h=$S7Z}pdB{2r5OX_sG_s50Qei|TCFyehj_?M!P^BK?6Po&`&>r`mcYXh|K$ zGXITy3JPj#G{0+W-^_>{wT&4&9D}GW@G54pfCttvE%H(iWZWw8*}%v-N0=34H89-z zN>1^cM@gN@puq~lT}(+R_Gkcdi1%)t1Vj1Z#0Hb!L-`=y%~#igjFX~Tdd9-b|8Akg z)1}7&j@D&1R2$l2!-QH_egEHlJ6V}W!Pmbuew z3^#D|N%XJZn~9fZk%#h_9wv^V7YZPcdO<#(>*DInH`SN^R3%w&%VR)vUON7j zNm21H$ZescXhPnU^Gpq4Syj`Aw-iUyFcOpF;mtNafle%&cBQQ6wy){Y;In`8lm{GZ zw)Xs27S~Wj{accUyQTkv^u1ahtXUR+ ztQZiI#{ODex*O}?og?%TvKr{Gu#HesgV&Cf5{u=F|0DVIKL@$LCD_RBBg04gTZqbj z9lvB^cku9IX0nEZC<{HIh@&vll!`fD{LzAHM8+s-@%a&bT~WmR1(LoHKXscf4Q_PxTWt;gG>ItNPuioSlOri_!_g* zkaokC4nj^|V?fU1v~VNc_t21{Eamp_xVDGAbiEr5G{&^u*qDb(AkJxR18Qmh$dako zwQ%xB1qo1W1$pcheK!V5p5SZJ=(!IKoJoL@FJA2CY8LbLtW;7soAm4&72g(>3>mQN z0Fw>d8*6%b%Z%OGj!YJnrsh}sJ5S*RZJj0j;#3(p777qt+}MhMA;ym5FYj*fqXf;W z?eudVuMM~O+Oti+BwMX5CKjMzk&ky?es#;~bjH#_jkfQ+3*!xkLU|BfPehkK9eC{B z4>R7}*~)+XP>+Lcj3G(arcS8riQ5hake@&3?lbgdTt|Hkcu9ssSL98u+{ zLkDV}bXW>w#bW?{Zdt!8tGi04sU@zq4jJ@@F$}$CG<^{gDotC$4 zQ?2LbeINgZB|{dYfhVU(BpCJnaQMN$o!-VVQgb^hLnDFGUni3RLn;f2&U8ok7VKeW z*B(bGoN zP3`dos{;4Cq}`m=gMvr*u#Iym#+3=nrV?4@+Ht?iL~{61KDEj?5 z?HglH&)3}gRjiB2MLGVoO`IO1V>RH5ycwY_B3 zPz!y=lzsd!8oXvVD-KV+HLmHhFa96QRSQN09TLtSyj1xZrLpQ&RL>prp0euRRE<)j z*yJhMfiU@`vWB0Uf>Zh&LcD<|3`!x$z;%nrs?%cby2~KE=1nf%E2zoN0$AoT`cq28 z%$O+>m6`I(vXg>sMP&@TfTL}EaFzp&G)oUe3Dn*v{v)9wB{#HiZY%R8H2u&njnG!1 zi8OQBHe}kr1q?N}5L-{|9^g$ZCf6mcf)H;}u$0vEn5x)$_nF z((JDw5HJVE6aguE;OCJu-s)|wlDyA!(cxEgL@~@z(;v1O(sZu`Me#K9rz*YM$Q)0O6R)CPU%O6q-eSt|1`K!d5^LG|20<}a2k zH2a7aXLJ=~h@f!%-Pj(8u4*X67qzBc@aWQV>OZjI@F2upm>=YZqQVFXHf+ffv4pZe5~vFJJ!sOUHSDldalP$vGlNn#F}#+`Uak|K5p4#Jp_Lq 
z0`{SAO|E~zBX7>I9QS>B6NZ|*=5s|vb+S70Km~3i2Rp=h-TUz~$#S$Y&+;yiLaB!y zc&lvd()O0z)@r`Xem@rsGWt+(1{y;9_XC6hTjoty?cP4;`%#hOEC?(uh(m?Gie4J4 zvY>+fq5ri=Frl^3X|ZEeIr}(St$WX>dirsU^-u4UM}`mGuv|XfIjW)yoREIpUatSU zmDA;i7LB78b<@e3YR6QI8jBiJF?9Ze>rPX7RM}s@1%t&V=)-2j$oxZh5wc?C_j{xY z`U6(d_2r}ftWlp}W!%ypu2shCpdtLedn=E@{TpswN~v3?cCAM4QQw}qa(z>&`CPsR zcOlzn#rlmXY!Vn@g%zU<{5?PtnK-7B>}AKn_o9O7jMU@NG4hhXu-sOxrA}*Rt%qZk z(T!s=XA(v=t2eoIRzC#-EccH&w65%OM)6ZANl+VP9|Nsyd-bCmeT_c^?LVjs2#lG; zBdtwNVwV89UiA?Br_5@1J|9ceG` zADT1{5WB1Vti64c``Pi59J=gpORF3CGVq6}$A+=aH0ZF%U_IfXojN zlfK*?kcB}kwEds%)8ve?ixax8-+XrKjH(WYLbpcU6q%LWic-N|=_m8aWs_SL7UpUP z=R6Wx56ODS>bU7e7h{J5INsxUa|Poov#?n$%{sIvgavsC!3yi5^9Oth6DDDZkqg!j z3u7*R*QlrPl7X0A-|H7F+fupJQXk(F_g(uk)g0%c-@VQAMX=Rn*{0egPx&v;xU%a| zsdu#aX2V7d1mb+4D%g&s9dwl=Xh70dg2^=dxa(zJtG6A;SZ_q#P8-^vC1&d#emRXsd)P#bZ`wTB5aF6udNI{q$3 zFL0f8?bcNU#aAW;g6UIJsT+g{%hjf4^(~M z#0$aIzK{%EFMr?Y`IJ$6KH*%N8L-A~-xWn*hQs z-hWel5fi1}IN4ICvH{L6y+%=v*UZ0O*7($GlyeX_c$1b;j4?pLqJrv?SowW;zoHJ) zT`WhDn;4hKQMOAo<;|7EkA*Ufo@?omK3*67DNA#tFV*EVetAaO$-xAf(Dvk$sxBr( zbTK1xw01f}MH;BQSEV>4?uRS367K>#8hYj7aq8Zh7&0N~Nt1asKjqNDPe;xq_UPRAS8mYS zT0j__*V$XKvG)!J5}J|~h0u=AfM{^{%<0~{G)c*ydHYW66r~s!lxBGP_AnM(JAt zeH>A_6TWo~Vnt%WuazGkRD(&gRpJ=~+`L)U96n%Z#iQu@w6T$U<{L)Axk3YlS%vOL z2lQ1Nfv7Fxr)s{{lZX7*AV-v>M60@WMi$$UwMnny>|$nQ`cl3_`r`G3s_L3QL|I=? 
zr0OO)QM;BA_4692DQU?9U&fId2+9jfJO&lGP(Q8sb5WJf4B0Z`HikauPn=={e0M8K z>;Q0_IYUUa2X)Q^)DA09T!AqVa%HPT{Yafkf0s-P6)ohpBA46|_@&q-OU1p6K-d@v zeYGa2X)|~!c+8RBk5oN{!--cCYRoek^3}M_2^U-$S#bu#CU+c6$%<|5ZqT@FQ&P{C zD|9YZIj(>G$w=>Q zA7x$msPl;xZA&;yuT|ZlmQ>Z1$@2@1dmA!c zHeL+;!A)~-C?NrCBex8pupEK}&N$tV?pH?m+ANxqKKk1zbkQ^fKeYcmgztIIWt4(tlV^2AtqEEVbD6YYk&}@Y`C%1U7C6>Y57a8*z@6!)iaZd>0e2?$60$wH+UaA_COOJhvtqrf zhgY24wLo476~Gm`5?MhksU-z%pxFaa8~eneE|b=k4qd>n83S7NPg!x1C5Zwdi{1{7sn{nVYH>3nFwZpoC67ep zkUXC*s7kiA9)Gz8S(nWSt6>-_D|P($&l@y30^1C0|WsBE!Us*XK(c?Ca_IzeP;vx2Aj2V zIFUBJCa14`w>qY;PmMMf{Vk9jxUy4Y!dz}hJQO&ZJ;2rwhn^j$G+J(W5Z-0dZ-dCx zxN*pC0vx(; z26$L;*_aey@w);G+lQT+G=GXyEVa;SK@3Y3Sso#~s1H4&_By@L=Qbr`r=8foVKKAC zL2vl%Pe#JjKT*R2XA6*ixF2B6H{url35q*bBSl3s$AaXoKb26U+8|_u_}-5L0v7P^ z{Q=7NAWWJoVUaHLUvb2DDY@hnFHhRo2-?`1Qr0g(*%cHPou&_g_38cD8Mei43kj=Z zE;fkoLeolY*yiNjoY>T#;}q6ZqM{17v^;)A1sdp2;5W=sBLR@ZKd5w!Bz_5~M6E1= zciHb9RLC2sHj7JJ+(LNO;cXg5be_>z(|fIPTcbTz&>nBLQ@1nNEs$Jl=W4*K{YC!d z9frt++$W6)$o*F>V>?}nT^?PJa#p{hWdgEx@nv;JPsSru2rAOU93R*jTHV|PrEb+uKTX5dYr%rPE3i>F2@);aiZI(=2( z1qbpB95zG+KJ8dPg*ryLL6EKdH{4fpskt4VeYvy*p_(Za(uvJx07B%72w* zv|Kjc`bhV#o+q;<-rWqr5tyJ^&CJTvW)T9i#j@eCdVj~0GB3%2%@|6#6vn9vYAb1ug6FtgKFxDAt8S zGu}H|E;h~@L>5;$~Jsijc&Ok0ZFn?lWR1vf z6oruNWEm=iEUD~UmXPfGI+n68A(YB!u_dOmOUAw=jV-b-BfHF42V;6p>U&++?|Gir z{d=DKzW=%ZD6d`>^Ep4~dq3XCF``r(_@GF%?iWptxoev~gWJ6=YC-6fj^E{eZ=g-9 z)4yU@SySm!k|4RkyTO~J%EfqVy0j`o?b>Ae({JA?j`MG^Y~01{sV&rmqCK4Q9Fq%1 zu8tVaty~rtD5QB$$c|P_@?W_1YCqRy_%;*K^y=k+JHc$>ONkL-GS~;3x%= zSaL$yj)fFlJA=Kf-q0GjUO$(ez7m<{v{70$e&q@hLFD$Cr$vJM{zALCBr{6yYSP42=k zQ!NTF?-EuyN&bDcveOEQ=h`$wZZ2c^ON)VhT_=4x*7w}~IPQ6ZM=HfSVh9XKvvmO1 zNY-xK7P3d08#cAuP-Ob~UFqoOMs*x5W4cQFA_FY4%L9q;1KSRT0*A(ESviGeTi^iE z1sfwOaNM3l@|&I9MHhDW3Dqu$x0KO?adNL=%GmXcI?fD7#xHHcmXsHA@LJ1q*gPH4 zb139i?~3KZifixeH5|K3xP>OP zuyu;)HC%UNPjO=Xiyp9-5uWpe?6fHib@GYP&e`E>1>ICw$qaUV%nfXBZ6=w+5;!^l z3mqW>9F#W@PN$Yq(l8{3zY9snv9|aWHpYib(zy|Rpk@XYYWfM%qD7tf?9qb}9O8&*3Sf6fc5PFNxu{98i 
zmE&Hw&}g2B8TJdi|1#(a$J*8;d3FeX$DSKlCTubwhaovQ7C9GBR{xy2W!|$%F`anD zSf(UHd|0~hX)8s_Q#L`Y!I2qUo_b*14`9L>z=T1%b?B#;s)$wugQm8%F*A+3!y)@s z>qkFb<6q~SbfpXLe$JRAAwL(!aFtsS2)iW{@^$c*RW;W+f)+CcKb6RQJ#!*0qUbqS zs=RU}w^y4eSalL-*r!fKPWm4n%&NDM*dzn{0$HxR??&JH$&yn+zVeP%-PyPj29F;` z02t|kVcRGg^P6fd&3AH_Zt0InN)a<0IwKG5pVEaH_?LW10*rf4&F_Y5-Xs=JC=2XLj;G$1IVP8dK<>abakw>o4S7>|`;FTUCrs~82-1U$pb=D2}TCJo7f?@K^-7ySLU zkf5I|4`2G1UK^;rpm@c_y~rAn6?}RgprCFnTeWJb7qoY6sLe;hln7|pzqco;*HT%i zPQ@+D?b_`-LdU!;ZTHk1w|B8ky;387)y@iUYSK?;Fus-Jrzegg=!1-;^Fw=OPL4Nm zuSTD%sPsLenYw&UKXB!TM@=t{ z+~c&bd`w$Zpk_GN&)B|_vM9WywKfrp;ERHYEBoQ4>d6I8*~hC;`jw8Sc9*bWat)4Kf^^AOl9Z}T{6Lx`w#x!Q z4+oS_oPZ511SbsVoIn27Soo^ykkkcFn>4PHpd^1qJ6~psKCn$e(Mq!Gb@RbQU#dgp z*1NjCx#}GoSkb}Q&XZ5=ekuDqp)Ez0L`Wm=NN>x=Y!-pgFlbtmFOcbc@7d_4z@aoK zLu>7AjFT}1ufOSKrO;7uB_RI!(cVhhrUNeC`!c;IzwWxTTuzxt)l&fzta^0NenFY2 zwn0x>{GPBw_K2QSU3a^L?dVA-zP_LGN(K1U6~??p)0WS9uU~2An|&SO03lK3!Nyr% zdddE|xUKgTtIvVi@m!x+2lm8gnYpLnnDub5IbKwqQ`59h&xB*z;9_BofEv$C#==5c zH^*)bK=H(HWp?|p)!waXDEGSKMSVFAb&%?5t%;qn+5rJ!y8~TZcaCJ* zM>_7s@~HGu`Z@`Fr1NJ?8W+=up1}Jyw4zrNz_e+DAAI>WXixm{B(__|J#NbzD{eQd zDpQ5E2ci@LxNN@)&ARgXLEf`(wl6z(C=1QkM5W+XoWxx0Vn!{reHi&b+_C|2D|tqh zTP;Ku-#JZ}oLpAzT5RpCUE~YMLzUOU(#oRh+j#@&Rk=R~h;jT0Yr=HR&sidv89B8FrBp)OW8N88_Ma5-(~K8kz@ zPvPH!WC3P>Cx68JEhLEnPI3)XY?|VUEW8I4yRF_2Yn~alyy7G2QlBK{{{>i79sPoHgoIrT;NO7w3FL-g0n7?)alzXH zkzpI@wRJa@nrDj3KUVY!v0>%ppnL+b0<`$w823*q07|xbP-D9gGMZXx^HcXC%XDl=j1$Co~Sv9=fKt=)Q%U58{3OGeP-vzlhurva|_$RubM%~PNN1i9GJY8B*} zulUjLi{?;ZNvu};wSVt~DdFhHh35 zkOHaAhZn%supz~gWg7uLiH&z4WaI|DGAYS-3Q^2-KHtTqM)ScYp`__i(7%;(MgK$b z)SF_#3bcm*3wsWhZlLZH9^N#PNHdkb`VhT(BfsC~g?$G9muCCqJ5YhyHp?M2RW0!> zt~03PRkVh70nVmK%J0Vcb|sa&u=OKhinDc-#E=8^2XOh9*`2{78rIpB5&3YP@lU3o zT}v8}wJw&*)K5TQ9Hg!TAWk;G)<9=pD+_EUpFpkXOcJvTI(^0 z4Y5BejDY6QcFP(pW~PY!TU_h}kpbUkfv>CfDkerqeMFuuwk@aqC91sexf=MtW)hlVdN%~* z_awVhx^Bi#qLatIo2jlfSkrJ;QCWFKp9@oBCA&zHK{C7YGnimy$q)^Oqm55Nn}=_u zCmzM;HY~%P?eJ_C$L;%T$7_%aSMJY@I;zZDl+JOCrezzD 
zr#f-mTI*Lo>gaykb-IvoD@*Q6ri~VhFN`<)HE#k>HAoo1@a+L|&5cJ8bg=xuzOAnC zzceTVF!&1$oE+!q{O29>P}%kUr~O|wIq^q&X7+eh>9H{gP`q(wHNE4)oQ^V zXm|Y?#m()?$o96AyZ^MgLlox|t`HUF{t5(Tu%FV*iBTsS=acV^vbCSYwMuTs81jcN z(?J3mT9G@vJYdpS3Ew{#xHIX4E+(HL9$9Ke&@L;==8v_ymX$wfUc2!9c^mt69^KK% zY;yJ{Fg1KGnHQv_H4{V@7*JqR0p(BA#11?dYy{j=JjV)qq<3K4w?qd?Tm~4^3|tW6sUuxoKtR288vO_%5C`uD0ig$b=o!RGedDhpg!(S*_jPT-{xQljF3e8BDjW z5-?P?Iz|2iI1~JS&Ap3@Y<@TXrOpatr-Yk_xB8=q24K83Z-pST9zHk^YEJ-c<$o#$ zlc`bU?$Mx!0hsy;s1?{y^lB;6Lfx-Rxtq#7qZ*!=TO+>N2E?PCd;eiN19C2SZFoJ>+aFl3$(?4`3f~=6|ihhM_zH>BjOW)8le&MSEw}P z@RG=@(BNsvTEC(Vc+nS;6}bm(R9#!*bTfVH#-OBss-L6zkP! z-~CkJ(N6!>xhXl6Tu1D(CXX)!*8AfdLhG$hrv$$& z)2sfHplVopB#-%j96G*_tWyd)zi8kE_fI*`aqK}GCKzUW>Q0UNYprcwAAGNy+Qk*X zC{+|0ruX$&Na>2@aUZ-!+arrE=1Z5%Uy0N3`{_S)XBemWVOIdqaTu!at!K>&n#hujqsUJl$D0oz*$rp0kdlu0+Qg*x{Y+Ij9X zj_kKC(9c35j;ZqOKmj4#?GVwu^T_sRMaUmlK>eB7K@r8 zxJCMi($hSs%vfQyOFFLqWpR?rE5e9Ydf27bGKm1FGQR?ng#P2Mo;G40H3AsNAqTQ-_X(Rv0j=}q^XqX*O5pA5rh zWUXMpQnbXdCOs>?q0xqX7zn0{imE)7Q#0DL>8IF5D@E=<)1b@XJrT4H3ozi-<9m1pVS5yYLHFV)`$P+X@a4bN@yfPYum63y!2sw+-Z}cLHHEa#5jWwF< zvgf9}rWCw8Q-C!OoD30!qPYc?4)n7aPVa+nF!N#v`9oK1pG7qIbKD*)PA?Lz=rY;b zx6zSuVFj@8rR-|mHolLwxx&Tsv|bLzH+Se+F#q-}Y)Cr9%uAvhxzQK1EA8Baxrg21 zDnswV0Lm|qBtw**{)A*-sEMj}%(BQvDf+36JI+WjTQ@NC@n~|Tg&orqh#V#b5nbN- z*E#DLsSbH~xSR-IIZpNUQ|u_)BSiemHfX{|4#Sgm%TIeE;G$jyGt>9xs9l*umkN{* z$PW)(Y7r8IA?-lXlu4PC;b;@~a68YRj~~IlK-l?b4t=WCp-=sX zIwBVBO>Dy_H}AG3{E~igY+Om5hNMuC&gaN}+Xxl}NDmG#JZNozXmT3aU5mt@pEyt} zWiZ))jG55^5PmRA0>Uz{YPTmdBiw3FPRW#i=T8{7)ZTh(b0y{Y`D0yUYzdwk?2~q$ zA=G~&@_}G#MCiZ-VPy)vU4ndkd;MpK1!1Dz6qQ&OFC^ulQPY>DZsu@07{BKUJ&>1{ zZ1|Vht=KNB-XCS@?jvOE#F~+}*d8c2djp)8c6U}M^Ba?y=0l|-%?Xsp^INP-Y!lj3 z%a-Rm!av05LzzHI1M$4J%k=NW;QtS)?SJ!cm3=_Ftc``4V0NlO9E5{cs4)Tddvuoo7NgwyE?8N|J2bN?UcqnST27jF zQV;QWc`#Y0+FEp#W_w<8;2Mm%2Zz^Mi7xk=a4H1(dKySlc^3URwYpi-HYGrM))xC4 zGKv8Q;$fxG4SQ!y3Lymhxp(+&$xj*`yViVK8|}_nexSt3+cNbV5}iR7C@W9Bg5**k zmaQMQ)A?eR-Bcop<=G^Rg)kEQ$U1W%c5i+IN%vI_2-x8XCF00IKai6k9pYF}<_`84 zM3ck-8CYnO_f(n@R zFHr+YacW`%A4+KA 
zLFCc`VuKnoQw}!m$b8RkSBFrU4KMusK;q-W*%q{!NXq@o0o zst-H)pBDqjV!PqJa6IT~F_yNM#3Z>{*_{0>W4_}bzH6Uos^aAdI-#TqSB(2bJ_4MA zX*Y8G*&+QB4~Eb-o%eEbUw2Tw`?XrFpg@+>BZ*~s>bFXs#|09seXs}Kn`ZAoejB_} zAioVzJ!Jq*@;K%%%h%>lKtK>(zZ$cheW?%4`~CHe0^|2i{A$#xd?zcRAJ}-4Ut;CB zSep?^ft$&3tM`L1JsL4Jxi-*-jZx4%BThT!&wi+o4#u4y-AE4<)c|D8n(St&gInF< z2?n9^jxcJUqn~03*|<+=`r!LlF3)w|NA!z8R|}c$PP7M~T?#bX5-W<#||f z8}+~sYc)kZ^3UH8a#|b&=cbJCZ`Cfe{Qhn-{K*rXKlCyCq$^I*FSn13Al3>0u&di+ zY57_HC!AXauSCYSR{+zb!TU!Xuwq~A&LWE1Wz1tk-h?Ilz@s9 zVtw#PSkm?f;E->Sr57+q$mbW8B`~!;oAW(r+l0|3DM6Ot59KDtfin>d@*NA|s3&_y zC83>GYd2gm%j{z0gt#!}mn$l{ufjO3#GNPLBu$xOj|*|UAK`8Mi42KWr^?~S7YUA8l-Dw2ilnwr^4gzQZG1aN=x7z6ke-==!~{&TvumEUb>3VuB5y%t9GQ(eFnW^+*$ zekI#oSr6^EpN0TwFnC7TdLU$%i$}=u@08r!t;LNEMd`0)p({u`DA6vu(#C11Zg~3H zkE4j2;j0$u0q_+~z__+JLTJ65uVEmdFjD5SVW-T9j0Q zj{CD}s~GiRJt%L|Q0(^}JW;j~V^aiw}5648KgpG9LSEy1?V8KkUJ zq5l64VS0!6Y+`faApPYo0O*_W2}UsuZC)|4sc6{GacE`MAfL$h8| z;C^zv*t=lG_h!Su^)>6A@J+pH!(M}z1bggCx|G268IgjC2l?=Rk);}L0(d^vgPQvv z&qoGsV(UF{6L-kctD!7}{gO|3GV+wBla#pW6_K>Yi*4d1#zNnya0$2x7D55~btpTs zqtba|bEyD1U#h~Tw%Uee)AW%XekLb6tS+wJ4HGZ%H{Y(u$>lqt;3Siy8xr^6b~P1- zJa*btmzPG5tyt;?0{q?&D@J8rbW|w3VVYCe>0FvVWA0m-W34GPQLcIrX#F=`n67iv zL5%-<+YjEt3i+TwqQO`0dxK2A*Wm#8I!5LSb{i}gV;OSFK9j^5?{fK8#T7TU#Mdc5 z&R1Hh>nV?}TO_xx#%^0u@5wxUqe7qS#fHNE`?lu^&!M6)|3z~VghMw{(xtyQzWW%0 zB)0m;9FD?qmQV2t(-|*u!h2l;77sbr1iQX^HNMnur7kp3Qngl{lUKSZxy}OWmSy03 z|Hv&Of!ZQ8pZJ-OY}~DACKEqJ#62*9q;)9XA+_>e99DoZ;If~4wfu=f|9G|(0+y^* z0g|T!mEImrb5Tf0G(NLU2ZuVoU$;2g*26Z!_Ymf}dxT1E*&i(Z&3*>?N%P0W zK_2TRGB!LO1Z_ST+x&~P*y8>@PYIf@h?J7*M1PVAM18P^I}ls zXtM4mE?u;e6rx5ZlD#Rm+nE_vqz(`SXa5l1{Ig*Czv(NFILY?SLs?p{e3kFo7Pon4(SXzQ&9?TIhDG+e#lE>EE-9m#O~^E!^o zIL-U6j02PHbKWC^&Z+9+DGLd{Xra&hSpy*OW1CwAX-;x^nylJe*1vZ@PC6R2!b~xF z%Bs{Rw_h({`lbkVeXYmP@?FbGnf2rnxdVTN{)g11_^_y^JBYQxc} z&*5!Kp|yjzHC5os@S$wWabZ0Y1VQqwtYyR>!>SOt^NI_XL<>cq9e??d_hPd2HNmfb zOe%`Fgh!ICs#~Ul6iz};AfY`=%)}cEwwe+L?w8B-U2xE^Jm38^!w_=RT9z)~v0>R= z4ko|(RuDReCEkt2%sc=2A)l?LFySJ>$8%mNf77VSy363DU>AVyJMDQnOb!ytzghAz 
z4ub#eyya0=^2x&_AXZC8dp^b2Q9Sf088Xa?V^0=@+%7n#GS_DmLo=SbYtaHD}^ri^y_Yb5@mzr$-P7fJcD)da5Y&eIVsR-oAA>U3V@%3-Elj9s>Jx|E`i&XpW`j+s>J2a2^U3Y$Z~&w z{QLw`y#GW1=vTcCHhV{OR_!bHc1C@Cq4`rY?|LlDEwKXy(yE!wjWc2L$%kik@6TD) zpreqKD~g=x-;n#3i!Ozujkzun&3BArDJ2=Je#yykP1@mgsdgsFC-ndrxV!Tm2qwW8 z((OQay&D%X&I&&4WK>h!wV(Hj{jBzZM5*oF1BucZYd{bn839qcIgM^KL-1IdO|%3L zhiqt_aSFyrTO7M~)Q|Q+tZ$B&ki<3Q7|pR9SRF`3j!&_)~C6Xexw@A}L=U#{+VTKw`PzaKE zNYcFTU}Tmo{%xVMWRO4UDbJI(dpn03?#O>1aR@ga*SgzUh>yQXI7)gnYPImee8=_X zkthrpmoLV+YsVe-nJCcs?=TiR1QihWl8nLxwTin|x<$L2FM~sJwF3beJS^ zD(f8R%NHW>kOS|ys?1C}TZ(7--bJchEm%B>e*7`!t2UQ8(@Q_DRFw{a_pS2SuPFRe zVm>)a2L!M`V#R})yBPMR8dS_`nZ-^0?A@W_tOe6q_0+3Ac9v|e!X^$n|1vw)n= z>4$dhqeb=+8o}Ii@bn;{;i!$EpRcIV%}U6qDEt5w2RM68<(YvO%#hay+67#0%@dFx z#BI%_fknUanhKGDL7I%Uj#kg76yf#T8)s@Ofo-|{+qQ(~CFHwAApx>Yh=_`88qlS! za4VPmWAu$S#_>nkV@CbB^P%rMCZ992eDI!A|o@4cYZcV8~I+g!d?MQN8R->US% zN`B0Lsm0GvVpMZl$5=7J_`xS{^R#T1YY-sKn8Q#hoJ90@`3Vd3A9A@5r0D->*d|D0qVYV{iD$k`}>r9i{ z&k%F}px@*k=0 zZlFPF8|Cl4Z^3$E80biHp*X=ymmD!~6pLUUO> z$!mexs1w7b=rgTG*#DL8RAl~R*r{2A$>d9q>*(8-Cy^P7xPv9o8R#O%bqBtIIvy~4 z#2#vz|21!`Yh-Ml!1rLy$&L||Dp`Yt^=JM+LoK?`ydOu#tS4+Ltp()JyP57k7ZvEw zf2KV+{H5zhnc><>1y1Mc@!`e(QZrr5#mhfN*$*Q1y#Hj9*HEuM42!~d zokVsI!Vj;2g8BdCg2Y2Ac6SfZvK=}(=)N?0anHq z@4?E-1iF?6PL_x{>*Hx*vOF^fhRPM47)oKfG|u~oGQ z+b20X9IDg3>P5JYs4TiGr=NSVC)1kwL1{adc58mff4c0)S0yN`94Rpf^xZm$o%Jx>GLp@nRO00hEdfntoRymY2t)RnI&jvM=}+@y;FGW`vj5*`(L5 zFPVG{7H{&W+qJ_A8z|vutqp2CYFN9~bT$!r=arKz)|Pe)z?M_5vAxNSd~^)&$(r9PI)G)>067WTe7N*+k`PhP0Z*P?+*y6o?{mDbgJ~z!fZv3&;QI5_dZt6$ zTo)jn3qAjzZ*&u6k?7XIt)gj#eb(Fk=P>0m#ooo9D_}AE;{}%3Z@rw4yxQ5? 
zQ<=yAOj8374~Ne4RCDdq>lYlnl4UMmykVsK$L@o37K0Z&MbIx|NDxx;K&%Nw=MPv3^P+lEf5cZEvHOH|3dNK5X*yjW*8N|CqFc__-NFb19$J#hZkLL zEY%OHxhXc72mc6!XADNs4n>D3q~V&z;WHcK%UqVj|qKmkTmuuw`b6bdsX zO)%Q09mbLm1eIF!VWYQ$tUeoe*yu(hc!yxO<&iN=(=qIdo>y7KwqYq!6moV5c z@X7Hl)ci{m?_ksNf9Y3AP@A$?Z!P<4nb$>~Wn6D=h>1z!6mB6cBR~7g#vmLKC&nz@ zx`;CGto1){$=X;04G&y)Oa!u1`V*3v0ob(K~&DL#ys)*R?bvCQYa6ET~ zE2XO4o{y4o!BSMV2_!=2)W@c2r0+jZUh!{b$dd$^kp7BIgy>b5FJ{hfEK`smUfDW-~T`dv$n&@>Ezot6H8s&VP-${{}%d}JcoXOmW4=CLWXA&Py zu10fYV79O<6T?o!4|;@7ucnT@(Z5E23#TNG+VU6<1HJE8%8ba4TH@<{V21kwECM9q z-;juj4RI6X6D=nk8?l;&LBzrq(k};DwA7$81;%sGsUzxkeI)?C7Wq;d(Guw_-iFy+PIAEN*fN7 zRF(kULJB;3zv?$+L2?kwR~#}_40d_)-;A0p_NbjrNJ$Y)ou@=xU>*=(7`Ok>uHE3u1%%d0-;rO}hgF15$#a5(gh_CpM z)@et@YC_&a`F&bL2;()YZ2o$hgs2Rueb>=Wrm7SZGw0WVMZSPn+;9Zwl zGlz0iPond!kA1bvJIH%4&P*gYa3;&OlNO^%0^{!|$^ zI{;J}?HRaI-rM^qQSlWN7En08dG&Yix;V(sIe$dcC&+`Pyw=vMQ~1Gco;TS?ZNPg7 znOxoFD6=9l#2-`=^ht4Fd!_gJ%-4}0e6&Tcbd1b~WK7u)J;(X>dOYW;iz<6~*vW&_ z6Zgm(=o7yoe}~F<27lCDAl{g=Vp$S>yO{B@cr>nHg)-YX*jog4Gf8qDmTQM>jKCvE zdlSgX8^Amub9EfxpemLoHdm0KXW13}(<;zu>(sDIf{fus{!`rmZ}jNSZ4I`TfX&tV z2}<*^F3$l!@u%k1*YXA9(|T06C;$uMxxsD}J7y-#-)I3;qi?z_lAhu<8cai$Wj%&Q zwR$evYvx^ekbGOoxs1RP^{mj*FC(x~-Fy4tgYmmZUQwhDDbm`?uf#!!2LG`FuI;wZ zQND6$SsXKs*PAREA#R=@|dadCwrzmMa)vt_)~y}jz#Ap?2$W6 z<;Y#2od7UG&SYj*AUr?32N12vM5CaoF6ySHETy`1u*7_-VyXO=kL}o#ZCh|OK#K(h?QSzyIw8s4MSJC{NBG?}Wgk>uA6kw%LO-EI4pa zl1Mugkw_5RtHA6T)Lu1pC;StuJhQ4+MyuAA}ybw>(DnkqDhY`9+A zmeTl^+^-ee%6z@2v(5?eds?=>d%$%p2DJxFaYX~2yOc?c%^^CNb#M8XjWjsWM43IcH?Ga&r-)XP ztltJVi`l4z+=*DbA%<9H6(c9cePST0C?7t|KNsRNbP5$+%lFc0P;|1Rc=dFaxsDg} zw5Kt}g9}O&-xOT2fIa4b*db~L`MQI=K)#LF$PL-E)SO<01`DH_D@&J#Uu2^Sv>G)h zIpS2Kc)1?)TR;D`{Vk{)*7>R~X%I8h4;&KO%xzytE29bc%!Yx_m8W*Ur+*!{dGKKI zLsd#?BMu>>$F^$uSv2v@uGhlaNb^=A`@@&QYi#YUqk*>IV#`X4AeKOA0idnj(A4WJLgQ;lM~T+s?8empAx4Mx zfT7fA5OsufLm;-6&UMg;PH8`7?E-s;$d7N0UjtLC~q-OoyM|Hc_^9M3>{1A^QgJqmaa4&k)R|Jvh$iMay>Mis$$@)~uUM zGX3ldhPUn_A_}DmEfC}P)!EK-PMi!WFX6f)H&R2t_=v64T 
z!jBcfTvlxzJ1%@fr1T=!nI}W42>dxHA%_k%95wAg*ew2}wL)nq*uiddTU*kV*H^$1 z+ijFGh$i>T$chuEhc{o|aTd+Gnu_~**U&~|s>{EV&lKi83`i@buS!&n%KUGub(7)MW zrr5A*HQQ??o}AjrTJ$m_T-oR&d2(b*`&d5(i@qrIs*D|sJF1&$7^@r zpK|dGvui$e$`IT0gnFkU?2=WKk)$a}1;01a#eaP`vDWVO39Yhl6aPi(RH|R}!Yg6I z)2N4AezFHU_z3RjV!~}m%CF20R7vc~lQ~&?mWNkd89^i{Zo`><9%MW5U*nLQrq!gX zGp*>!jug>_6}wB%BY57GxP%HU_L*NMKM|vdpy=Lqdf1L@e}VqkN3* zvo+#q(v4r=J!^Z^)BO@{wyyB7b1TyR8Rs)M>P5ekjSF;LU&<~t9Z_MWOm6+0^)K2N z2D<&ClKgm$?v>ZsS0e)QBw9-@x8FJ^af!d?8TXc z2csuw_uZZ6&OB8yweeD1jp|5;UUoevzC(n6J9pf)eNV9X%_G%)&Y|wbeT!9 zW$5&9h`Ng0=8?}fwMAP`KUUWgJrkX2zKEh_vq&x3q<2y>`L7k zqR&d6|BG@ezO(ca@1{)jK2Sy|M4GqwSWz$k5(_;;3=usYkgY(xYyDd8s{6NzhzLjM zifZpaOgR59Um4N?&Nt{&G%>~{E5MYk*jBvRJYA>hIsH(Z93Xu+-p_k?;T17*YlzpU zB70mq&Oq7lkCywZFs1D+-Rh^&;6G|yRCk%Vw$1&v?JKRyLMpS@vhc)G1HxWdA!dqj z6nC}LO*}$yhA;Xb-S*FM5Yssl(!;}fG9+|*z$nVmdnuMLXZ)I1piiKiFu_WO;i^xw0QO01IXkT~eykO86GcDVc;@_A zNJ>UueLk>=QeTziWm*O)U-w(N?S~-oXWf38rB}M2+_hHu;#oq1tZkmVc-F3Zo!kD~ z`xla9q>5Ekenaf`1XAtWIGaAS4N~gEVvRn8cCs>|lcYKC2*=m~$Y-+0@Yj#6%qgQ` zU6CUK1GT=NledgMRwFs?$#&B0XNW4ja`IS+oRbWCeqPm(oNlhaG#WU8iOs-E=rbDVh9rU62WC`0~+)2qw+wstOR8YbSsnZRQ0 zt_D$!+cmPl)WS3k`_8-<9XzUOpd`0oB?C$|w93&C>Eij_$k4By z`TI#$<++b9bmL|z?iva3d;y%@Pxj&}^UOh+b87pPmnJ2ly_hTIgW=6=5F6gUXtC^h z=UOgmF#N3%mOX32Vj#sX;-$hv*fTaC)yOA?j?3p7^Jn<_e~&Rd&o_MSm%6TUUY$Ss z)o;kXJ(|L@;$v%U+$$?HFYcxUp1#8~y6=nnQss;h?T^iTTOB(KJZD&sN zUW%~@mC&5yyRiUPyROG#|meW>WsM)aHd@I&C~j6Y4J-D$jR$4oRwMke@`Lu zKcW};_us+lr9dX?O5|)>^DksFjTLf`|QIUwTG1{F=z_=uuMr*_UQxPQ4r`nv#iENxDKX#F#p}ifNz@T^XOkM7N0gPnkk69v%?H^tGnFXQyzhyYV)6PIy_zGEV z1?Y@}hwK)FjvjQ=a^K!NRbz-!+IlHrHasE}VDHr!o{XKB7YMf$7id|1*s4A@r3pu2 z_Mf_530nI;8Vs;BsX8UUA=Rwp z95!3Ya&of#h5m=`7oZZRf zz*_*q#YS~KQZKY_XWr<$zsi4m z80~T7T795>crmrO3LAj3#{&6{hx?!}a&Qx-oh?-KA#Bj_r2aDfV`|u_GWOZjfDIi9 z6MGvhe@05Fqz`|pgp*6Hbj}EtYisx13)-IdcBu-+-yiQr98+@!z2**rZe=bpNFy7d zU0<9oRXbgouqr^jye-gM8%<_~HBRg@=Ykr+q7tlO0WLfNz=!7chY#&1Iuo#(q9}rO z@W(z(Gxu%ii0$!gO;KxzR6J@tD-Y|WO$bm5UsS&|3BQewCd0uR)7}(z@Axnd~6ECC0rP^Sxg5KZ-C 
zLmDq6uecThJ}?!=5ech(TkP~s{W#>4Di7=E^mTIjg2d9UdEoIu9x`K|NT^5kSo#Rj z56iZceQB_|G_QvY~(YWVCtE8Uw3O54ucT0TTANE8hzv zZCp8Rx_mf9R6ia0GRLmyz_xj&PA~sp`7ei|A~(AiQ_#KgT%y2zTh z{%*l|g~3c@;{?$NIDp3(q6lDqnO;C0b`$}y*CL;+X{89-f}3qkh!`Gf0syAnnfk=A zTvVYdkbbtzoij|b@6PGCxtDV=K1G~S8;Z%B0a_-$*sCMm+!i0CU$Tf9x4s4ml~;;3 z560nPWUblg-w>DIkd17up8Z_ROo0yQ!#@ITJry2MAJVZ;{)X(}2QaJ6RwU0)Xee<) zozVJZu!jE|hm4SD?WD*Do{`5)7d3bzG^VNGOg!bQ2!NVKf)-&_18F==;Wy+I$q|o^ zlJ4*>N(l+_fbck*v?irgdh@^Ei4L}-#mTp`Bk(<;px1+|mjYNx`u1w|D{cdkv=iQyx zyzNqQu+f@Np57Yd>#VALq4bphGU5!CG+rsB|GpL~8wU+r+Hw z&a1uN9UK<2*Go%NS_gvsnItsKnP9rYK*1HJb+CIeN%A1rHigpm_v!X?EG{l&2DLfH z&7K|!on;ev&#qk6jt#Kl3I^9J6^#ZZAGlK+0Q6V}##L(m2y(CwW{<#nFfzM>&ySPB zoO0}0%#tGC7i$uBC&>DE{77yNWRgcmEde$oJ@VBRBXT2rPg4*&G=DgfNDX5iRKg)x z7kFrUzr8H})}G+o#%wK>TKOs=`RgN?m&Ne~%j0`y$hOKT%tST$hW@ctl9W;8Z^&Gy zDImdQL=wKt*TM*9pwZNSu5Y8y%f&Z#mfz2@;4b&p3O|!V61_YKSA?H_z$qOdp{>B= zbK~k`iVb)(mdjtGN$4m#PPES;)o{EbmRh$Rf>40*3eItRdeyf&gDCkB1G>!*g%Efn za2s0yU!FE-;OT*m&>hebstHr>Bl=9E82n#fURM<37;bR=X^H(}oW-Q6_zF+4NW~8? zQCVyW-rM6xwzb6~M-P?SE#@g4!K1g=;M=0-(q~au`s_2*FRUrOx&KvKiC8>j6fIKAF?F6SWVr*t)Z)|>1{$_k_?z7n^ahglz%$zQ0| zTHNZpD_ybHoG4G8zuB>?hq!WrXkzzrlE1)z10f9&hMG%o9AfSL0ktQV|RZ0M90xF## zy-6<#B?JiB-$M7^zu!Lh+P}z^gUrG5q&FxmkfU;kkJ%!;5AXaCcTF4|xh~&cHg+BN?=Y z(6(fTW-->~n)-@#JXPj{bB}0uy}Fs*f;t{NfIL$0RHD-JPtp|pHPi{olKzQg3lOOa zLWw?JyVxgOuV%!gvk-QiON;k0AAjsho7}Q+H}WsxYLE_}MdEIeZC9`?G*Bz^F~F9R z;0{5e6~dQ!9=h4t@G(`qO3#F*WsMoLfMh8~##HS2`t~Hlu<2OSgEsbapXiUr>M)d( z%R*z^k1s_)cKv#-q(I2kBE*maM}7;ruZ-1E%#6a>2|&obl}B2{Ch;=Y`#^F{-yJ{| zrK3R}0oj_2o;W?=XFTQMkoBR%!`VH_(nMAQQkbpP@Gbo_`hhY87rrv>Y>OO?Yyf(( z4mi$MJenFix{R^EZ#~0i4cqKfI}S-}h5*HH^}p26k(=P=SX9{Hv;ka%4@j;h$+1OD zjc0w-S8w-jZ;F;$n@7XP0=u~GlI<)WM->TwM#KCN)fmN_4mnOE{zZi{k-QPQcNb2T zlF5)8P~z57x4(J8SkdRj0t?qN%fl4j>aQ$B%c(!4rl&bYO)OL4d66tUpZ-e+9;Du! 
zU}rlLg7<+nZA5I$78KMg+$xX2Tn|+ww2)OQno|sa&Hj~@Z>u{ubM)fXoHnB zg=dwVrqf?+zcpQMAt0~UZE}+P>KZ3r&@$}oq;BG_iGE5EYxN+lVyhc8vugSHKYW^jnwy z79W-zRBoK}dc6BO8y{sb^?tA*#cUWB`rBr0O!$57bZRYBmP;{&DPJ6v2C~DMRzIHK zaYzJsyp{lwHb$&_kSzB7m) z_cA*}B?8lfHL&=bpeoP+fop7hJ z7|}xc_`un|?G)|eiqqsebvw@CPu(IJ;{xie1C^fuSEOl!Z3_d{C^E4l)vUDx8HA(> zcBeQu-^nY^gy!%$Gt&hV6}2A^ZhoOmlk8?+R8>3@YX__}bl{{qXFw9FHwH1O#J;RVoxyN|kb<3Uy*(j2Xj|Ef@}w5K$X^@bB+Fwf z%$EpxNze9}cWW|}atn?PB(#%4ot;3I0Jb?G67J`Zo7vF7X-uSK+%YvD{qZSS=VC%L z`>mG*NHp0jRv<^1g2bWFg4~Ao{pk-p2~$=S%RY8M(v@~n>bjNH=$HV1LU`wEvheM0 z*>8R;Pp}(pmy_H_8^S6~IL-^23TwsEGZ;3W)T%}m`7zAAfU^<2ChwHezYy!?v%ab&7A~O5f|w5BbI?j^Aw4M99r%wrBfs z5DF*cYHIsEb!CgB1&zBS&B8Y4UIhBY2OsC&qgA5wNr3oifx{&%6252z0NPYmr1R*Q z!}L1Q4+OKOzTyg3{1a5Ma$iYFb6d8N%DrJ+swejzO7DFJc_JEt0Vt{3?V*b|)%#Yn z`(tqwW4|5oUSyg|o65?!n^lKdJCo(D_$Zj(rFBkX!==j;gf1JnDPR+c=)$&DIm{@X z=)c|nBY9#;rg%D7jyKZxq=us*nWg%H7DC|;PWeTy#bV<8S~=!bhbzyu$YQ1hZd_Hy zLK@U`XIYS922M~R+JF;JG3c2Go&s25>BV$1BH4_tE6j97J~7E7=f_eP->2HpcM_~o zFNLTm1XAOB$T~XM_pL$Ud=eju6$qCWW6r61zqI9ed8_TEglS8k4yPdL9EGfMy^>GF zW;a1a@m~5Kizo2Jc%ty4!3%JbWe9ei>f3&yz5b`)#I|X%HL(k*i3fr-wJU zk7zVosB)vTY^Uv_FL|*%T)i)#6a>J8YQo{Th4zAPoUN?MmO=L%+?*e@u_pdB7f;Y( z2xRliz!8}GnaS4o-l6Tm6}&vUg7WmvBNwb~PLu1~>86s2;FD0lPir9h`QxeHhvwB& ze#7_`6>dG2E&q)*?qZrZ==LlJz-+q3*}3KS`9QL5^|EBlRSQXc%n%)s?`H4nv7G7o z*D}vW*An?aRm7jA9kJ3rq|AynMFgq7bqN-IHx(m3u6xm+s>eY2^@^o5I0eE09S4O2 zat~xL01up9@EO?42BlN-&d=zwEE6T77uG*b`|hzhc6aq-T(R8`XSr~?*sa<6@3k{8 z^n@)-DyyP*hS+Q?>9u2{nNR9Wr7}ER$o8~iQx*X3QndE4XlUWar|V}$GWMK!eB#7Y zry?m-V3(1fj~&HMaP}qp6+0~tQ|cH|_9LXsA}0>N5kJ@irz_Djl7I=))?F>lE3n?@ z6yiHQz>$H>Km%yYI{(fLj26?O%ECIc1uwoS+$;n8SCHcNfvl_OOlSVGKDD*NCYw!W z2E&ad4Man$LDWrzaZ6OzV&dgp4>L0Sn~M~8#UY{Qj6FO74W@JO>Gz7DO83_*gPfzD z<%^#P z4s6zW5+={VP_u+Kkl^g08f4kJewam6C(2(2w;Bs4eCWfruQB?%6{)j^myc?T@GDrF z-Tm3GM1Ne4XE=jC2_KCOD4p?V)vN6l9l`x4ZiR&rROc{)jOW-iD2Tpqp8`}+l zJ@0aZZ36*>Kh{I{ki8B|9gwg7IeK|CbUwI^X>tuKLr{I+w79yg@b)SpNGwk0Iit=9 z8Sg259RZ5|N`vFGsKDg5f|-^zkE2v23RCANi+!vN#kTldkuiAOrY`kSB9Nwp 
zeFUo7*yirPNRj_82+%kG*3=zG{jI6{c4dv>If>4I8*zU-sm96VhEa8xiUis^4037t zR+y49y1f~^m!j1X5=HFK_v(R?{qwo`{Ce(u^dR(F#(*^)dKMT!zPxV`QRu0Jyk2&C zNTR6oTuPZ@93Pd6w;-9jgn&;B+!km7b zy^Pav)QQy8w*G3ODsnXt0)Z$1g4#RTKr5XJKIWQ<3i3%!K3EHL7oHz?!av3s->T-g zn47BAhk#Nm$gg@)mNE>hD>G$h_eP>4 zpT|}|&*o>i_cM}YjSj_zg+N1z(l+l#y7+s4WUBJN?mDFd&zB-A`vqG86$&dsQs3m} z&3SN4(?-Uk3JTiM5=7owTw3Yr(sx$sEGvQ$_QkKmJA;~Te9%_T2uWSwW8;NgN~>50 zGro4e3U!~%j<9^JB=-I`*w(>2Aao4i>f=7$O~|k~0(s}Ah32}#Tg~kt-jMTxy?>ip z@sSmMolpk;x6lH|GXekTExKd0fmR|w3mwq{^#hOTG`m)-K1w&Ao2{vHmps*ZIscf) z7eyYoXo*K~U;)3Z?Ozs<7w`?adF>3ikxvb;QHB7#Gx;k@rI^)iq8-6RCVB!;3hCBIPhPKx z`EuMXmZ`lu?Za_OAYZ@TxMB(Hk9KL6;mG&k8^j~uWBB1jx+;qKv-)B@pjnqR)$rN3 zn_`*Ow6fHf64Q6Zc{XAoo}=w*qR3Y=5p6aIE!n_AjA;htdt~)MPAqx_u*N@#!28?b zJuyi)t4S>b1CB<9ABKfbVD_2R@S>ZXPKS8Hp~wlb_~- zen!SF#ij$@vv#Z`Nk=p2FYB=QW=()MV-aS}UZ$djeR?d(EDYym#KHLEjNZc@@;`$S z*=EcdD9@ZzbKT#5w#Tt`V&7vH)^yOozKmPp;NE7mOkmVw=@n*IQNEFV>!m*kKyqwN zTY!_-&VcO?NH-p2LqSJ*R}Ez1Fb;_!Bpr7Brb2@MF2nw!Lh45z3*hLUqPx6PK~udj z;QGU_m;Rde^IH2GD*VKjIF;>oc12i>GP#p{dIYM1o90I8f%Iz8qYlJ+3}-EP{#oQ8 z{zO-|v-9#mtu*}r&@$c`)35?M0YF?TdG07I92v#mVF6P`uxdP=%-X!?Cva|{tjUu$ z*8FSr?eiKVt(w$8anv7BH*hc|U&SlL{OE4I;2~H!Q2zW3?G`_A*ct|@f+{Sf){9kL)2%G zWUA9$zExFDjGU%j=cz?G01c)bB~z*mG;LTJ9HjoT9^`N9k%fbK@ZTygzaV~#{~$mZ zYH!{M9I1?&Dd0A?8hD8AwhyTnlWE)cgBV>IDFY z5HMRu8K2^^CzP0vEzVgH1BsEES4$s=kclRM=mqfxG^Tw2m=)JGmqlfvg^8yI{0wID zJ?o*@%VXAF%P?9bT0}EL3_qj>L)9aB78Knkf$I7tP^~{?CH(>E`X#zhbNC7@;Rrm3lA3Lyk~<~EVg@FTTr zQ+Qc!B*jUw?VB45M;Ef9+dNrTTrQ2^v>GV9X>hZ~s6IB~2R%UXP04oo+svE#-? 
zMptan5Up`5a`$nxUGY8Yr=cVTD|MnjNTak+$XbS@R;yA5gLY4V6k@3BcGai)LCzah zQGY<5I18OEd@*+4n0KFN;#sz@OgoBuIe?~TwO@-m$@GQxi$`O7XH+^)Xdd6I?lw@j zx{!NBl9`Egg6}~Q-lSRsVjN%=293y=hDDon5)tHCEtn@*gBjFwPghQ}#gVn)0p14M z51T?+YMqfk*eG}dRiVQRkuW-zapEy>d1aZ%1{uW-T*`-YCA2uFABbz!Dk+zpGgB#QGgAF7Y+hMTz&gbv^m6S+Qv6;+bSUcimT7wAP7&`08z0 zL!{6aer+3wHx&i&BS^by1i)AI!k{AR;*Lfj?}IU3i7Qr0$`5V0>-`Iw;L~mhppSC_ z(9M6fBI^0L{^H(b{>{DV2)H*h7%T~{xRBO30!FuNNG#=6d409+?dQEGi}5G8wXc8S z2hzoSk#itBEoW$9Jaa-PJsRp z=;az77Q*N5^}<(%3=Zi7WswAP9*`GjMk5clSbxd2AHe-|Fht$riNz7(%)u_zvO*Y{ zrBe2-90(+&nsLknqv*Gg45FLs?|qmbM7LN&DpXrc#KgINMJJD}MPsdNRsGnfZ6Cf# zN`#4c(~GAd`5@YfNf9TYJVzy|F$5-*OXa?7?Td=vu`|x)lb8> zIqEZsj=1Rb52_yi`IXr&k&2@Z*O|5J+l*A9J@{?u`GI^=X#ik>T4TP!1fW&oe{0(V z?8*Z~$}M?;XP% z2vCeA@B7Xe5aYYS=0AK9Sha?nImE0&B_ZWs=Kr}DvkHrizX+?Ezg=s9LiDC(DK5P&X{4xiFfDJy@6_dA>wRujLY03%ZJ}hz=pLbYwmL`4IY0++ zEk;epIN6kb4vCnGIuG=vYEj|d*NN=P(z>#ty`kDqKM8Vmp8XEgKNLy{cJ+RwfJ+F% z0?*mkc2)7e?Cz~6X#b|dS0a#0cTs7A6n%@+X^ z8El3zLls!*2BMz^2}|YM8R-7~ z2tA**A-PS*Tcv9H$Io6m&u*<(tVGWY8>;3Iq*y;ZMQ}`(j?S>zBw<{+4kX{!zT&dO z6%0mArSCBT>OtrzaNdfdSe0Xl_V$hN8cgWL!t%Fti9=?u1A2Dv&^Eg8IR;a6%O8-W zSLe|VhXu$vw?5=544kJq0%bu4X*Qt*ygUMVumgU^WLa>a?5B;X(<_>^pWuCM`KrpP zy7)w>XY<)tjgXVxVqXIXzBg6vSQJ7R-u0Fx|`K|(jqtYzOW%B4K&)S^Qr zo-OkA=h|yuKa8e4nq_aKn2aBN`YZYZ*|tBUuT(l0a<0G$kZcXXfulF~KrwI%Kf63a z|7Sh7g0q|j7XrR+iSc-Mf+`OekK4{{w$G&{OR)ps!C&5d00ysqM11P20b%L#yNY9; zV8UAF7LBUKe{+oB(|UMu!BfQ`?^szhp`F_)9|b?mK+eTFrQ-@v|9jpu0C?+ z-CuTQs?0?goE4@sJ-$PyvV8kPqYl{go|$h2v+^EnZKE!8JDMR0gcpa?#6^mY$R@XzubNMm{{pyeCrO?e85KMT$5NuNnaB=Sn zkb*2HoLb;c4128Q@cn~$P}<61Nd3!Uk3I|g9&sO{L6ZEN`>?5K<5|Nd9KfJHCj!1u zdqAQBu2eDoKbBIgZUzj%HuXUL0!3)pW<@31L;eq?6yxtyphO+tFlUK@udE$$8~*As z3NG&(MVIE7FO9tYXkj{*)4}J~#Di|(QRNExGG7;M@MH4x`%;df_Zewaglhaxrz%2o zzU3mQQtlM2NldS;$4OmDP~HrSz4g+UOT1}WKH6#4&U)42`2zv|_&7a?zv?uoF_W13 zL35|W^`a+BP}O1b&zJ3IxMd(&V9>Z{TaIOnF!`B7E3(|HWGIu8w%j8U8ro1-G`iHY z%YrKoDOlz>U*w#(2UWbJXye-C!bDmVO)vOm@4 ze4Q;gLj@F2u~RaxSz67C1y8W!cs^asKiAs!M!Nl*lvgVYzh5evjf-iLi>dpD&OH3S 
z#ch(Wzm;zC&$*A~j5@S>!F+b2wcL#W1ZgLbFFlve;;Fl4)xJNFoY4Qh$D+%be*HOz zf>0hs;3Wl5IRWqld<}?_RiO=wa{0(fH;_Xu?Zd(z`!lFub$$9=ImF?y2UlGD3#H82IT#)6Klgo7gG7cbih=MU1J^lS9Mxbtwz<(h-$aM?}S#_H4 zDtLpsc)z+PVLsXT;-+@%o7X%1X*n+x@>h%B)s(-PN;}QK75=ERIg=_-c?8E0D&>!C z{+2oUL-V0p=yMjYJq1nX9OWedzZ-ju0-fV9Hvr3|lC~m(s$~l%523l~uk|@uX=ipX zN#^z?J>k1_-+RLZD1thqOI3L8O-%+{a$?&)o&Ip{)4+p}*S8n5&dS(91UtCPdN$=C z7024=&`=C zh7c9i6WNtEA6mah`c5P8`)@7x;VhF&!jAy5CGK0XDMjaA#FxxxnawBNFp&!s5S}a? z#WXC8XafxQiO7iO=mitoN+EQBcyLot2e;%>)1vq{VC#ez4Pyw2pDZfHME` zW$z?0(fXh}Meh_vu3o=0#8$C?9>rS2F@k{(HtYq@+pH_hjBg%H)qOjh zSLz@>Q;nFe=nhn78`yDrG3?^{n6LoQLJum3`672msAT~uS4c)<<4Mxjn(vwUgxmO` z{htdV25+n@>D_JAK8>bTZ$mSt+$@__cqCz~OVi~E>=jD-8qjk(k>pCU=9paF>3SY3 z`nP1|1o+3u0OKFQ(`pNfz6+^4-E!U1r69t#!SHU_UTfpqgSUKKVq%D1D0>*BPAuE8 z$Ff^#{@u&UX;+VuwwA|ZsZXGt1fJrRxa?BDnY1F(}Rqlc>Vo1Bm z<6Yh&v5wyOywy8uAJ0%r_MCb@Mk%Le-8xYCfEOve>Rt_3F zX58ZK&17lb;pQp%O{&mhF7}0F>9URzh@uyANZk|6RSjVd6~YHCnzVOgUy8h5^~#go z=hzv&bGD)?Vq!!pjP2D~Esjtl!%GnEP(QwV-X6K*8s0XFiMuCD5I>_LKBgOL_}5Nz zcI+^mSlq?9&YN4@X_7(($|z{QPo;;R54?Qr-^=~rYaZPqA_QBR{b6i;bj;FWNx}6% z3Eh2Ba?)k$(eVXtsA^E_tq}ZU?0^SfNB;6np%RfuLn@x9q20=Zs{k8&1SiS8>pde`=}tE$Clj_7Hr! 
zJbZ7t;dL&5I8m5tpM${QT|zAA16{hP+xYewy{M?7Q7xxDk>?%^MlVJ5S6=#t0Y-v> z#{#@ClcNHkaU!B?3KdG!Kpv6IJPD8&7}MC2Cjbsf}Fray`c%8Qz94G|_2X>X2HK#3QlU_1hBN z`Bs)7Lp{z--f?6rM5pbI{p~?nfcC8;apeUKL)58A&l2xM|#9jwqgs z(f7Un2rEXzpE687JU46Ks7PN<2FlyCE7Pv~^vhPeW8j48)j&t$TvUc0Tp6)Es7;AU zD4rKG7l7VvgwSsO3Xd>OOhrJKd<8K;sG;vzSITjbk-5ev*U#2DviWd{LqYZO(Iv8F z9wow2I>O;ZakJ*hs!POL!kSl%0yhmg1Fu^eNK=YkN$d>(zYk1u^H+A=s{y%(kojC= z7qYGWxQK^47A0{tfY?1gyeqKHMGHCRELECIyR4iO_h6JJVVOC0&-iolM1gOiqc=xb zzWc6m(nU)h^Ud~svLhZLdT=~IDZ}>`8+Lm>Bo8IrFIq~He^B6(blLmDNA1){S~jby zO_c7`l#fwiq6=}ypEAq^s%^!$%ai#7L#XR$jQdb*X+t^NdBkE!eI866>S|0Q6b3hJ?|C8A!^8byW zF$CI68QTBPB+uk^zW9tjS7`?7I}@fYwh^DF->2B~e@JL^TU1CJT$N_-)$*Wel@N)o z9_ei6jd^5GG36+{x)i{M5VG@*Z>|(sZ+-nrF}`C%2aIo^8Yj0!he5kV0E~Uj3JI@H z5{v?$!LT2QbO1Cl z9&@;fYztJDD6X2m82_YzsNOSE8slp=-s*Wq?sGdQm9>Ej74#EDy}#-|-`2@a1>5#) zdTQm}lGl(bY4_D@0WfqK@n4)i}q&5`xW%^=HASWSWeQU-94yugl!w8f;)S zxA#KBYT{e}C{+RlZJD1@Z!*6ffFnQtRllS@(lL%@?@rjmK;K!uE!0^hr}Zdgvnf1$I-1+VAPg^?;x1C~ zHkYl?3Os-4(ev**dj2XrW(NWqg4H-m(#`OzO>8ewop)II?FPtYcToa_=QK^T-=5e- z&B8}m>7JQ$v9kv?t*TP`L>vfT>3zIL)o8MG`Oy!v3nF~scWrm1oBUJ<15T^YP`zkF zTF2kiPLiui)H0G2u73BDUV-P4>LNg;rCVtc`O0eGAX5%Md%B1=(xPGYCO#Dv9#3}052dzVhy2#-2&0l*nuZeACIMotrg=!bn~6$mSigT|C{Rj~fwOt) z+ldQV_H!`?Rhn9C_s^cAWLz-|jt45S#^2K`o}PMZr(P}I5XX!7ni60!S{AuKTj9@Y z-rgb{Lu0yljOc_%uHs`=nD}&()+%nD7Fd+`oC#DGL|GjsPJ7|jwllffl06D8-7II~ zM^J9FH?xfzE}<=y?w@LFROw{Hk)#OU>RAYgv(E3L9>E z@uqfOFx3p>&WwXaB=>7a2j7nB`561juY#TD2W0?D69&_8YJ5Z7W}Dg{CR3> zxBV)3Bo^nskB#L!yy-d$C^TPQ_8w<>qjf|8T{FDbF3(Kl#Vz+j2)#rVu? 
zr3zZi{m@6pZ;dgrD^a;w$xj(ia8$mzUofm|lW(Tw;VjtP&ih6B8>mJVVr33#Zd>hk zfV~H@hruI*b3MGF>%}%E0}YzvrDa9&BRJbjSH5^}eFQv_}-> z>T%GA`?oBGaA85yYEm3sPjht7afrq%*)5@4*OA zFvr%ml8apk-;QB&D9Brs>F{;Dle2S7``&;=$uWZfm04&eGg+x>#$$jo-Uz?0BNL>k zN-JLeu_M>~?AZ$s9*AFpkae6#pWU!R8uNKzF-=w85(aM5qpXRyMb=6p7Iz+?UmQ;7G^nv$6%+Il;9(p@5>FLhpVq~71O6X|hfG#1ZP zP7+`l{Bw-ZnvmYI4-YNWm~Y6_x=(S*NQ6cbreQ=`H6_cHJdd3AIzpUef`c9ZIvfaM z`W#~knYLnGw7giEjAU@UfOsjZqjFnQO;gh-P*!nj77B%i7}XiiPSPUuXE>239ZP?7 zOC)KxxbiuFm-uBBZlF3VMb_VIi3G9+sOgPET0RhMj$Yi0<9zA8qNS2$dA&Z15p%lU z4))naEkmtGkF?YzRXCW9nF@)<*xOP0-MNnP%Ua$KN6*~~HX*Na`@|d>{nRexG{j6yi(ujE$tQ{ZA2!`O1R?wJK>+2xFH|PPK8;FpB7OPs2R>Szh=GL=7-y8(GQ(aCLk#ehMdBzn2|+LY{DZ*pHqR-FGONfp>h z<4jU_sjgV7|Cx81_rtHP8|j?*)A;|`D6Yb>rAM~b3^UZyc(JfL zo%e(keU8LXzof*-uPFm8XPy~$HnJ$~ff-~qp~|YM7}^y~gY)3FnQm=S=CzLkU%^~7 zz%-t*(jiKfgKB@6_s|(Lhsd&#iPu@f-r5|JRrc+2Yta?s$j?Ps_#urK?3W*|oQ;q@ zt9?%+T8hl*)?G3%R7r8dd-4y6FKAbFeqx>^ugc1m8Xc}FLDKs)2v>aLd^I@qSXePMEZzl^Hws4z_1uSkm;)nKFdeV1YdAxsS@rc_Icq{)h~Q2&ZBA{nxgxJXT*BzdGd9l&$UEdSk=OH2gM~ z0?m-^IXMbH+H(`Yo@)+p2`m#tnR=|ze7ct+)liz6w&Hf!`KW}_ru&aLp`~rIvWxZ< zWf?v4*Hsv=!QBK7a`%jzZZ?ZBDH;PYm$p_5uDkhAN=7Sx<2XXuErJ3pT#OHQvVZcX z@u>Uf5eMZ*RH}m6-t^B>fztqVmP zCa~ha1bk2S>EymDX)-1hl6G7qgKQmvvbj}NU2&p-tFkW#!Y0(IHK!>^F;!?(B!Aq+ z^3lduznD!rjAx`LH2H`LRNUKQAIW7}Eq zGS3wCvI#M|F@}BXGs&z|(``m2$8WAns&F#jQuedj0^pesiiC!RcgV?GWS0dS^Xe8G z7CwM)p7bKSZ-GN*9=Z96OG0AuVtdQ!yMsKBf7c};N#v0yUVMK+^2t5i(R#~D+%Y&< z4m{VG#lrgMH!*+y2SisxV?pB&z-vv~&-(KpRCr7d43Ql&F{FkW;K!C|6F|fLUeWhn zUNdXxcDrr}Ey;P{gU8P=p9}+|B_uou#!q`7PM}2mLjj-*JxJ(&Q0PXb()q&<1OU5A zF%mO!ge_d#20gSRHX@d3SVcacK6a6t79>ObC~p+kVQx8{)@|&1ihu5CId%ypJmx(2 zp~NbMC&ZxxaF2;&{IRU0t8LHLe+?al9f`WPg0V2mgagGz`KXN+Ur{wq@6*ZlPcELj z-)PbMF>gQ4$A=uu=nT$gi&A8RBYyJ)AWrQLmmF9&hVP3PtaA|BXV5`;ixbL2r}oD& zYce-1)Ee*GMvcGGhDS4GQhH_3E3oWnt+s@+y63&Rh3q~}jPiHD(x?vj z!KbL#D(va%`p1+y+uG-;o7`fDaeaYGrz8Km*R5x3=E#|hUg)8G0}Ly_uUrSkXZLv0 zJeR6>q~Kh+G+tox#5Q?MhV~09XZR|^^)<`d-&);A1thkkPR38zh)I>7{wup62cVU) z)kD&@wbg!SpUlUj(on@cxs+z_5V7toz 
z$V4$^Q+iu$wG32$M;u2gzSU~j`TlIBkfBlwtvabB;g)Ww)1raW?Dh?C3Mb$~CoDLm|-iVy7aI%sPljM`Q%=&x9 zi#_l`Jr$e`;TX);(fZ=MTm|PY-7>njL+P$W4G1HpQQ64*u6w6EU4a3?_eF*0C(r*< z%#KCn!5@sD25s;*PdaTK`T_waDzI*rxg|9I(&VXjDf1~b_<-3mwr1;NB;4kJKZ z-9f!ASk~>Pc7oy&U{u9vb!z#=L)2VIoSSTt{T~n~`=ymuzxugr@Sl-DqCMkS1;2TV z7EG4pxzSLOe|EANp=7zRr(te84z*)I<)Wc4If+bH*pV~ttNuMEk1b%1g=Z0(vGcC$MYG#C1Q z3I89w?|skY=%DaVUew8d2PhHH*aXP?AU33 z5*tr03qZBOp6v)nsT*K2sw9Sgn{C^{aAb^nHA4{uA>adsSnoH?Oif-~uld#2-LwJ* z1>mVbg5BXTv0O<75pOt@Hl9_Yd%Jwg+~#p1uCjL}ux{Vhrw7`{mkstOa)9FalMh9* z^Z|dIzE4`k?$>40^VbK0g{PxDR0qnrO&6{W6r~9l*!?&=%QqY{#`s0~6u}VI+!>PT~?sufBZ#!9V$MLBLNSL5|mU zHY8exT==yVZ!Fhts;nz5(t9q=(9tG+jJZ&Mgz8tBO~YcsOR<} z(0qP|eAH>z5o<_`Sm^uv&=CBT9Q%`(oh+?t>)m)ORco0Q6Z5#3qjpV!XsaxXs4+=r2Rvu4fHmI;ihVq)R8XNbwW`t zg51paroAV(+FgsHAuBOkzv&mwN(K-cOqwtV1!1pWkV3(x&heLEhFSSY*_$F#EDS&}O%xcOX5ZJ_7 zAav~Y2ju(JeKtcf!-=t``*up>V=tIbafuASebOu=2_(}ELj@D9qh}xLt(HTpz1cR zFe^iR38V2BL2kcUSxAQLhcr%dZ*QC;M9_ij)^!$J)3TN1j1_uw{BtI!reqJyoPG~w zfeJr5v3&W3w)(}K@e@_8@axF>%T~(&PYe~Y8x^3qMB>14e~-Wa9$kRH`LpZNCq}`S zOrKOnJfffCzH7>MfIKBE>G$$C1p@X3&siR2U|%c$LUjYJ9hs*@ja)*OdWccq+$3dl z=5o}yL3L5Y=Ee`eND&F&*^${|0UGVUE`;Y5sHTPy-r|JyvCJo*XE==>L`r#mI8eSe zRr-*No?dnilZI^81@Ei&KApe=7R8!0nEr1FZgCZu1yO{Mn@b>>6&RH}E0f3?zBnj; z!{N55M&y}-#oM$56zzyQgm1YBNmGmSlluNQep0zy{`QmF;Fz(3z7AK~Yrxr|*nkJn z_P7_I$aA*?4lr~hYx;m+}meSe#*(& zf#N5<^k^z)nO}F9@A5pWUKO4R*x9aH1%G>1vk2h8764c(8=_H6>h@A$&j1&CP=*=1wfpVv6O3%^o+a7lS1^6`v zqcV(Ct-r4R8@q~XJzVw13Lzwxa61(AT`AymuNGlDV5PR5Fdiz$+>wk#?_30fwprJl zYF z-_APQE=fA9T#qem6+d_%j$#tx#r| zqbg>ghkT|E**3|u4PF)zv7r62&&ufF!sgHPHF92l%5}rbT(g;q9#dau`f->J}j*;>54?V{XE8gcjBox z>KOr*r3`MOqvCG{SBUzP1D%m`104?2pK?OYpT=&GOQNie?=|5&_WMC1sn+ScXpP3M z4ppuXKS^BFHuPB=mfXX9FE@lvUIuxIGt0Ny=V#wghR_fBN}HcPiWEwf0!`scn5)Vuggk{bm7#*o+) z&PtlTLKFb#v;ctaF&W;q#g)f_MA196xktOp6jwRaIeP8AjpEc4ZDcCXySio)hb4TU z1$gSOm(Ici#+w)M*%itqnBv?!U|*k4AjiMHK*HgE<)CM*7KyMN2q=(-kzp zz#%4707pX#1(z8F?v?0{M!ySG2CbRU3;+0W%s>B6cx74eKYg)({@4HSUM!R#fTJ#f 
zHWQ!?qoSY!RkObz0l~q-(#eh02dxJcjXa!vMJUU?UIzEB&pJt7VsD3E6i?;CqnsOO zDcFG0=n()-t|2^tJEb*%ZeUwmK=+ButYMt&Idx$w$ZYDV>h6aN&}_f20mq4|#%CTw zro35S(vn=QEP-H1rzvYAier4avd7WK&&N4M`Ti&0@b|-6@u9c*(;6RlA1RHy`k4ME zscJ?m5-(xuZWfKK1n@i;vlSODA8H-CY*8Ej=xZb6ck$F{`K=gmZu|XsH;AcKM_fib zI{>9W2?^j?o}l~KP>1nq^7OCxEn1&?YAQB9Ed&UN%TWy3gFDHb;g&hDQyY-#mg!)v z>>GV^d4070>H<4^n~%{xc=}EaK_um{`8)4=XR5Eb7l?R23Vn%0xZHi@->5uLiKHO< zDvezlb6^~4S!bE|tv0cNO`N$?N+grsX=Nr`uwqo^86$*Dzm0h&b&&V%dveJW1HqB= z;%zsf#bY69MBIkuiGUM+YHQQLD*wFrLojkF&>R!OBU*)vPVp`Nk@LpRM$uc3&wwkt z^U~djyq3Axfhn^1jz+lN?*BxwYA=39pvjfhV3zLchDN!b{&&1xjmd5McY+qr1v^*i zRo5k`n^s=RbrWt_0iUq~9^Himau`;C^cMLUb;2AW*)%kw_e&Q&=Viv{0-1AK#Y%|o zY|q1d`=jLj*9QYP(B$kk#`%BCDav3@n;Bws8G2|$L)WrS+d@waiH5Q-S!#e*=*=01 zLmDt)DR<S_7fDILHE8Ji$+Y~;9zS?+AaWEL13w7iE~Y2-+X+D>7A?7&!DJ|CMnp~m{1u8LQj zm;5p=jov4kln#1=U~2@c1UsZ@2I2Y0JAZmx9YX^Zvn#k%`Gs)je*Lf}^Y;&~B;-EO zyhazP7Rbq;Mu6}Df$|85s_v06qhC9YX{zBx4lX+T$-5s~`6d%zcXiKpb7|<7T1RpR zeG$m}!Uru@*>_0{R95*b2L9zc+t8Rm0A*-N*riiQx{);_SxuJxggs(`Z%Fl7!$l6Xz|;Pz?V z7ccGvd>@KbW9584R;bOs_lfeC9kqfB2>P{3tiNpudPLB5lR$y(vc#s9`W7KVPko!9 zaNy4KjD3At^P-6k#4m%aLlFqs<2!+lqHAOWHSu~OqHoruM>K2TZbNg;^Bd=b64a(P z?o!Ks;hO|N5MdH?ZVQl&{{yn=4jKZ{#+ZSlV-+LTXg9b{2tV?^G^Ohkx}X30=9zOU zKWb&IA#>4^jbG;WS&lfYFfFch9WxtLjy}w+$DO77GNW~v6KIQz$=a=rDI;g)nwBg? 
z!4NYU;T(>VM+$7bqdyzKJrs%26KQa7CyFS2isBp+Q-b2A7ox8*sN}W8@B74B-R=e0 zMWA^9+lb2TZrAolhB~LyEQF0VTB}ex&nj~5a1+%@%;_!g|8V8q4aby$38~}W%WoW+ zP(+iJZGhv|{<5s|RBdNeXmtcqr^p6kGV*@M_WxBV^nd^Tf3Mqr+rWR@z<=Ao|Cepx z!)45h(oSoh8V9ZILa{yh4vVSosFZb_g<7P12rqQ64{2|L>>h*d?f__;==JNWiZ?GH zY)x2YJ6~$({;RXYqCaYA_G0wK-B43n+jxxBp4b|n`=2jD6)4hcge-rET-cl>kx+zz z5(DNXH2Z0do!gXP22GSX!T1*X^ywV0$ZASjiU>%R#@P~H#-2V`k{=AvI-k;$B>AQvOo8+T;?!#-B9@7c zdSB_@Nl&lJH47h#zty0pd@Qy%-Qy{lzaGT=T||~$p6KAu0qUnT?#z`w@#)wMnt^{W z;Th?v8|t2mtdV@fbvATI+S=i?;E0y25oNpz!@G}dB@I8|BRp49;Gbes?bwT`$Fw7s z-3uiI$*&5%ozJ$jpT1~!=XGSDsx+6+w~&wTy@kHEpSbh{o&d4GZsI&cC&%48&zs3h zdT6_D~3V>?u z3CPg9ygSQ7Nt@+fA6f5nM&F8Axc#6y3}9d1-`y5*ONJyz6dY#W2Q8&X$jM%#hZVIz zde@@wwC3oumwwFy9FT!`VEoG71oXJT6UHIi3MpPvFWQs+YY}dhkM1;>uruc-ozeI0 zIW_{$cqaq}SZ+Aq=%n4NGR)_$3Pzq$IlWl8!u?=kf3evm?h=JU=d?qqlekhv%LY?k zQ0GZyYe42GwFo4PtZPFbR6SZ8r+&A-tf0-|@R8ByrKAc|uOVmkxBTkJ=S&WFRhPw+ zAEYCI`BYyShAHy+oPuXVOuv~R#7k}Gl@z4pUB9=2B6$g!=cu-d=NUuOOMZEYaaMk-i5q<~}C61N&DDmgDj?68B|M)d`vY?7hq~w1=-vsx zqT;=xpY2X%0xlk@0guT_HLN#qP~v9UA+jxs#0%Ebgo~zM&+Q)vr zQqv0vZk{hGbX|WiC}wu8YD;nEQg@@FLh_GV`SxVx&dMA`hWgb1(cX1NHL-TES4a3IrUihs7?2r{L^@fiC8OrJfV4OoWQX{r zt$-Rp?{d@KT4!(Q1^U3el0u&E!s0A$=n;dIcEuJniq@<6-#Hgl$5W?yFoKdNbl58V zvV(m5*6A@SguxlCsBNmGRSu%S@-0{fB)Mx@!wcjCj-!CjXyI~YNKWv!arsnk%fkBB zs2dZy{_E%0)v4%4+|34DOQC>~?hp22tr2v^jo$bf&-g$&-vy+S84#7tF+BhUwx*-s$S{jC zc$&586klK=(hgoUz>G zZqVgEkK~uAH@IV=>SL)v)ZM^}fP{JZwugd9R~$^aw|ydNHTaPW-vC#7W6u%hA$eN!L{F*}SOCSGrt9 z-jcCWPh{?WX7eAe-Mg)sYTV*BCeC!5+q+S;cW1e|jg7;eZNr0V1Dd>~N)eoFP=2%*0HA!c-O@|GC$Pe)GENO(j z{SeXPAb0;UKMdaNQ>VtI^M{V0W*^h2-Q=C6vqs+HX?`gUKf#Ygu7ED6Z&if_ z&X4aV+jR0(=^dlbB^(DYt5PiLk4%QnFh+9~EVwf}%~6Lez#*&Zmmi2fI9(K0(P6O< zV4{Q<`)*7&dY0>!!+q>4?2NA_>kzq_p~EIdW#i^DxRq0n(qpcVsH!C?a;8*!wZooI z@3pwp)x2SJ0!cNg+kU7H304sp&*`dq))nNq9d?e5b->H1D6 zEJ*v=tBIgZpO0zFqi&>=TaQI3U1-pH9 zFSo7Ip2)rc#&cu#VC4*uvRV0jj+qR1x9EP)PG9J45FbHQlB@yX{>4nPVb zW1ED{;v+S1b<*0>M1^vP)95u(zYAr9UT_#fR+=&7Lc<3=z=v*in`bhvE}$s5E%^Hx 
zB|5Vms(D`zWaQ;{0+BQ`O+NO>|J#lqGH=kiAB8PN@Fbcj8+vY$GpTmx^O}fW*uIZN zyGN>l%2mM=h6UgRbr=xrS}SHE-XWgGJya35l@UA=GP`I|7irxyRHI3US{WrPeKFDP zwyPTo*rq8^XU}%DTP&x=5Y-n4USesUTLs3>mbbPOo9=ukixz-1jEfLakNa^Y=+gwk2IF!vqH&r0 z=+7+c^E@)CSAN>Ybnt>Xck+g!w(3Xs&A2Hx(Q{4#@Wh!_I;-gJf;QT#k6oo~a%6O3 ziHT-7H2NdqOD5j5-n0|}*&QyCgcRDGo{mW!;>tN+#Me!4cC*|g;n&Bc$_6Ich6DL= zBlDhuFk*4>kqGVAi`(zqQb`s!$BkGy7+H>gnaktylJUAJrI1F!Sq00zOImq(ZZ=r& zGZ250O!uv|k(8QxsYj=7Kk?*FdeG?hh|gs`%ly+vkSf-oz-!)5=6 znD0L!EdFoTCf&OCtR`RR12MhPm8~`aMd$JenSIKruL$ME+SKYQWspm#J#yt)ui(ku zl2|C|d2H8B5g8``*SD+qkd1tgSVEHt6vTeXp|6?^uvCEE#tScDQx_NHtxj(vawffh zKlv4^xAf5OV6{I|FnmIE*K?}FYUq0Z=6d;93rzfpYEGwZ5VL66oxkJHFS7;9VI+J_ z-JCbHy~2aDV?KK#gCioB+W_yoW5(u^j&G{udj4t)%}s@geYU1e7%o97xi>0zP4b4% z8UxQb|17pk|Kg1zed5H|_m%vP8KD$AEWPj*$aQ*FbQ~^r1 zx>or%KC30|o72M&(JnuSkK2?yP)N0T#SXv1;Hh%ayRE1gWgF3qR|QyB3f-w?+oArI zE5fFy455OH!zFbrAsQ@m2>|7)zXBjOoWx16kv15Nw6)h|Yh7BphTe+cWhj2?6Ie&+ zqS{_8@W-L#9f0Hr16v4$aPn3&O8)&{JnfoMOgPWGNaO{FdxH2FKaIijc>(8pHBi+X zb7w``d<#EDt8Q|XG!DUKi#5_4o=g?E-?^MZ^;_>1sPk1MgcKup`^KEa4lmy6StC0Og$@ zd+Aec{U;jaT&fD8n9my-;z}su^^57d7?;))Er;@6Y~9#wJ!E+Qp^XceqW`i#EZ&MY z-7f+#rv>m-elDZpm9`^u_UJ%1c?tt;FztT@8v_bkWdQHdbjLDZqdEM0SM~r0@5$q` z+Suh*a?ccFKW(i-JXS`)lSdM{kObP7VP=~(B2t|)y!7c~q2m?}s#_;BO`0+WOt~iV zFF<@D;4J)#0X*s`7hV>UB9J#Xm&=7R$c4|F{jwyg zJrVWErR`5XBV5D0TtsXmZPM!PT-|(>P@S9ji{_tyREhslm21izBpT}h2&M7#b+`9U z6?15sXwM+-N9F+B4nBPnNx0yO(J%Rq<$P@1^W!B@biuWLAM~k-+$UOEcLyCv#dfFr zU1A04#CM7FxPXUOcppT( z(Gcp$ywcJH_6bzW&{`0H0`{L2U3e!2$yh<@{ z_bBg-!#AQLNt#Eb1aMt-PFxt~3F_oSFT1|hmenreeFy)fVmMske5vYrlRnhGjfi^7 zzR#kBTAf$gK7!Y-^U+?N2c5y~rR3OewV{^leax^K^XVBhq_dfygr+E3w|tR}V3tkp zb-$X3BhNPn@5$LA5UKkm8!lOnCYXvg4-me<#XSlr*a%irEo}73V)wJIY)6JkefS0M zs_y&363=l|th1ByEj4Rsbfn!~F!jigKp$7Dy>*-kaddcT+a)lt>3$oSc(@C4etdf0 zAhUXWbtV4oUrVYs4eRI3s=dUso;oyd!A%Z826Zhy$}du{1Fpe|xSidhW|?#Pg$#wCe=GbTIsN&4!zGx0ZT1jpn02g8{8(&d$b_t>)9hOjRiHx}upb5I z%xIK&l{cDhWiY4){&^An2cWzCEcP2cQVLt`(pxVh)u=+xgZ(<+pSG31dS7_>MJrLU 
zMr3eF@rn8Y$m#^H++PQ(3JJN$Op!vZ&sx8uQmgZfKN5U7F(#n<{vAInYR+Xf5|4jT z*DU?hfjU8?y?(RF3BBv|kcYD{6?_3$-|lLd@5}ZR$6ru~{9dKgk_f|f;V@&l+dLdO zutzSETg;2QH+7K$=}2Bl@>iXvU5J(4c6wk?Ppt>{oT@i%I{>=)ypQ!+DVA7$FT?nW ztllzMTD0O#@bzaN0_jsmoMnm54_bquJ;;)xYuG%u`3wcBP&`!AA)ps+T{x*u9fex( z-i?)ED#a!H1iQMkTr+pjX~z#S$8A>iP>%Ea@mV-!j$-A{1=KZew2*T=sE@VR(l$0< zE#bAgB)D^4Z zhcXft4}sEV5Z4g@3!h7$#J%1eZH_Bwb6qQjwDQ36VZt{fX)PV4v%+tcO({0!h7$Ve zjHOu9J}c*jbg>phKBc@VWJebV3Ll!#ADkHJWa@BKRB1G0Gk7GAe&gxX)EKKSWF#2- zgrjC8H(@fCcv zu68^LnR5i0yI*u5J}4`9FU`eiq(3gAxq;tN0Tmu<5Ft~uRnp@k`P*8_pLNFCMusH0 z3lp;5xtIL+D*iG_Sn>*$5V)`W;++z4Z+9rT@AK!ch{!;BN@G^q1yC4j7o#KS6R3b` zdSo1oiH17`UX>?+AeqZB*e|b!#L1qkSjQL@i4E8{zLL3>dYbb+y76N|#VoYC zr1XyPUFS0c@M4LV5%anor=EhF|4oX5|E^wk|M)&hDgXub-aja+{#P#k_wjE|!tZ(b zJrBPz@EZfaG4LA$zcKI|1OMMKzj4odU0|VN^{$5>Xka>N!Ixf&TG?K literal 0 HcmV?d00001 diff --git a/frontend/src/components/common/WechatServiceButton.vue b/frontend/src/components/common/WechatServiceButton.vue new file mode 100644 index 00000000..9ee8d3d5 --- /dev/null +++ b/frontend/src/components/common/WechatServiceButton.vue @@ -0,0 +1,104 @@ + + + + + diff --git a/frontend/src/components/layout/AppHeader.vue b/frontend/src/components/layout/AppHeader.vue index a6b4030f..53a0c01e 100644 --- a/frontend/src/components/layout/AppHeader.vue +++ b/frontend/src/components/layout/AppHeader.vue @@ -121,23 +121,6 @@ {{ t('nav.apiKeys') }} - - - - - - {{ t('nav.github') }} - diff --git a/frontend/src/views/HomeView.vue b/frontend/src/views/HomeView.vue index 6a3753f1..babcf046 100644 --- a/frontend/src/views/HomeView.vue +++ b/frontend/src/views/HomeView.vue @@ -122,8 +122,11 @@ > {{ siteName }} -

- {{ siteSubtitle }} +

+ {{ t('home.heroSubtitle') }} +

+

+ {{ t('home.heroDescription') }}

@@ -177,7 +180,7 @@ -
+
@@ -204,6 +207,63 @@
+ +
+

+ {{ t('home.painPoints.title') }} +

+
+ +
+
+ + + +
+

{{ t('home.painPoints.items.expensive.title') }}

+

{{ t('home.painPoints.items.expensive.desc') }}

+
+ +
+
+ + + +
+

{{ t('home.painPoints.items.complex.title') }}

+

{{ t('home.painPoints.items.complex.desc') }}

+
+ +
+
+ + + +
+

{{ t('home.painPoints.items.unstable.title') }}

+

{{ t('home.painPoints.items.unstable.desc') }}

+
+ +
+
+ + + +
+

{{ t('home.painPoints.items.noControl.title') }}

+

{{ t('home.painPoints.items.noControl.desc') }}

+
+
+
+ + +
+

+ {{ t('home.solutions.title') }} +

+

{{ t('home.solutions.subtitle') }}

+
+
@@ -369,6 +429,77 @@ >
+ + +
+

+ {{ t('home.comparison.title') }} +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
{{ t('home.comparison.headers.feature') }}{{ t('home.comparison.headers.official') }}{{ t('home.comparison.headers.us') }}
{{ t('home.comparison.items.pricing.feature') }}{{ t('home.comparison.items.pricing.official') }}{{ t('home.comparison.items.pricing.us') }}
{{ t('home.comparison.items.models.feature') }}{{ t('home.comparison.items.models.official') }}{{ t('home.comparison.items.models.us') }}
{{ t('home.comparison.items.management.feature') }}{{ t('home.comparison.items.management.official') }}{{ t('home.comparison.items.management.us') }}
{{ t('home.comparison.items.stability.feature') }}{{ t('home.comparison.items.stability.official') }}{{ t('home.comparison.items.stability.us') }}
{{ t('home.comparison.items.control.feature') }}{{ t('home.comparison.items.control.official') }}{{ t('home.comparison.items.control.us') }}
+
+
+ + +
+

+ {{ t('home.cta.title') }} +

+

+ {{ t('home.cta.description') }} +

+ + {{ t('home.cta.button') }} + + + + {{ t('home.goToDashboard') }} + + +
@@ -380,27 +511,20 @@

© {{ currentYear }} {{ siteName }}. {{ t('home.footer.allRightsReserved') }}

- + + {{ t('home.docs') }} + + + + @@ -410,6 +534,7 @@ import { useI18n } from 'vue-i18n' import { useAuthStore, useAppStore } from '@/stores' import LocaleSwitcher from '@/components/common/LocaleSwitcher.vue' import Icon from '@/components/icons/Icon.vue' +import WechatServiceButton from '@/components/common/WechatServiceButton.vue' const { t } = useI18n() @@ -419,7 +544,6 @@ const appStore = useAppStore() // Site settings - directly from appStore (already initialized from injected config) const siteName = computed(() => appStore.cachedPublicSettings?.site_name || appStore.siteName || 'Sub2API') const siteLogo = computed(() => appStore.cachedPublicSettings?.site_logo || appStore.siteLogo || '') -const siteSubtitle = computed(() => appStore.cachedPublicSettings?.site_subtitle || 'AI API Gateway Platform') const docUrl = computed(() => appStore.cachedPublicSettings?.doc_url || appStore.docUrl || '') const homeContent = computed(() => appStore.cachedPublicSettings?.home_content || '') @@ -432,9 +556,6 @@ const isHomeContentUrl = computed(() => { // Theme const isDark = ref(document.documentElement.classList.contains('dark')) -// GitHub URL -const githubUrl = 'https://github.com/Wei-Shaw/sub2api' - // Auth state const isAuthenticated = computed(() => authStore.isAuthenticated) const isAdmin = computed(() => authStore.isAdmin) diff --git a/stress_test_gemini_session.sh b/stress_test_gemini_session.sh new file mode 100644 index 00000000..1f2aca57 --- /dev/null +++ b/stress_test_gemini_session.sh @@ -0,0 +1,127 @@ +#!/bin/bash + +# Gemini 粘性会话压力测试脚本 +# 测试目标:验证不同会话分配不同账号,同一会话保持同一账号 + +BASE_URL="http://host.clicodeplus.com:8080" +API_KEY="sk-32ad0a3197e528c840ea84f0dc6b2056dd3fead03526b5c605a60709bd408f7e" +MODEL="gemini-2.5-flash" + +# 创建临时目录存放结果 +RESULT_DIR="/tmp/gemini_stress_test_$(date +%s)" +mkdir -p "$RESULT_DIR" + +echo "==========================================" +echo "Gemini 粘性会话压力测试" +echo "结果目录: $RESULT_DIR" +echo "==========================================" 
+ +# 函数:发送请求并记录 +send_request() { + local session_id=$1 + local round=$2 + local system_prompt=$3 + local contents=$4 + local output_file="$RESULT_DIR/session_${session_id}_round_${round}.json" + + local request_body=$(cat < "$output_file" 2>&1 + + echo "[Session $session_id Round $round] 完成" +} + +# 会话1:数学计算器(累加序列) +run_session_1() { + local sys_prompt="你是一个数学计算器,只返回计算结果数字,不要任何解释" + + # Round 1: 1+1=? + send_request 1 1 "$sys_prompt" '[{"role":"user","parts":[{"text":"1+1=?"}]}]' + + # Round 2: 继续 2+2=?(累加历史) + send_request 1 2 "$sys_prompt" '[{"role":"user","parts":[{"text":"1+1=?"}]},{"role":"model","parts":[{"text":"2"}]},{"role":"user","parts":[{"text":"2+2=?"}]}]' + + # Round 3: 继续 3+3=? + send_request 1 3 "$sys_prompt" '[{"role":"user","parts":[{"text":"1+1=?"}]},{"role":"model","parts":[{"text":"2"}]},{"role":"user","parts":[{"text":"2+2=?"}]},{"role":"model","parts":[{"text":"4"}]},{"role":"user","parts":[{"text":"3+3=?"}]}]' + + # Round 4: 批量计算 10+10, 20+20, 30+30 + send_request 1 4 "$sys_prompt" '[{"role":"user","parts":[{"text":"1+1=?"}]},{"role":"model","parts":[{"text":"2"}]},{"role":"user","parts":[{"text":"2+2=?"}]},{"role":"model","parts":[{"text":"4"}]},{"role":"user","parts":[{"text":"3+3=?"}]},{"role":"model","parts":[{"text":"6"}]},{"role":"user","parts":[{"text":"计算: 10+10=? 20+20=? 
30+30=?"}]}]' +} + +# 会话2:英文翻译器(不同系统提示词 = 不同会话) +run_session_2() { + local sys_prompt="你是一个英文翻译器,将中文翻译成英文,只返回翻译结果" + + send_request 2 1 "$sys_prompt" '[{"role":"user","parts":[{"text":"你好"}]}]' + send_request 2 2 "$sys_prompt" '[{"role":"user","parts":[{"text":"你好"}]},{"role":"model","parts":[{"text":"Hello"}]},{"role":"user","parts":[{"text":"世界"}]}]' + send_request 2 3 "$sys_prompt" '[{"role":"user","parts":[{"text":"你好"}]},{"role":"model","parts":[{"text":"Hello"}]},{"role":"user","parts":[{"text":"世界"}]},{"role":"model","parts":[{"text":"World"}]},{"role":"user","parts":[{"text":"早上好"}]}]' +} + +# 会话3:日文翻译器 +run_session_3() { + local sys_prompt="你是一个日文翻译器,将中文翻译成日文,只返回翻译结果" + + send_request 3 1 "$sys_prompt" '[{"role":"user","parts":[{"text":"你好"}]}]' + send_request 3 2 "$sys_prompt" '[{"role":"user","parts":[{"text":"你好"}]},{"role":"model","parts":[{"text":"こんにちは"}]},{"role":"user","parts":[{"text":"谢谢"}]}]' + send_request 3 3 "$sys_prompt" '[{"role":"user","parts":[{"text":"你好"}]},{"role":"model","parts":[{"text":"こんにちは"}]},{"role":"user","parts":[{"text":"谢谢"}]},{"role":"model","parts":[{"text":"ありがとう"}]},{"role":"user","parts":[{"text":"再见"}]}]' +} + +# 会话4:乘法计算器(另一个数学会话,但系统提示词不同) +run_session_4() { + local sys_prompt="你是一个乘法专用计算器,只计算乘法,返回数字结果" + + send_request 4 1 "$sys_prompt" '[{"role":"user","parts":[{"text":"2*3=?"}]}]' + send_request 4 2 "$sys_prompt" '[{"role":"user","parts":[{"text":"2*3=?"}]},{"role":"model","parts":[{"text":"6"}]},{"role":"user","parts":[{"text":"4*5=?"}]}]' + send_request 4 3 "$sys_prompt" '[{"role":"user","parts":[{"text":"2*3=?"}]},{"role":"model","parts":[{"text":"6"}]},{"role":"user","parts":[{"text":"4*5=?"}]},{"role":"model","parts":[{"text":"20"}]},{"role":"user","parts":[{"text":"计算: 10*10=? 
20*20=?"}]}]' +} + +# 会话5:诗人(完全不同的角色) +run_session_5() { + local sys_prompt="你是一位诗人,用简短的诗句回应每个话题,每次只写一句诗" + + send_request 5 1 "$sys_prompt" '[{"role":"user","parts":[{"text":"春天"}]}]' + send_request 5 2 "$sys_prompt" '[{"role":"user","parts":[{"text":"春天"}]},{"role":"model","parts":[{"text":"春风拂面花满枝"}]},{"role":"user","parts":[{"text":"夏天"}]}]' + send_request 5 3 "$sys_prompt" '[{"role":"user","parts":[{"text":"春天"}]},{"role":"model","parts":[{"text":"春风拂面花满枝"}]},{"role":"user","parts":[{"text":"夏天"}]},{"role":"model","parts":[{"text":"蝉鸣蛙声伴荷香"}]},{"role":"user","parts":[{"text":"秋天"}]}]' +} + +echo "" +echo "开始并发测试 5 个独立会话..." +echo "" + +# 并发运行所有会话 +run_session_1 & +run_session_2 & +run_session_3 & +run_session_4 & +run_session_5 & + +# 等待所有后台任务完成 +wait + +echo "" +echo "==========================================" +echo "所有请求完成,结果保存在: $RESULT_DIR" +echo "==========================================" + +# 显示结果摘要 +echo "" +echo "响应摘要:" +for f in "$RESULT_DIR"/*.json; do + filename=$(basename "$f") + response=$(cat "$f" | head -c 200) + echo "[$filename]: ${response}..." 
+done + +echo "" +echo "请检查服务器日志确认账号分配情况" From d269659e61fa8e714a7210ef54d6fe2681a77e13 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Tue, 10 Feb 2026 21:28:52 +0800 Subject: [PATCH 022/175] chore: bump version to 0.1.78.2 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 5087e794..3d46fb65 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.76 \ No newline at end of file +0.1.78.2 From f2770da880ac2d82cef242c31bca1c5ce161c231 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Tue, 10 Feb 2026 23:13:37 +0800 Subject: [PATCH 023/175] refactor: extract failover error handling into shared HandleFailoverError MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Extract duplicated failover error handling from gateway_handler.go (Gemini-compat & Claude paths) and gemini_v1beta_handler.go into shared failover_loop.go - Introduce TempUnscheduler interface for testability (GatewayService implicitly satisfies it) - Add comprehensive unit tests for HandleFailoverError (32 test cases covering all paths) - Fix golangci-lint issues: errcheck in test type assertion, staticcheck QF1003 if/else→switch --- backend/internal/handler/failover_loop.go | 125 ++++ .../internal/handler/failover_loop_test.go | 656 ++++++++++++++++++ backend/internal/handler/gateway_handler.go | 151 +--- .../internal/handler/gemini_v1beta_handler.go | 50 +- .../service/error_passthrough_runtime_test.go | 4 +- backend/internal/service/gateway_service.go | 5 +- 6 files changed, 844 insertions(+), 147 deletions(-) create mode 100644 backend/internal/handler/failover_loop.go create mode 100644 backend/internal/handler/failover_loop_test.go diff --git a/backend/internal/handler/failover_loop.go b/backend/internal/handler/failover_loop.go new file mode 100644 index 00000000..fdba5620 --- /dev/null +++ 
b/backend/internal/handler/failover_loop.go @@ -0,0 +1,125 @@ +package handler + +import ( + "context" + "log" + "time" + + "sub2api/internal/service" +) + +// TempUnscheduler 用于 HandleFailoverError 中同账号重试耗尽后的临时封禁。 +// GatewayService 隐式实现此接口。 +type TempUnscheduler interface { + TempUnscheduleRetryableError(ctx context.Context, accountID int64, failoverErr *service.UpstreamFailoverError) +} + +// FailoverAction 表示 failover 错误处理后的下一步动作 +type FailoverAction int + +const ( + // FailoverRetry 同账号重试(调用方应 continue 重新进入循环,不更换账号) + FailoverRetry FailoverAction = iota + // FailoverSwitch 切换账号(调用方应 continue 重新选择账号) + FailoverSwitch + // FailoverExhausted 切换次数耗尽(调用方应返回错误响应) + FailoverExhausted + // FailoverCanceled context 已取消(调用方应直接 return) + FailoverCanceled +) + +const ( + // maxSameAccountRetries 同账号重试次数上限(针对 RetryableOnSameAccount 错误) + maxSameAccountRetries = 2 + // sameAccountRetryDelay 同账号重试间隔 + sameAccountRetryDelay = 500 * time.Millisecond +) + +// FailoverState 跨循环迭代共享的 failover 状态 +type FailoverState struct { + SwitchCount int + MaxSwitches int + FailedAccountIDs map[int64]struct{} + SameAccountRetryCount map[int64]int + LastFailoverErr *service.UpstreamFailoverError + ForceCacheBilling bool + hasBoundSession bool +} + +// NewFailoverState 创建 failover 状态 +func NewFailoverState(maxSwitches int, hasBoundSession bool) *FailoverState { + return &FailoverState{ + MaxSwitches: maxSwitches, + FailedAccountIDs: make(map[int64]struct{}), + SameAccountRetryCount: make(map[int64]int), + hasBoundSession: hasBoundSession, + } +} + +// HandleFailoverError 处理 UpstreamFailoverError,返回下一步动作。 +// 包含:缓存计费判断、同账号重试、临时封禁、切换计数、Antigravity 延时。 +func (s *FailoverState) HandleFailoverError( + ctx context.Context, + gatewayService TempUnscheduler, + accountID int64, + platform string, + failoverErr *service.UpstreamFailoverError, +) FailoverAction { + s.LastFailoverErr = failoverErr + + // 缓存计费判断 + if needForceCacheBilling(s.hasBoundSession, failoverErr) { + s.ForceCacheBilling = true + } + 
+ // 同账号重试:对 RetryableOnSameAccount 的临时性错误,先在同一账号上重试 + if failoverErr.RetryableOnSameAccount && s.SameAccountRetryCount[accountID] < maxSameAccountRetries { + s.SameAccountRetryCount[accountID]++ + log.Printf("Account %d: retryable error %d, same-account retry %d/%d", + accountID, failoverErr.StatusCode, s.SameAccountRetryCount[accountID], maxSameAccountRetries) + if !sleepWithContext(ctx, sameAccountRetryDelay) { + return FailoverCanceled + } + return FailoverRetry + } + + // 同账号重试用尽,执行临时封禁 + if failoverErr.RetryableOnSameAccount { + gatewayService.TempUnscheduleRetryableError(ctx, accountID, failoverErr) + } + + // 加入失败列表 + s.FailedAccountIDs[accountID] = struct{}{} + + // 检查是否耗尽 + if s.SwitchCount >= s.MaxSwitches { + return FailoverExhausted + } + + // 递增切换计数 + s.SwitchCount++ + log.Printf("Account %d: upstream error %d, switching account %d/%d", + accountID, failoverErr.StatusCode, s.SwitchCount, s.MaxSwitches) + + // Antigravity 平台换号线性递增延时 + if platform == service.PlatformAntigravity { + if !sleepFailoverDelay(ctx, s.SwitchCount) { + return FailoverCanceled + } + } + + return FailoverSwitch +} + +// sleepWithContext 等待指定时长,返回 false 表示 context 已取消。 +func sleepWithContext(ctx context.Context, d time.Duration) bool { + if d <= 0 { + return true + } + select { + case <-ctx.Done(): + return false + case <-time.After(d): + return true + } +} diff --git a/backend/internal/handler/failover_loop_test.go b/backend/internal/handler/failover_loop_test.go new file mode 100644 index 00000000..00b8fec9 --- /dev/null +++ b/backend/internal/handler/failover_loop_test.go @@ -0,0 +1,656 @@ +package handler + +import ( + "context" + "testing" + "time" + + "sub2api/internal/service" + + "github.com/stretchr/testify/require" +) + +// --------------------------------------------------------------------------- +// Mock +// --------------------------------------------------------------------------- + +// mockTempUnscheduler 记录 TempUnscheduleRetryableError 的调用信息。 +type 
mockTempUnscheduler struct { + calls []tempUnscheduleCall +} + +type tempUnscheduleCall struct { + accountID int64 + failoverErr *service.UpstreamFailoverError +} + +func (m *mockTempUnscheduler) TempUnscheduleRetryableError(_ context.Context, accountID int64, failoverErr *service.UpstreamFailoverError) { + m.calls = append(m.calls, tempUnscheduleCall{accountID: accountID, failoverErr: failoverErr}) +} + +// --------------------------------------------------------------------------- +// Helper +// --------------------------------------------------------------------------- + +func newTestFailoverErr(statusCode int, retryable, forceBilling bool) *service.UpstreamFailoverError { + return &service.UpstreamFailoverError{ + StatusCode: statusCode, + RetryableOnSameAccount: retryable, + ForceCacheBilling: forceBilling, + } +} + +// --------------------------------------------------------------------------- +// NewFailoverState 测试 +// --------------------------------------------------------------------------- + +func TestNewFailoverState(t *testing.T) { + t.Run("初始化字段正确", func(t *testing.T) { + fs := NewFailoverState(5, true) + require.Equal(t, 5, fs.MaxSwitches) + require.Equal(t, 0, fs.SwitchCount) + require.NotNil(t, fs.FailedAccountIDs) + require.Empty(t, fs.FailedAccountIDs) + require.NotNil(t, fs.SameAccountRetryCount) + require.Empty(t, fs.SameAccountRetryCount) + require.Nil(t, fs.LastFailoverErr) + require.False(t, fs.ForceCacheBilling) + require.True(t, fs.hasBoundSession) + }) + + t.Run("无绑定会话", func(t *testing.T) { + fs := NewFailoverState(3, false) + require.Equal(t, 3, fs.MaxSwitches) + require.False(t, fs.hasBoundSession) + }) + + t.Run("零最大切换次数", func(t *testing.T) { + fs := NewFailoverState(0, false) + require.Equal(t, 0, fs.MaxSwitches) + }) +} + +// --------------------------------------------------------------------------- +// sleepWithContext 测试 +// --------------------------------------------------------------------------- + +func 
TestSleepWithContext(t *testing.T) { + t.Run("零时长立即返回true", func(t *testing.T) { + start := time.Now() + ok := sleepWithContext(context.Background(), 0) + require.True(t, ok) + require.Less(t, time.Since(start), 50*time.Millisecond) + }) + + t.Run("负时长立即返回true", func(t *testing.T) { + start := time.Now() + ok := sleepWithContext(context.Background(), -1*time.Second) + require.True(t, ok) + require.Less(t, time.Since(start), 50*time.Millisecond) + }) + + t.Run("正常等待后返回true", func(t *testing.T) { + start := time.Now() + ok := sleepWithContext(context.Background(), 50*time.Millisecond) + elapsed := time.Since(start) + require.True(t, ok) + require.GreaterOrEqual(t, elapsed, 40*time.Millisecond) + require.Less(t, elapsed, 500*time.Millisecond) + }) + + t.Run("已取消context立即返回false", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + start := time.Now() + ok := sleepWithContext(ctx, 5*time.Second) + require.False(t, ok) + require.Less(t, time.Since(start), 50*time.Millisecond) + }) + + t.Run("等待期间context取消返回false", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + go func() { + time.Sleep(30 * time.Millisecond) + cancel() + }() + + start := time.Now() + ok := sleepWithContext(ctx, 5*time.Second) + elapsed := time.Since(start) + require.False(t, ok) + require.Less(t, elapsed, 500*time.Millisecond) + }) +} + +// --------------------------------------------------------------------------- +// HandleFailoverError — 基本切换流程 +// --------------------------------------------------------------------------- + +func TestHandleFailoverError_BasicSwitch(t *testing.T) { + t.Run("非重试错误_非Antigravity_直接切换", func(t *testing.T) { + mock := &mockTempUnscheduler{} + fs := NewFailoverState(3, false) + err := newTestFailoverErr(500, false, false) + + action := fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) + + require.Equal(t, FailoverSwitch, action) + require.Equal(t, 1, fs.SwitchCount) + 
require.Contains(t, fs.FailedAccountIDs, int64(100)) + require.Equal(t, err, fs.LastFailoverErr) + require.False(t, fs.ForceCacheBilling) + require.Empty(t, mock.calls, "不应调用 TempUnschedule") + }) + + t.Run("非重试错误_Antigravity_第一次切换无延迟", func(t *testing.T) { + // switchCount 从 0→1 时,sleepFailoverDelay(ctx, 1) 的延时 = (1-1)*1s = 0 + mock := &mockTempUnscheduler{} + fs := NewFailoverState(3, false) + err := newTestFailoverErr(500, false, false) + + start := time.Now() + action := fs.HandleFailoverError(context.Background(), mock, 100, service.PlatformAntigravity, err) + elapsed := time.Since(start) + + require.Equal(t, FailoverSwitch, action) + require.Equal(t, 1, fs.SwitchCount) + require.Less(t, elapsed, 200*time.Millisecond, "第一次切换延迟应为 0") + }) + + t.Run("非重试错误_Antigravity_第二次切换有1秒延迟", func(t *testing.T) { + // switchCount 从 1→2 时,sleepFailoverDelay(ctx, 2) 的延时 = (2-1)*1s = 1s + mock := &mockTempUnscheduler{} + fs := NewFailoverState(3, false) + fs.SwitchCount = 1 // 模拟已切换一次 + + err := newTestFailoverErr(500, false, false) + start := time.Now() + action := fs.HandleFailoverError(context.Background(), mock, 200, service.PlatformAntigravity, err) + elapsed := time.Since(start) + + require.Equal(t, FailoverSwitch, action) + require.Equal(t, 2, fs.SwitchCount) + require.GreaterOrEqual(t, elapsed, 800*time.Millisecond, "第二次切换延迟应约 1s") + require.Less(t, elapsed, 3*time.Second) + }) + + t.Run("连续切换直到耗尽", func(t *testing.T) { + mock := &mockTempUnscheduler{} + fs := NewFailoverState(2, false) + + // 第一次切换:0→1 + err1 := newTestFailoverErr(500, false, false) + action := fs.HandleFailoverError(context.Background(), mock, 100, "openai", err1) + require.Equal(t, FailoverSwitch, action) + require.Equal(t, 1, fs.SwitchCount) + + // 第二次切换:1→2 + err2 := newTestFailoverErr(502, false, false) + action = fs.HandleFailoverError(context.Background(), mock, 200, "openai", err2) + require.Equal(t, FailoverSwitch, action) + require.Equal(t, 2, fs.SwitchCount) + + // 第三次已耗尽:SwitchCount(2) >= 
MaxSwitches(2) + err3 := newTestFailoverErr(503, false, false) + action = fs.HandleFailoverError(context.Background(), mock, 300, "openai", err3) + require.Equal(t, FailoverExhausted, action) + require.Equal(t, 2, fs.SwitchCount, "耗尽时不应继续递增") + + // 验证失败账号列表 + require.Len(t, fs.FailedAccountIDs, 3) + require.Contains(t, fs.FailedAccountIDs, int64(100)) + require.Contains(t, fs.FailedAccountIDs, int64(200)) + require.Contains(t, fs.FailedAccountIDs, int64(300)) + + // LastFailoverErr 应为最后一次的错误 + require.Equal(t, err3, fs.LastFailoverErr) + }) + + t.Run("MaxSwitches为0时首次即耗尽", func(t *testing.T) { + mock := &mockTempUnscheduler{} + fs := NewFailoverState(0, false) + err := newTestFailoverErr(500, false, false) + + action := fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) + require.Equal(t, FailoverExhausted, action) + require.Equal(t, 0, fs.SwitchCount) + require.Contains(t, fs.FailedAccountIDs, int64(100)) + }) +} + +// --------------------------------------------------------------------------- +// HandleFailoverError — 缓存计费 (ForceCacheBilling) +// --------------------------------------------------------------------------- + +func TestHandleFailoverError_CacheBilling(t *testing.T) { + t.Run("hasBoundSession为true时设置ForceCacheBilling", func(t *testing.T) { + mock := &mockTempUnscheduler{} + fs := NewFailoverState(3, true) // hasBoundSession=true + err := newTestFailoverErr(500, false, false) + + fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) + require.True(t, fs.ForceCacheBilling) + }) + + t.Run("failoverErr.ForceCacheBilling为true时设置", func(t *testing.T) { + mock := &mockTempUnscheduler{} + fs := NewFailoverState(3, false) + err := newTestFailoverErr(500, false, true) // ForceCacheBilling=true + + fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) + require.True(t, fs.ForceCacheBilling) + }) + + t.Run("两者均为false时不设置", func(t *testing.T) { + mock := &mockTempUnscheduler{} + fs := NewFailoverState(3, 
false) + err := newTestFailoverErr(500, false, false) + + fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) + require.False(t, fs.ForceCacheBilling) + }) + + t.Run("一旦设置不会被后续错误重置", func(t *testing.T) { + mock := &mockTempUnscheduler{} + fs := NewFailoverState(3, false) + + // 第一次:ForceCacheBilling=true → 设置 + err1 := newTestFailoverErr(500, false, true) + fs.HandleFailoverError(context.Background(), mock, 100, "openai", err1) + require.True(t, fs.ForceCacheBilling) + + // 第二次:ForceCacheBilling=false → 仍然保持 true + err2 := newTestFailoverErr(502, false, false) + fs.HandleFailoverError(context.Background(), mock, 200, "openai", err2) + require.True(t, fs.ForceCacheBilling, "ForceCacheBilling 一旦设置不应被重置") + }) +} + +// --------------------------------------------------------------------------- +// HandleFailoverError — 同账号重试 (RetryableOnSameAccount) +// --------------------------------------------------------------------------- + +func TestHandleFailoverError_SameAccountRetry(t *testing.T) { + t.Run("第一次重试返回FailoverRetry", func(t *testing.T) { + mock := &mockTempUnscheduler{} + fs := NewFailoverState(3, false) + err := newTestFailoverErr(400, true, false) + + start := time.Now() + action := fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) + elapsed := time.Since(start) + + require.Equal(t, FailoverRetry, action) + require.Equal(t, 1, fs.SameAccountRetryCount[100]) + require.Equal(t, 0, fs.SwitchCount, "同账号重试不应增加切换计数") + require.NotContains(t, fs.FailedAccountIDs, int64(100), "同账号重试不应加入失败列表") + require.Empty(t, mock.calls, "同账号重试期间不应调用 TempUnschedule") + // 验证等待了 sameAccountRetryDelay (500ms) + require.GreaterOrEqual(t, elapsed, 400*time.Millisecond) + require.Less(t, elapsed, 2*time.Second) + }) + + t.Run("第二次重试仍返回FailoverRetry", func(t *testing.T) { + mock := &mockTempUnscheduler{} + fs := NewFailoverState(3, false) + err := newTestFailoverErr(400, true, false) + + // 第一次 + action := 
fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) + require.Equal(t, FailoverRetry, action) + require.Equal(t, 1, fs.SameAccountRetryCount[100]) + + // 第二次 + action = fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) + require.Equal(t, FailoverRetry, action) + require.Equal(t, 2, fs.SameAccountRetryCount[100]) + + require.Empty(t, mock.calls, "两次重试期间均不应调用 TempUnschedule") + }) + + t.Run("第三次重试耗尽_触发TempUnschedule并切换", func(t *testing.T) { + mock := &mockTempUnscheduler{} + fs := NewFailoverState(3, false) + err := newTestFailoverErr(400, true, false) + + // 第一次、第二次重试 + fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) + fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) + require.Equal(t, 2, fs.SameAccountRetryCount[100]) + + // 第三次:重试已达到 maxSameAccountRetries(2),应切换账号 + action := fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) + require.Equal(t, FailoverSwitch, action) + require.Equal(t, 1, fs.SwitchCount) + require.Contains(t, fs.FailedAccountIDs, int64(100)) + + // 验证 TempUnschedule 被调用 + require.Len(t, mock.calls, 1) + require.Equal(t, int64(100), mock.calls[0].accountID) + require.Equal(t, err, mock.calls[0].failoverErr) + }) + + t.Run("不同账号独立跟踪重试次数", func(t *testing.T) { + mock := &mockTempUnscheduler{} + fs := NewFailoverState(5, false) + err := newTestFailoverErr(400, true, false) + + // 账号 100 第一次重试 + action := fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) + require.Equal(t, FailoverRetry, action) + require.Equal(t, 1, fs.SameAccountRetryCount[100]) + + // 账号 200 第一次重试(独立计数) + action = fs.HandleFailoverError(context.Background(), mock, 200, "openai", err) + require.Equal(t, FailoverRetry, action) + require.Equal(t, 1, fs.SameAccountRetryCount[200]) + require.Equal(t, 1, fs.SameAccountRetryCount[100], "账号 100 的计数不应受影响") + }) + + t.Run("重试耗尽后再次遇到同账号_直接切换", func(t *testing.T) { + mock := &mockTempUnscheduler{} + fs := 
NewFailoverState(5, false) + err := newTestFailoverErr(400, true, false) + + // 耗尽账号 100 的重试 + fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) // retry 1 + fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) // retry 2 + action := fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) // exhausted → switch + require.Equal(t, FailoverSwitch, action) + + // 再次遇到账号 100,计数仍为 2,条件不满足 → 直接切换 + action = fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) + require.Equal(t, FailoverSwitch, action) + require.Len(t, mock.calls, 2, "第二次耗尽也应调用 TempUnschedule") + }) +} + +// --------------------------------------------------------------------------- +// HandleFailoverError — TempUnschedule 调用验证 +// --------------------------------------------------------------------------- + +func TestHandleFailoverError_TempUnschedule(t *testing.T) { + t.Run("非重试错误不调用TempUnschedule", func(t *testing.T) { + mock := &mockTempUnscheduler{} + fs := NewFailoverState(3, false) + err := newTestFailoverErr(500, false, false) // RetryableOnSameAccount=false + + fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) + require.Empty(t, mock.calls) + }) + + t.Run("重试错误耗尽后调用TempUnschedule_传入正确参数", func(t *testing.T) { + mock := &mockTempUnscheduler{} + fs := NewFailoverState(3, false) + err := newTestFailoverErr(502, true, false) + + // 耗尽重试 + fs.HandleFailoverError(context.Background(), mock, 42, "openai", err) + fs.HandleFailoverError(context.Background(), mock, 42, "openai", err) + fs.HandleFailoverError(context.Background(), mock, 42, "openai", err) + + require.Len(t, mock.calls, 1) + require.Equal(t, int64(42), mock.calls[0].accountID) + require.Equal(t, 502, mock.calls[0].failoverErr.StatusCode) + require.True(t, mock.calls[0].failoverErr.RetryableOnSameAccount) + }) +} + +// --------------------------------------------------------------------------- +// HandleFailoverError — Context 取消 +// 
--------------------------------------------------------------------------- + +func TestHandleFailoverError_ContextCanceled(t *testing.T) { + t.Run("同账号重试sleep期间context取消", func(t *testing.T) { + mock := &mockTempUnscheduler{} + fs := NewFailoverState(3, false) + err := newTestFailoverErr(400, true, false) + + ctx, cancel := context.WithCancel(context.Background()) + cancel() // 立即取消 + + start := time.Now() + action := fs.HandleFailoverError(ctx, mock, 100, "openai", err) + elapsed := time.Since(start) + + require.Equal(t, FailoverCanceled, action) + require.Less(t, elapsed, 100*time.Millisecond, "应立即返回") + // 重试计数仍应递增 + require.Equal(t, 1, fs.SameAccountRetryCount[100]) + }) + + t.Run("Antigravity延迟期间context取消", func(t *testing.T) { + mock := &mockTempUnscheduler{} + fs := NewFailoverState(3, false) + fs.SwitchCount = 1 // 下一次 switchCount=2 → delay = 1s + err := newTestFailoverErr(500, false, false) + + ctx, cancel := context.WithCancel(context.Background()) + cancel() // 立即取消 + + start := time.Now() + action := fs.HandleFailoverError(ctx, mock, 100, service.PlatformAntigravity, err) + elapsed := time.Since(start) + + require.Equal(t, FailoverCanceled, action) + require.Less(t, elapsed, 100*time.Millisecond, "应立即返回而非等待 1s") + }) +} + +// --------------------------------------------------------------------------- +// HandleFailoverError — FailedAccountIDs 跟踪 +// --------------------------------------------------------------------------- + +func TestHandleFailoverError_FailedAccountIDs(t *testing.T) { + t.Run("切换时添加到失败列表", func(t *testing.T) { + mock := &mockTempUnscheduler{} + fs := NewFailoverState(3, false) + + fs.HandleFailoverError(context.Background(), mock, 100, "openai", newTestFailoverErr(500, false, false)) + require.Contains(t, fs.FailedAccountIDs, int64(100)) + + fs.HandleFailoverError(context.Background(), mock, 200, "openai", newTestFailoverErr(502, false, false)) + require.Contains(t, fs.FailedAccountIDs, int64(200)) + require.Len(t, 
fs.FailedAccountIDs, 2) + }) + + t.Run("耗尽时也添加到失败列表", func(t *testing.T) { + mock := &mockTempUnscheduler{} + fs := NewFailoverState(0, false) + + action := fs.HandleFailoverError(context.Background(), mock, 100, "openai", newTestFailoverErr(500, false, false)) + require.Equal(t, FailoverExhausted, action) + require.Contains(t, fs.FailedAccountIDs, int64(100)) + }) + + t.Run("同账号重试期间不添加到失败列表", func(t *testing.T) { + mock := &mockTempUnscheduler{} + fs := NewFailoverState(3, false) + + action := fs.HandleFailoverError(context.Background(), mock, 100, "openai", newTestFailoverErr(400, true, false)) + require.Equal(t, FailoverRetry, action) + require.NotContains(t, fs.FailedAccountIDs, int64(100)) + }) + + t.Run("同一账号多次切换不重复添加", func(t *testing.T) { + mock := &mockTempUnscheduler{} + fs := NewFailoverState(5, false) + + fs.HandleFailoverError(context.Background(), mock, 100, "openai", newTestFailoverErr(500, false, false)) + fs.HandleFailoverError(context.Background(), mock, 100, "openai", newTestFailoverErr(500, false, false)) + require.Len(t, fs.FailedAccountIDs, 1, "map 天然去重") + }) +} + +// --------------------------------------------------------------------------- +// HandleFailoverError — LastFailoverErr 更新 +// --------------------------------------------------------------------------- + +func TestHandleFailoverError_LastFailoverErr(t *testing.T) { + t.Run("每次调用都更新LastFailoverErr", func(t *testing.T) { + mock := &mockTempUnscheduler{} + fs := NewFailoverState(3, false) + + err1 := newTestFailoverErr(500, false, false) + fs.HandleFailoverError(context.Background(), mock, 100, "openai", err1) + require.Equal(t, err1, fs.LastFailoverErr) + + err2 := newTestFailoverErr(502, false, false) + fs.HandleFailoverError(context.Background(), mock, 200, "openai", err2) + require.Equal(t, err2, fs.LastFailoverErr) + }) + + t.Run("同账号重试时也更新LastFailoverErr", func(t *testing.T) { + mock := &mockTempUnscheduler{} + fs := NewFailoverState(3, false) + + err := 
newTestFailoverErr(400, true, false) + fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) + require.Equal(t, err, fs.LastFailoverErr) + }) +} + +// --------------------------------------------------------------------------- +// HandleFailoverError — 综合集成场景 +// --------------------------------------------------------------------------- + +func TestHandleFailoverError_IntegrationScenario(t *testing.T) { + t.Run("模拟完整failover流程_多账号混合重试与切换", func(t *testing.T) { + mock := &mockTempUnscheduler{} + fs := NewFailoverState(3, true) // hasBoundSession=true + + // 1. 账号 100 遇到可重试错误,同账号重试 2 次 + retryErr := newTestFailoverErr(400, true, false) + action := fs.HandleFailoverError(context.Background(), mock, 100, "openai", retryErr) + require.Equal(t, FailoverRetry, action) + require.True(t, fs.ForceCacheBilling, "hasBoundSession=true 应设置 ForceCacheBilling") + + action = fs.HandleFailoverError(context.Background(), mock, 100, "openai", retryErr) + require.Equal(t, FailoverRetry, action) + + // 2. 账号 100 重试耗尽 → TempUnschedule + 切换 + action = fs.HandleFailoverError(context.Background(), mock, 100, "openai", retryErr) + require.Equal(t, FailoverSwitch, action) + require.Equal(t, 1, fs.SwitchCount) + require.Len(t, mock.calls, 1) + + // 3. 账号 200 遇到不可重试错误 → 直接切换 + switchErr := newTestFailoverErr(500, false, false) + action = fs.HandleFailoverError(context.Background(), mock, 200, "openai", switchErr) + require.Equal(t, FailoverSwitch, action) + require.Equal(t, 2, fs.SwitchCount) + + // 4. 账号 300 遇到不可重试错误 → 再切换 + action = fs.HandleFailoverError(context.Background(), mock, 300, "openai", switchErr) + require.Equal(t, FailoverSwitch, action) + require.Equal(t, 3, fs.SwitchCount) + + // 5. 
账号 400 → 已耗尽 (SwitchCount=3 >= MaxSwitches=3) + action = fs.HandleFailoverError(context.Background(), mock, 400, "openai", switchErr) + require.Equal(t, FailoverExhausted, action) + + // 最终状态验证 + require.Equal(t, 3, fs.SwitchCount, "耗尽时不再递增") + require.Len(t, fs.FailedAccountIDs, 4, "4个不同账号都在失败列表中") + require.True(t, fs.ForceCacheBilling) + require.Len(t, mock.calls, 1, "只有账号 100 触发了 TempUnschedule") + }) + + t.Run("模拟Antigravity平台完整流程", func(t *testing.T) { + mock := &mockTempUnscheduler{} + fs := NewFailoverState(2, false) + + err := newTestFailoverErr(500, false, false) + + // 第一次切换:delay = 0s + start := time.Now() + action := fs.HandleFailoverError(context.Background(), mock, 100, service.PlatformAntigravity, err) + elapsed := time.Since(start) + require.Equal(t, FailoverSwitch, action) + require.Less(t, elapsed, 200*time.Millisecond, "第一次切换延迟为 0") + + // 第二次切换:delay = 1s + start = time.Now() + action = fs.HandleFailoverError(context.Background(), mock, 200, service.PlatformAntigravity, err) + elapsed = time.Since(start) + require.Equal(t, FailoverSwitch, action) + require.GreaterOrEqual(t, elapsed, 800*time.Millisecond, "第二次切换延迟约 1s") + + // 第三次:耗尽(无延迟,因为在检查延迟之前就返回了) + start = time.Now() + action = fs.HandleFailoverError(context.Background(), mock, 300, service.PlatformAntigravity, err) + elapsed = time.Since(start) + require.Equal(t, FailoverExhausted, action) + require.Less(t, elapsed, 200*time.Millisecond, "耗尽时不应有延迟") + }) + + t.Run("ForceCacheBilling通过错误标志设置", func(t *testing.T) { + mock := &mockTempUnscheduler{} + fs := NewFailoverState(3, false) // hasBoundSession=false + + // 第一次:ForceCacheBilling=false + err1 := newTestFailoverErr(500, false, false) + fs.HandleFailoverError(context.Background(), mock, 100, "openai", err1) + require.False(t, fs.ForceCacheBilling) + + // 第二次:ForceCacheBilling=true(Antigravity 粘性会话切换) + err2 := newTestFailoverErr(500, false, true) + fs.HandleFailoverError(context.Background(), mock, 200, "openai", err2) + require.True(t, 
fs.ForceCacheBilling, "错误标志应触发 ForceCacheBilling") + + // 第三次:ForceCacheBilling=false,但状态仍保持 true + err3 := newTestFailoverErr(500, false, false) + fs.HandleFailoverError(context.Background(), mock, 300, "openai", err3) + require.True(t, fs.ForceCacheBilling, "不应重置") + }) +} + +// --------------------------------------------------------------------------- +// HandleFailoverError — 边界条件 +// --------------------------------------------------------------------------- + +func TestHandleFailoverError_EdgeCases(t *testing.T) { + t.Run("StatusCode为0的错误也能正常处理", func(t *testing.T) { + mock := &mockTempUnscheduler{} + fs := NewFailoverState(3, false) + err := newTestFailoverErr(0, false, false) + + action := fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) + require.Equal(t, FailoverSwitch, action) + }) + + t.Run("AccountID为0也能正常跟踪", func(t *testing.T) { + mock := &mockTempUnscheduler{} + fs := NewFailoverState(3, false) + err := newTestFailoverErr(500, true, false) + + action := fs.HandleFailoverError(context.Background(), mock, 0, "openai", err) + require.Equal(t, FailoverRetry, action) + require.Equal(t, 1, fs.SameAccountRetryCount[0]) + }) + + t.Run("负AccountID也能正常跟踪", func(t *testing.T) { + mock := &mockTempUnscheduler{} + fs := NewFailoverState(3, false) + err := newTestFailoverErr(500, true, false) + + action := fs.HandleFailoverError(context.Background(), mock, -1, "openai", err) + require.Equal(t, FailoverRetry, action) + require.Equal(t, 1, fs.SameAccountRetryCount[-1]) + }) + + t.Run("空平台名称不触发Antigravity延迟", func(t *testing.T) { + mock := &mockTempUnscheduler{} + fs := NewFailoverState(3, false) + fs.SwitchCount = 1 + err := newTestFailoverErr(500, false, false) + + start := time.Now() + action := fs.HandleFailoverError(context.Background(), mock, 100, "", err) + elapsed := time.Since(start) + + require.Equal(t, FailoverSwitch, action) + require.Less(t, elapsed, 200*time.Millisecond, "空平台不应触发 Antigravity 延迟") + }) +} diff --git 
a/backend/internal/handler/gateway_handler.go b/backend/internal/handler/gateway_handler.go index c28ee846..d0d0d9ff 100644 --- a/backend/internal/handler/gateway_handler.go +++ b/backend/internal/handler/gateway_handler.go @@ -232,12 +232,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { hasBoundSession := sessionKey != "" && sessionBoundAccountID > 0 if platform == service.PlatformGemini { - maxAccountSwitches := h.maxAccountSwitchesGemini - switchCount := 0 - failedAccountIDs := make(map[int64]struct{}) - sameAccountRetryCount := make(map[int64]int) // 同账号重试计数 - var lastFailoverErr *service.UpstreamFailoverError - var forceCacheBilling bool // 粘性会话切换时的缓存计费标记 + fs := NewFailoverState(h.maxAccountSwitchesGemini, hasBoundSession) // 单账号分组提前设置 SingleAccountRetry 标记,让 Service 层首次 503 就不设模型限流标记。 // 避免单账号分组收到 503 (MODEL_CAPACITY_EXHAUSTED) 时设 29s 限流,导致后续请求连续快速失败。 @@ -247,27 +242,27 @@ func (h *GatewayHandler) Messages(c *gin.Context) { } for { - selection, err := h.gatewayService.SelectAccountWithLoadAwareness(c.Request.Context(), apiKey.GroupID, sessionKey, reqModel, failedAccountIDs, "") // Gemini 不使用会话限制 + selection, err := h.gatewayService.SelectAccountWithLoadAwareness(c.Request.Context(), apiKey.GroupID, sessionKey, reqModel, fs.FailedAccountIDs, "") // Gemini 不使用会话限制 if err != nil { - if len(failedAccountIDs) == 0 { + if len(fs.FailedAccountIDs) == 0 { h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts: "+err.Error(), streamStarted) return } // Antigravity 单账号退避重试:分组内没有其他可用账号时, // 对 503 错误不直接返回,而是清除排除列表、等待退避后重试同一个账号。 // 谷歌上游 503 (MODEL_CAPACITY_EXHAUSTED) 通常是暂时性的,等几秒就能恢复。 - if lastFailoverErr != nil && lastFailoverErr.StatusCode == http.StatusServiceUnavailable && switchCount <= maxAccountSwitches { - if sleepAntigravitySingleAccountBackoff(c.Request.Context(), switchCount) { - log.Printf("Antigravity single-account 503 retry: clearing failed accounts, retry %d/%d", switchCount, maxAccountSwitches) - 
failedAccountIDs = make(map[int64]struct{}) + if fs.LastFailoverErr != nil && fs.LastFailoverErr.StatusCode == http.StatusServiceUnavailable && fs.SwitchCount <= fs.MaxSwitches { + if sleepAntigravitySingleAccountBackoff(c.Request.Context(), fs.SwitchCount) { + log.Printf("Antigravity single-account 503 retry: clearing failed accounts, retry %d/%d", fs.SwitchCount, fs.MaxSwitches) + fs.FailedAccountIDs = make(map[int64]struct{}) // 设置 context 标记,让 Service 层预检查等待限流过期而非直接切换 ctx := context.WithValue(c.Request.Context(), ctxkey.SingleAccountRetry, true) c.Request = c.Request.WithContext(ctx) continue } } - if lastFailoverErr != nil { - h.handleFailoverExhausted(c, lastFailoverErr, service.PlatformGemini, streamStarted) + if fs.LastFailoverErr != nil { + h.handleFailoverExhausted(c, fs.LastFailoverErr, service.PlatformGemini, streamStarted) } else { h.handleFailoverExhaustedSimple(c, 502, streamStarted) } @@ -346,8 +341,8 @@ func (h *GatewayHandler) Messages(c *gin.Context) { // 转发请求 - 根据账号平台分流 var result *service.ForwardResult requestCtx := c.Request.Context() - if switchCount > 0 { - requestCtx = context.WithValue(requestCtx, ctxkey.AccountSwitchCount, switchCount) + if fs.SwitchCount > 0 { + requestCtx = context.WithValue(requestCtx, ctxkey.AccountSwitchCount, fs.SwitchCount) } if account.Platform == service.PlatformAntigravity { result, err = h.antigravityGatewayService.ForwardGemini(requestCtx, c, account, reqModel, "generateContent", reqStream, body, hasBoundSession) @@ -360,40 +355,16 @@ func (h *GatewayHandler) Messages(c *gin.Context) { if err != nil { var failoverErr *service.UpstreamFailoverError if errors.As(err, &failoverErr) { - lastFailoverErr = failoverErr - if needForceCacheBilling(hasBoundSession, failoverErr) { - forceCacheBilling = true - } - - // 同账号重试:对 RetryableOnSameAccount 的临时性错误,先在同一账号上重试 - if failoverErr.RetryableOnSameAccount && sameAccountRetryCount[account.ID] < maxSameAccountRetries { - sameAccountRetryCount[account.ID]++ - 
log.Printf("Account %d: retryable error %d, same-account retry %d/%d", - account.ID, failoverErr.StatusCode, sameAccountRetryCount[account.ID], maxSameAccountRetries) - if !sleepSameAccountRetryDelay(c.Request.Context()) { - return - } + action := fs.HandleFailoverError(c.Request.Context(), h.gatewayService, account.ID, account.Platform, failoverErr) + switch action { + case FailoverRetry, FailoverSwitch: continue - } - - // 同账号重试用尽,执行临时封禁并切换账号 - if failoverErr.RetryableOnSameAccount { - h.gatewayService.TempUnscheduleRetryableError(c.Request.Context(), account.ID, failoverErr) - } - - failedAccountIDs[account.ID] = struct{}{} - if switchCount >= maxAccountSwitches { - h.handleFailoverExhausted(c, failoverErr, service.PlatformGemini, streamStarted) + case FailoverExhausted: + h.handleFailoverExhausted(c, fs.LastFailoverErr, service.PlatformGemini, streamStarted) + return + case FailoverCanceled: return } - switchCount++ - log.Printf("Account %d: upstream error %d, switching account %d/%d", account.ID, failoverErr.StatusCode, switchCount, maxAccountSwitches) - if account.Platform == service.PlatformAntigravity { - if !sleepFailoverDelay(c.Request.Context(), switchCount) { - return - } - } - continue } // 错误响应已在Forward中处理,这里只记录日志 log.Printf("Forward request failed: %v", err) @@ -421,7 +392,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { }); err != nil { log.Printf("Record usage failed: %v", err) } - }(result, account, userAgent, clientIP, forceCacheBilling) + }(result, account, userAgent, clientIP, fs.ForceCacheBilling) return } } @@ -442,37 +413,32 @@ func (h *GatewayHandler) Messages(c *gin.Context) { } for { - maxAccountSwitches := h.maxAccountSwitches - switchCount := 0 - failedAccountIDs := make(map[int64]struct{}) - sameAccountRetryCount := make(map[int64]int) // 同账号重试计数 - var lastFailoverErr *service.UpstreamFailoverError + fs := NewFailoverState(h.maxAccountSwitches, hasBoundSession) retryWithFallback := false - var forceCacheBilling bool // 
粘性会话切换时的缓存计费标记 for { // 选择支持该模型的账号 - selection, err := h.gatewayService.SelectAccountWithLoadAwareness(c.Request.Context(), currentAPIKey.GroupID, sessionKey, reqModel, failedAccountIDs, parsedReq.MetadataUserID) + selection, err := h.gatewayService.SelectAccountWithLoadAwareness(c.Request.Context(), currentAPIKey.GroupID, sessionKey, reqModel, fs.FailedAccountIDs, parsedReq.MetadataUserID) if err != nil { - if len(failedAccountIDs) == 0 { + if len(fs.FailedAccountIDs) == 0 { h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts: "+err.Error(), streamStarted) return } // Antigravity 单账号退避重试:分组内没有其他可用账号时, // 对 503 错误不直接返回,而是清除排除列表、等待退避后重试同一个账号。 // 谷歌上游 503 (MODEL_CAPACITY_EXHAUSTED) 通常是暂时性的,等几秒就能恢复。 - if lastFailoverErr != nil && lastFailoverErr.StatusCode == http.StatusServiceUnavailable && switchCount <= maxAccountSwitches { - if sleepAntigravitySingleAccountBackoff(c.Request.Context(), switchCount) { - log.Printf("Antigravity single-account 503 retry: clearing failed accounts, retry %d/%d", switchCount, maxAccountSwitches) - failedAccountIDs = make(map[int64]struct{}) + if fs.LastFailoverErr != nil && fs.LastFailoverErr.StatusCode == http.StatusServiceUnavailable && fs.SwitchCount <= fs.MaxSwitches { + if sleepAntigravitySingleAccountBackoff(c.Request.Context(), fs.SwitchCount) { + log.Printf("Antigravity single-account 503 retry: clearing failed accounts, retry %d/%d", fs.SwitchCount, fs.MaxSwitches) + fs.FailedAccountIDs = make(map[int64]struct{}) // 设置 context 标记,让 Service 层预检查等待限流过期而非直接切换 ctx := context.WithValue(c.Request.Context(), ctxkey.SingleAccountRetry, true) c.Request = c.Request.WithContext(ctx) continue } } - if lastFailoverErr != nil { - h.handleFailoverExhausted(c, lastFailoverErr, platform, streamStarted) + if fs.LastFailoverErr != nil { + h.handleFailoverExhausted(c, fs.LastFailoverErr, platform, streamStarted) } else { h.handleFailoverExhaustedSimple(c, 502, streamStarted) } @@ -549,8 +515,8 @@ func 
(h *GatewayHandler) Messages(c *gin.Context) { // 转发请求 - 根据账号平台分流 var result *service.ForwardResult requestCtx := c.Request.Context() - if switchCount > 0 { - requestCtx = context.WithValue(requestCtx, ctxkey.AccountSwitchCount, switchCount) + if fs.SwitchCount > 0 { + requestCtx = context.WithValue(requestCtx, ctxkey.AccountSwitchCount, fs.SwitchCount) } if account.Platform == service.PlatformAntigravity && account.Type != service.AccountTypeAPIKey { result, err = h.antigravityGatewayService.Forward(requestCtx, c, account, body, hasBoundSession) @@ -598,40 +564,16 @@ func (h *GatewayHandler) Messages(c *gin.Context) { } var failoverErr *service.UpstreamFailoverError if errors.As(err, &failoverErr) { - lastFailoverErr = failoverErr - if needForceCacheBilling(hasBoundSession, failoverErr) { - forceCacheBilling = true - } - - // 同账号重试:对 RetryableOnSameAccount 的临时性错误,先在同一账号上重试 - if failoverErr.RetryableOnSameAccount && sameAccountRetryCount[account.ID] < maxSameAccountRetries { - sameAccountRetryCount[account.ID]++ - log.Printf("Account %d: retryable error %d, same-account retry %d/%d", - account.ID, failoverErr.StatusCode, sameAccountRetryCount[account.ID], maxSameAccountRetries) - if !sleepSameAccountRetryDelay(c.Request.Context()) { - return - } + action := fs.HandleFailoverError(c.Request.Context(), h.gatewayService, account.ID, account.Platform, failoverErr) + switch action { + case FailoverRetry, FailoverSwitch: continue - } - - // 同账号重试用尽,执行临时封禁并切换账号 - if failoverErr.RetryableOnSameAccount { - h.gatewayService.TempUnscheduleRetryableError(c.Request.Context(), account.ID, failoverErr) - } - - failedAccountIDs[account.ID] = struct{}{} - if switchCount >= maxAccountSwitches { - h.handleFailoverExhausted(c, failoverErr, account.Platform, streamStarted) + case FailoverExhausted: + h.handleFailoverExhausted(c, fs.LastFailoverErr, account.Platform, streamStarted) + return + case FailoverCanceled: return } - switchCount++ - log.Printf("Account %d: upstream error %d, 
switching account %d/%d", account.ID, failoverErr.StatusCode, switchCount, maxAccountSwitches) - if account.Platform == service.PlatformAntigravity { - if !sleepFailoverDelay(c.Request.Context(), switchCount) { - return - } - } - continue } // 错误响应已在Forward中处理,这里只记录日志 log.Printf("Account %d: Forward request failed: %v", account.ID, err) @@ -659,7 +601,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { }); err != nil { log.Printf("Record usage failed: %v", err) } - }(result, account, userAgent, clientIP, forceCacheBilling) + }(result, account, userAgent, clientIP, fs.ForceCacheBilling) return } if !retryWithFallback { @@ -899,23 +841,6 @@ func needForceCacheBilling(hasBoundSession bool, failoverErr *service.UpstreamFa return hasBoundSession || (failoverErr != nil && failoverErr.ForceCacheBilling) } -const ( - // maxSameAccountRetries 同账号重试次数上限(针对 RetryableOnSameAccount 错误) - maxSameAccountRetries = 2 - // sameAccountRetryDelay 同账号重试间隔 - sameAccountRetryDelay = 500 * time.Millisecond -) - -// sleepSameAccountRetryDelay 同账号重试固定延时,返回 false 表示 context 已取消。 -func sleepSameAccountRetryDelay(ctx context.Context) bool { - select { - case <-ctx.Done(): - return false - case <-time.After(sameAccountRetryDelay): - return true - } -} - // sleepFailoverDelay 账号切换线性递增延时:第1次0s、第2次1s、第3次2s… // 返回 false 表示 context 已取消。 func sleepFailoverDelay(ctx context.Context, switchCount int) bool { diff --git a/backend/internal/handler/gemini_v1beta_handler.go b/backend/internal/handler/gemini_v1beta_handler.go index f8fb0dcb..14666093 100644 --- a/backend/internal/handler/gemini_v1beta_handler.go +++ b/backend/internal/handler/gemini_v1beta_handler.go @@ -321,11 +321,7 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { hasBoundSession := sessionKey != "" && sessionBoundAccountID > 0 cleanedForUnknownBinding := false - maxAccountSwitches := h.maxAccountSwitchesGemini - switchCount := 0 - failedAccountIDs := make(map[int64]struct{}) - var lastFailoverErr 
*service.UpstreamFailoverError - var forceCacheBilling bool // 粘性会话切换时的缓存计费标记 + fs := NewFailoverState(h.maxAccountSwitchesGemini, hasBoundSession) // 单账号分组提前设置 SingleAccountRetry 标记,让 Service 层首次 503 就不设模型限流标记。 // 避免单账号分组收到 503 (MODEL_CAPACITY_EXHAUSTED) 时设 29s 限流,导致后续请求连续快速失败。 @@ -335,26 +331,26 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { } for { - selection, err := h.gatewayService.SelectAccountWithLoadAwareness(c.Request.Context(), apiKey.GroupID, sessionKey, modelName, failedAccountIDs, "") // Gemini 不使用会话限制 + selection, err := h.gatewayService.SelectAccountWithLoadAwareness(c.Request.Context(), apiKey.GroupID, sessionKey, modelName, fs.FailedAccountIDs, "") // Gemini 不使用会话限制 if err != nil { - if len(failedAccountIDs) == 0 { + if len(fs.FailedAccountIDs) == 0 { googleError(c, http.StatusServiceUnavailable, "No available Gemini accounts: "+err.Error()) return } // Antigravity 单账号退避重试:分组内没有其他可用账号时, // 对 503 错误不直接返回,而是清除排除列表、等待退避后重试同一个账号。 // 谷歌上游 503 (MODEL_CAPACITY_EXHAUSTED) 通常是暂时性的,等几秒就能恢复。 - if lastFailoverErr != nil && lastFailoverErr.StatusCode == http.StatusServiceUnavailable && switchCount <= maxAccountSwitches { - if sleepAntigravitySingleAccountBackoff(c.Request.Context(), switchCount) { - log.Printf("Antigravity single-account 503 retry: clearing failed accounts, retry %d/%d", switchCount, maxAccountSwitches) - failedAccountIDs = make(map[int64]struct{}) + if fs.LastFailoverErr != nil && fs.LastFailoverErr.StatusCode == http.StatusServiceUnavailable && fs.SwitchCount <= fs.MaxSwitches { + if sleepAntigravitySingleAccountBackoff(c.Request.Context(), fs.SwitchCount) { + log.Printf("Antigravity single-account 503 retry: clearing failed accounts, retry %d/%d", fs.SwitchCount, fs.MaxSwitches) + fs.FailedAccountIDs = make(map[int64]struct{}) // 设置 context 标记,让 Service 层预检查等待限流过期而非直接切换 ctx := context.WithValue(c.Request.Context(), ctxkey.SingleAccountRetry, true) c.Request = c.Request.WithContext(ctx) continue } } - 
h.handleGeminiFailoverExhausted(c, lastFailoverErr) + h.handleGeminiFailoverExhausted(c, fs.LastFailoverErr) return } account := selection.Account @@ -429,8 +425,8 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { // 5) forward (根据平台分流) var result *service.ForwardResult requestCtx := c.Request.Context() - if switchCount > 0 { - requestCtx = context.WithValue(requestCtx, ctxkey.AccountSwitchCount, switchCount) + if fs.SwitchCount > 0 { + requestCtx = context.WithValue(requestCtx, ctxkey.AccountSwitchCount, fs.SwitchCount) } if account.Platform == service.PlatformAntigravity && account.Type != service.AccountTypeAPIKey { result, err = h.antigravityGatewayService.ForwardGemini(requestCtx, c, account, modelName, action, stream, body, hasBoundSession) @@ -443,24 +439,16 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { if err != nil { var failoverErr *service.UpstreamFailoverError if errors.As(err, &failoverErr) { - failedAccountIDs[account.ID] = struct{}{} - if needForceCacheBilling(hasBoundSession, failoverErr) { - forceCacheBilling = true - } - if switchCount >= maxAccountSwitches { - lastFailoverErr = failoverErr - h.handleGeminiFailoverExhausted(c, lastFailoverErr) + action := fs.HandleFailoverError(c.Request.Context(), h.gatewayService, account.ID, account.Platform, failoverErr) + switch action { + case FailoverRetry, FailoverSwitch: + continue + case FailoverExhausted: + h.handleGeminiFailoverExhausted(c, fs.LastFailoverErr) + return + case FailoverCanceled: return } - lastFailoverErr = failoverErr - switchCount++ - log.Printf("Gemini account %d: upstream error %d, switching account %d/%d", account.ID, failoverErr.StatusCode, switchCount, maxAccountSwitches) - if account.Platform == service.PlatformAntigravity { - if !sleepFailoverDelay(c.Request.Context(), switchCount) { - return - } - } - continue } // ForwardNative already wrote the response log.Printf("Gemini native forward failed: %v", err) @@ -506,7 +494,7 @@ func (h 
*GatewayHandler) GeminiV1BetaModels(c *gin.Context) { }); err != nil { log.Printf("Record usage failed: %v", err) } - }(result, account, userAgent, clientIP, forceCacheBilling) + }(result, account, userAgent, clientIP, fs.ForceCacheBilling) return } } diff --git a/backend/internal/service/error_passthrough_runtime_test.go b/backend/internal/service/error_passthrough_runtime_test.go index f963913b..4a4309f9 100644 --- a/backend/internal/service/error_passthrough_runtime_test.go +++ b/backend/internal/service/error_passthrough_runtime_test.go @@ -219,7 +219,9 @@ func TestApplyErrorPassthroughRule_SkipMonitoringSetsContextKey(t *testing.T) { assert.True(t, matched) v, exists := c.Get(OpsSkipPassthroughKey) assert.True(t, exists, "OpsSkipPassthroughKey should be set when skip_monitoring=true") - assert.True(t, v.(bool)) + boolVal, ok := v.(bool) + assert.True(t, ok, "value should be a bool") + assert.True(t, boolVal) } func TestApplyErrorPassthroughRule_NoSkipMonitoringDoesNotSetContextKey(t *testing.T) { diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 07cb1028..71b1f594 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -385,9 +385,10 @@ func (s *GatewayService) TempUnscheduleRetryableError(ctx context.Context, accou return } // 根据状态码选择封禁策略 - if failoverErr.StatusCode == http.StatusBadRequest { + switch failoverErr.StatusCode { + case http.StatusBadRequest: tempUnscheduleGoogleConfigError(ctx, s.accountRepo, accountID, "[handler]") - } else if failoverErr.StatusCode == http.StatusBadGateway { + case http.StatusBadGateway: tempUnscheduleEmptyResponse(ctx, s.accountRepo, accountID, "[handler]") } } From a095468850ef1c6be2ad5a96aa0e897a785f0849 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Tue, 10 Feb 2026 23:35:21 +0800 Subject: [PATCH 024/175] fix: correct import path in failover_loop.go and failover_loop_test.go Use 
github.com/Wei-Shaw/sub2api/internal/service instead of sub2api/internal/service to match the module path in go.mod. --- backend/internal/handler/failover_loop.go | 2 +- backend/internal/handler/failover_loop_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/backend/internal/handler/failover_loop.go b/backend/internal/handler/failover_loop.go index fdba5620..a161a866 100644 --- a/backend/internal/handler/failover_loop.go +++ b/backend/internal/handler/failover_loop.go @@ -5,7 +5,7 @@ import ( "log" "time" - "sub2api/internal/service" + "github.com/Wei-Shaw/sub2api/internal/service" ) // TempUnscheduler 用于 HandleFailoverError 中同账号重试耗尽后的临时封禁。 diff --git a/backend/internal/handler/failover_loop_test.go b/backend/internal/handler/failover_loop_test.go index 00b8fec9..b534f02e 100644 --- a/backend/internal/handler/failover_loop_test.go +++ b/backend/internal/handler/failover_loop_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "sub2api/internal/service" + "github.com/Wei-Shaw/sub2api/internal/service" "github.com/stretchr/testify/require" ) From f702c6665926cbe92845c6df1bd1fc90bd5ce7a9 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Tue, 10 Feb 2026 23:40:37 +0800 Subject: [PATCH 025/175] fix: resolve gofmt alignment issue in failover_loop_test.go Move inline comments to separate lines to avoid gofmt consecutive-line comment alignment requirements. 
--- backend/internal/handler/failover_loop_test.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/backend/internal/handler/failover_loop_test.go b/backend/internal/handler/failover_loop_test.go index b534f02e..ff48e77e 100644 --- a/backend/internal/handler/failover_loop_test.go +++ b/backend/internal/handler/failover_loop_test.go @@ -354,9 +354,10 @@ func TestHandleFailoverError_SameAccountRetry(t *testing.T) { err := newTestFailoverErr(400, true, false) // 耗尽账号 100 的重试 - fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) // retry 1 - fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) // retry 2 - action := fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) // exhausted → switch + fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) + fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) + // 第三次: 重试耗尽 → 切换 + action := fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) require.Equal(t, FailoverSwitch, action) // 再次遇到账号 100,计数仍为 2,条件不满足 → 直接切换 From 57a778dccf905e9ac5e9864e22c07db50ac94707 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Tue, 10 Feb 2026 23:48:23 +0800 Subject: [PATCH 026/175] chore: bump version to 0.1.79.1 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 3d46fb65..0612b239 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.78.2 +0.1.79.1 From 91ad94d941ea977bcb67ea68a7e7201fe3ace32e Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 00:20:35 +0800 Subject: [PATCH 027/175] =?UTF-8?q?docs:=20=E9=83=A8=E7=BD=B2=E6=B5=81?= =?UTF-8?q?=E7=A8=8B=E6=94=B9=E4=B8=BA=E6=9E=84=E5=BB=BA=E6=9C=8D=E5=8A=A1?= =?UTF-8?q?=E5=99=A8=E4=B8=8E=E7=94=9F=E4=BA=A7=E6=9C=8D=E5=8A=A1=E5=99=A8?= =?UTF-8?q?=E5=88=86=E7=A6=BB?= MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit 镜像构建从生产服务器(clicodeplus)迁移到构建服务器(us-asaki-root), 通过 docker save/load 管道传输,避免编译时资源占用影响线上服务。 --- AGENTS.md | 143 ++++++++++++++++++++++++++++++++++++------------------ CLAUDE.md | 143 ++++++++++++++++++++++++++++++++++++------------------ 2 files changed, 190 insertions(+), 96 deletions(-) diff --git a/AGENTS.md b/AGENTS.md index a7a3e34a..9532d448 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -97,13 +97,22 @@ git push origin main ### 前置条件 -- 本地已配置 SSH 别名 `clicodeplus` 连接到服务器 -- 服务器部署目录:`/root/sub2api`(正式)、`/root/sub2api-beta`(测试) -- 服务器使用 Docker Compose 部署 +- 本地已配置 SSH 别名 `clicodeplus` 连接到生产服务器(运行服务) +- 本地已配置 SSH 别名 `us-asaki-root` 连接到构建服务器(拉取代码、构建镜像) +- 生产服务器部署目录:`/root/sub2api`(正式)、`/root/sub2api-beta`(测试) +- 生产服务器使用 Docker Compose 部署 +- **镜像统一在构建服务器上构建**,避免生产服务器因编译占用 CPU/内存影响线上服务 + +### 服务器角色说明 + +| 服务器 | SSH 别名 | 职责 | +|--------|----------|------| +| 构建服务器 | `us-asaki-root` | 拉取代码、`docker build` 构建镜像 | +| 生产服务器 | `clicodeplus` | 加载镜像、运行服务、部署验证 | ### 部署环境说明 -| 环境 | 目录 | 端口 | 数据库 | 容器名 | +| 环境 | 目录(生产服务器) | 端口 | 数据库 | 容器名 | |------|------|------|--------|--------| | 正式 | `/root/sub2api` | 8080 | `sub2api` | `sub2api` | | Beta | `/root/sub2api-beta` | 8084 | `beta` | `sub2api-beta` | @@ -155,26 +164,33 @@ git commit -m "chore: bump version to 0.1.69.2" git push origin release/custom-0.1.69 ``` -#### 1. 服务器拉取代码 +#### 1. 构建服务器拉取代码 ```bash -ssh clicodeplus "cd /root/sub2api && git fetch fork && git checkout -B release/custom-0.1.69 fork/release/custom-0.1.69" +ssh us-asaki-root "cd /root/sub2api && git fetch fork && git checkout -B release/custom-0.1.69 fork/release/custom-0.1.69" ``` -#### 2. 服务器构建镜像 +#### 2. 构建服务器构建镜像 ```bash -ssh clicodeplus "cd /root/sub2api && docker build --no-cache -t sub2api:latest -f Dockerfile ." +ssh us-asaki-root "cd /root/sub2api && docker build --no-cache -t sub2api:latest -f Dockerfile ." ``` -#### 3. 更新镜像标签并重启服务 +#### 3. 
传输镜像到生产服务器并加载 + +```bash +# 导出镜像 → 通过管道传输 → 生产服务器加载 +ssh us-asaki-root "docker save sub2api:latest" | ssh clicodeplus "docker load" +``` + +#### 4. 更新镜像标签并重启服务 ```bash ssh clicodeplus "docker tag sub2api:latest weishaw/sub2api:latest" ssh clicodeplus "cd /root/sub2api/deploy && docker compose up -d --force-recreate sub2api" ``` -#### 4. 验证部署 +#### 5. 验证部署 ```bash # 查看启动日志 @@ -213,8 +229,8 @@ ssh clicodeplus "docker ps --format 'table {{.Names}}\t{{.Image}}\t{{.Ports}}' | ### 首次部署步骤 ```bash -# 0) 进入服务器 -ssh clicodeplus +# 0) 进入构建服务器 +ssh us-asaki-root # 1) 克隆代码到新目录(示例使用你的 fork) cd /root @@ -222,7 +238,23 @@ git clone https://github.com/touwaeriol/sub2api.git sub2api-beta cd /root/sub2api-beta git checkout release/custom-0.1.71 -# 2) 准备 beta 的 .env(敏感信息只写这里) +# 2) 构建 beta 镜像 +docker build -t sub2api:beta -f Dockerfile . +exit + +# 3) 传输镜像到生产服务器 +ssh us-asaki-root "docker save sub2api:beta" | ssh clicodeplus "docker load" + +# 4) 在生产服务器上准备 beta 环境 +ssh clicodeplus + +# 克隆代码(仅用于 deploy 配置和版本号确认,不在此构建) +cd /root +git clone https://github.com/touwaeriol/sub2api.git sub2api-beta +cd /root/sub2api-beta +git checkout release/custom-0.1.71 + +# 5) 准备 beta 的 .env(敏感信息只写这里) cd /root/sub2api-beta/deploy # 推荐:从现网 .env 复制,保证除 DB 名/用户/端口外完全一致 @@ -233,7 +265,7 @@ perl -pi -e 's/^SERVER_PORT=.*/SERVER_PORT=8084/' ./.env perl -pi -e 's/^POSTGRES_USER=.*/POSTGRES_USER=beta/' ./.env perl -pi -e 's/^POSTGRES_DB=.*/POSTGRES_DB=beta/' ./.env -# 3) 写 compose override(避免与现网容器名冲突,镜像使用本地构建的 sub2api:beta) +# 6) 写 compose override(避免与现网容器名冲突,镜像使用构建服务器传输的 sub2api:beta) cat > docker-compose.override.yml <<'YAML' services: sub2api: @@ -243,15 +275,11 @@ services: container_name: sub2api-beta-redis YAML -# 4) 构建 beta 镜像(基于当前代码) -cd /root/sub2api-beta -docker build -t sub2api:beta -f Dockerfile . 
- -# 5) 启动 beta(独立 project,确保不影响现网) +# 7) 启动 beta(独立 project,确保不影响现网) cd /root/sub2api-beta/deploy docker compose -p sub2api-beta --env-file .env -f docker-compose.yml -f docker-compose.override.yml up -d -# 6) 验证 beta +# 8) 验证 beta curl -fsS http://127.0.0.1:8084/health docker logs sub2api-beta --tail 50 ``` @@ -265,11 +293,20 @@ docker logs sub2api-beta --tail 50 注意:需要数据库侧已存在 `beta` 用户与 `beta` 数据库,并授予权限;否则容器会启动失败并不断重启。 -### 更新 beta(拉代码 + 仅重建 beta 容器) +### 更新 beta(构建服务器构建 + 传输 + 仅重启 beta 容器) ```bash +# 1) 构建服务器拉取代码并构建镜像 +ssh us-asaki-root "set -e; cd /root/sub2api-beta && git fetch --all --tags && git checkout -f release/custom-0.1.71 && git reset --hard origin/release/custom-0.1.71" +ssh us-asaki-root "cd /root/sub2api-beta && docker build -t sub2api:beta -f Dockerfile ." + +# 2) 传输镜像到生产服务器 +ssh us-asaki-root "docker save sub2api:beta" | ssh clicodeplus "docker load" + +# 3) 生产服务器同步代码(用于版本号确认和 deploy 配置) ssh clicodeplus "set -e; cd /root/sub2api-beta && git fetch --all --tags && git checkout -f release/custom-0.1.71 && git reset --hard origin/release/custom-0.1.71" -ssh clicodeplus "cd /root/sub2api-beta && docker build -t sub2api:beta -f Dockerfile ." + +# 4) 重启 beta 容器 ssh clicodeplus "cd /root/sub2api-beta/deploy && docker compose -p sub2api-beta --env-file .env -f docker-compose.yml -f docker-compose.override.yml up -d --no-deps --force-recreate sub2api" ssh clicodeplus "curl -fsS http://127.0.0.1:8084/health" ``` @@ -284,7 +321,36 @@ ssh clicodeplus "cd /root/sub2api-beta/deploy && docker compose -p sub2api-beta ## 服务器首次部署 -### 1. 克隆代码并配置远程仓库 +### 1. 构建服务器:克隆代码并配置远程仓库 + +```bash +ssh us-asaki-root +cd /root +git clone https://github.com/Wei-Shaw/sub2api.git +cd sub2api + +# 添加 fork 仓库 +git remote add fork https://github.com/touwaeriol/sub2api.git +``` + +### 2. 构建服务器:切换到定制分支并构建镜像 + +```bash +git fetch fork +git checkout -B release/custom-0.1.69 fork/release/custom-0.1.69 + +cd /root/sub2api +docker build -t sub2api:latest -f Dockerfile . +exit +``` + +### 3. 
传输镜像到生产服务器 + +```bash +ssh us-asaki-root "docker save sub2api:latest" | ssh clicodeplus "docker load" +``` + +### 4. 生产服务器:克隆代码并配置环境 ```bash ssh clicodeplus @@ -294,42 +360,23 @@ cd sub2api # 添加 fork 仓库 git remote add fork https://github.com/touwaeriol/sub2api.git -``` - -### 2. 切换到定制分支并配置环境 - -```bash git fetch fork git checkout -B release/custom-0.1.69 fork/release/custom-0.1.69 +# 配置环境变量 cd deploy cp .env.example .env vim .env # 配置 DATABASE_URL, REDIS_URL, JWT_SECRET 等 ``` -### 3. 构建并启动 +### 5. 生产服务器:更新镜像标签并启动服务 ```bash -cd /root/sub2api -docker build -t sub2api:latest -f Dockerfile . docker tag sub2api:latest weishaw/sub2api:latest -cd deploy && docker compose up -d +cd /root/sub2api/deploy && docker compose up -d ``` -### 6. 启动服务 - -```bash -# 进入 deploy 目录 -cd deploy - -# 启动所有服务(PostgreSQL、Redis、sub2api) -docker compose up -d - -# 查看服务状态 -docker compose ps -``` - -### 7. 验证部署 +### 6. 验证部署 ```bash # 查看应用日志 @@ -342,7 +389,7 @@ curl http://localhost:8080/health cat /root/sub2api/backend/cmd/server/VERSION ``` -### 8. 常用运维命令 +### 7. 常用运维命令 ```bash # 查看实时日志 @@ -415,7 +462,7 @@ docker stats sub2api ## 注意事项 -1. **前端必须打包进镜像**:使用 `docker build` 在服务器上构建,Dockerfile 会自动编译前端并 embed 到后端二进制中 +1. **前端必须打包进镜像**:使用 `docker build` 在构建服务器(`us-asaki-root`)上构建,Dockerfile 会自动编译前端并 embed 到后端二进制中,构建完成后通过 `docker save | docker load` 传输到生产服务器(`clicodeplus`) 2. 
**镜像标签**:docker-compose.yml 使用 `weishaw/sub2api:latest`,本地构建后需要 `docker tag` 覆盖 diff --git a/CLAUDE.md b/CLAUDE.md index a7a3e34a..9532d448 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -97,13 +97,22 @@ git push origin main ### 前置条件 -- 本地已配置 SSH 别名 `clicodeplus` 连接到服务器 -- 服务器部署目录:`/root/sub2api`(正式)、`/root/sub2api-beta`(测试) -- 服务器使用 Docker Compose 部署 +- 本地已配置 SSH 别名 `clicodeplus` 连接到生产服务器(运行服务) +- 本地已配置 SSH 别名 `us-asaki-root` 连接到构建服务器(拉取代码、构建镜像) +- 生产服务器部署目录:`/root/sub2api`(正式)、`/root/sub2api-beta`(测试) +- 生产服务器使用 Docker Compose 部署 +- **镜像统一在构建服务器上构建**,避免生产服务器因编译占用 CPU/内存影响线上服务 + +### 服务器角色说明 + +| 服务器 | SSH 别名 | 职责 | +|--------|----------|------| +| 构建服务器 | `us-asaki-root` | 拉取代码、`docker build` 构建镜像 | +| 生产服务器 | `clicodeplus` | 加载镜像、运行服务、部署验证 | ### 部署环境说明 -| 环境 | 目录 | 端口 | 数据库 | 容器名 | +| 环境 | 目录(生产服务器) | 端口 | 数据库 | 容器名 | |------|------|------|--------|--------| | 正式 | `/root/sub2api` | 8080 | `sub2api` | `sub2api` | | Beta | `/root/sub2api-beta` | 8084 | `beta` | `sub2api-beta` | @@ -155,26 +164,33 @@ git commit -m "chore: bump version to 0.1.69.2" git push origin release/custom-0.1.69 ``` -#### 1. 服务器拉取代码 +#### 1. 构建服务器拉取代码 ```bash -ssh clicodeplus "cd /root/sub2api && git fetch fork && git checkout -B release/custom-0.1.69 fork/release/custom-0.1.69" +ssh us-asaki-root "cd /root/sub2api && git fetch fork && git checkout -B release/custom-0.1.69 fork/release/custom-0.1.69" ``` -#### 2. 服务器构建镜像 +#### 2. 构建服务器构建镜像 ```bash -ssh clicodeplus "cd /root/sub2api && docker build --no-cache -t sub2api:latest -f Dockerfile ." +ssh us-asaki-root "cd /root/sub2api && docker build --no-cache -t sub2api:latest -f Dockerfile ." ``` -#### 3. 更新镜像标签并重启服务 +#### 3. 传输镜像到生产服务器并加载 + +```bash +# 导出镜像 → 通过管道传输 → 生产服务器加载 +ssh us-asaki-root "docker save sub2api:latest" | ssh clicodeplus "docker load" +``` + +#### 4. 
更新镜像标签并重启服务 ```bash ssh clicodeplus "docker tag sub2api:latest weishaw/sub2api:latest" ssh clicodeplus "cd /root/sub2api/deploy && docker compose up -d --force-recreate sub2api" ``` -#### 4. 验证部署 +#### 5. 验证部署 ```bash # 查看启动日志 @@ -213,8 +229,8 @@ ssh clicodeplus "docker ps --format 'table {{.Names}}\t{{.Image}}\t{{.Ports}}' | ### 首次部署步骤 ```bash -# 0) 进入服务器 -ssh clicodeplus +# 0) 进入构建服务器 +ssh us-asaki-root # 1) 克隆代码到新目录(示例使用你的 fork) cd /root @@ -222,7 +238,23 @@ git clone https://github.com/touwaeriol/sub2api.git sub2api-beta cd /root/sub2api-beta git checkout release/custom-0.1.71 -# 2) 准备 beta 的 .env(敏感信息只写这里) +# 2) 构建 beta 镜像 +docker build -t sub2api:beta -f Dockerfile . +exit + +# 3) 传输镜像到生产服务器 +ssh us-asaki-root "docker save sub2api:beta" | ssh clicodeplus "docker load" + +# 4) 在生产服务器上准备 beta 环境 +ssh clicodeplus + +# 克隆代码(仅用于 deploy 配置和版本号确认,不在此构建) +cd /root +git clone https://github.com/touwaeriol/sub2api.git sub2api-beta +cd /root/sub2api-beta +git checkout release/custom-0.1.71 + +# 5) 准备 beta 的 .env(敏感信息只写这里) cd /root/sub2api-beta/deploy # 推荐:从现网 .env 复制,保证除 DB 名/用户/端口外完全一致 @@ -233,7 +265,7 @@ perl -pi -e 's/^SERVER_PORT=.*/SERVER_PORT=8084/' ./.env perl -pi -e 's/^POSTGRES_USER=.*/POSTGRES_USER=beta/' ./.env perl -pi -e 's/^POSTGRES_DB=.*/POSTGRES_DB=beta/' ./.env -# 3) 写 compose override(避免与现网容器名冲突,镜像使用本地构建的 sub2api:beta) +# 6) 写 compose override(避免与现网容器名冲突,镜像使用构建服务器传输的 sub2api:beta) cat > docker-compose.override.yml <<'YAML' services: sub2api: @@ -243,15 +275,11 @@ services: container_name: sub2api-beta-redis YAML -# 4) 构建 beta 镜像(基于当前代码) -cd /root/sub2api-beta -docker build -t sub2api:beta -f Dockerfile . 
- -# 5) 启动 beta(独立 project,确保不影响现网) +# 7) 启动 beta(独立 project,确保不影响现网) cd /root/sub2api-beta/deploy docker compose -p sub2api-beta --env-file .env -f docker-compose.yml -f docker-compose.override.yml up -d -# 6) 验证 beta +# 8) 验证 beta curl -fsS http://127.0.0.1:8084/health docker logs sub2api-beta --tail 50 ``` @@ -265,11 +293,20 @@ docker logs sub2api-beta --tail 50 注意:需要数据库侧已存在 `beta` 用户与 `beta` 数据库,并授予权限;否则容器会启动失败并不断重启。 -### 更新 beta(拉代码 + 仅重建 beta 容器) +### 更新 beta(构建服务器构建 + 传输 + 仅重启 beta 容器) ```bash +# 1) 构建服务器拉取代码并构建镜像 +ssh us-asaki-root "set -e; cd /root/sub2api-beta && git fetch --all --tags && git checkout -f release/custom-0.1.71 && git reset --hard origin/release/custom-0.1.71" +ssh us-asaki-root "cd /root/sub2api-beta && docker build -t sub2api:beta -f Dockerfile ." + +# 2) 传输镜像到生产服务器 +ssh us-asaki-root "docker save sub2api:beta" | ssh clicodeplus "docker load" + +# 3) 生产服务器同步代码(用于版本号确认和 deploy 配置) ssh clicodeplus "set -e; cd /root/sub2api-beta && git fetch --all --tags && git checkout -f release/custom-0.1.71 && git reset --hard origin/release/custom-0.1.71" -ssh clicodeplus "cd /root/sub2api-beta && docker build -t sub2api:beta -f Dockerfile ." + +# 4) 重启 beta 容器 ssh clicodeplus "cd /root/sub2api-beta/deploy && docker compose -p sub2api-beta --env-file .env -f docker-compose.yml -f docker-compose.override.yml up -d --no-deps --force-recreate sub2api" ssh clicodeplus "curl -fsS http://127.0.0.1:8084/health" ``` @@ -284,7 +321,36 @@ ssh clicodeplus "cd /root/sub2api-beta/deploy && docker compose -p sub2api-beta ## 服务器首次部署 -### 1. 克隆代码并配置远程仓库 +### 1. 构建服务器:克隆代码并配置远程仓库 + +```bash +ssh us-asaki-root +cd /root +git clone https://github.com/Wei-Shaw/sub2api.git +cd sub2api + +# 添加 fork 仓库 +git remote add fork https://github.com/touwaeriol/sub2api.git +``` + +### 2. 构建服务器:切换到定制分支并构建镜像 + +```bash +git fetch fork +git checkout -B release/custom-0.1.69 fork/release/custom-0.1.69 + +cd /root/sub2api +docker build -t sub2api:latest -f Dockerfile . +exit +``` + +### 3. 
传输镜像到生产服务器 + +```bash +ssh us-asaki-root "docker save sub2api:latest" | ssh clicodeplus "docker load" +``` + +### 4. 生产服务器:克隆代码并配置环境 ```bash ssh clicodeplus @@ -294,42 +360,23 @@ cd sub2api # 添加 fork 仓库 git remote add fork https://github.com/touwaeriol/sub2api.git -``` - -### 2. 切换到定制分支并配置环境 - -```bash git fetch fork git checkout -B release/custom-0.1.69 fork/release/custom-0.1.69 +# 配置环境变量 cd deploy cp .env.example .env vim .env # 配置 DATABASE_URL, REDIS_URL, JWT_SECRET 等 ``` -### 3. 构建并启动 +### 5. 生产服务器:更新镜像标签并启动服务 ```bash -cd /root/sub2api -docker build -t sub2api:latest -f Dockerfile . docker tag sub2api:latest weishaw/sub2api:latest -cd deploy && docker compose up -d +cd /root/sub2api/deploy && docker compose up -d ``` -### 6. 启动服务 - -```bash -# 进入 deploy 目录 -cd deploy - -# 启动所有服务(PostgreSQL、Redis、sub2api) -docker compose up -d - -# 查看服务状态 -docker compose ps -``` - -### 7. 验证部署 +### 6. 验证部署 ```bash # 查看应用日志 @@ -342,7 +389,7 @@ curl http://localhost:8080/health cat /root/sub2api/backend/cmd/server/VERSION ``` -### 8. 常用运维命令 +### 7. 常用运维命令 ```bash # 查看实时日志 @@ -415,7 +462,7 @@ docker stats sub2api ## 注意事项 -1. **前端必须打包进镜像**:使用 `docker build` 在服务器上构建,Dockerfile 会自动编译前端并 embed 到后端二进制中 +1. **前端必须打包进镜像**:使用 `docker build` 在构建服务器(`us-asaki-root`)上构建,Dockerfile 会自动编译前端并 embed 到后端二进制中,构建完成后通过 `docker save | docker load` 传输到生产服务器(`clicodeplus`) 2. 
**镜像标签**:docker-compose.yml 使用 `weishaw/sub2api:latest`,本地构建后需要 `docker tag` 覆盖 From 30d25084f0e0cea7ac6a2f2b4121f2ea077785bb Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 00:22:19 +0800 Subject: [PATCH 028/175] chore: bump version to 0.1.79.2 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 0612b239..a98898f3 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.79.1 +0.1.79.2 From 9375f1809cb3d068c68ec025ecb90b0162917c0b Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 00:34:48 +0800 Subject: [PATCH 029/175] =?UTF-8?q?docs:=20=E9=83=A8=E7=BD=B2=E6=AD=A5?= =?UTF-8?q?=E9=AA=A4=E5=A2=9E=E5=8A=A0=E5=89=8D=E7=BD=AE=E6=A3=80=E6=9F=A5?= =?UTF-8?q?=E5=92=8C=E9=AA=8C=E8=AF=81=E7=8E=AF=E8=8A=82?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 各步骤增加检查点说明,确保每步执行成功后再继续 - 步骤 0 强调推送成功确认和未提交改动检查 - 步骤 1 增加版本号验证 - 步骤 2 增加常见构建问题排查(buildx 版本、磁盘空间) - 步骤 4 增加生产服务器代码同步步骤 - 新增「构建服务器首次初始化」章节 --- AGENTS.md | 59 ++++++++++++++++++++++++++++++++++++++++++++++++++----- CLAUDE.md | 59 ++++++++++++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 108 insertions(+), 10 deletions(-) diff --git a/AGENTS.md b/AGENTS.md index 9532d448..7b261891 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -148,9 +148,9 @@ ssh clicodeplus "source /root/sub2api-beta/deploy/.env && PGPASSWORD=\"\$POSTGRE **重要:每次部署都必须递增版本号!** -#### 0. 递增版本号(本地操作) +#### 0. 递增版本号并推送(本地操作) -每次部署前,先在本地递增小版本号: +每次部署前,先在本地递增小版本号并确保推送成功: ```bash # 查看当前版本号 @@ -162,30 +162,52 @@ echo "0.1.69.2" > backend/cmd/server/VERSION git add backend/cmd/server/VERSION git commit -m "chore: bump version to 0.1.69.2" git push origin release/custom-0.1.69 + +# ⚠️ 确认推送成功(必须看到分支更新输出,不能有 rejected 错误) ``` +> **检查点**:如果有其他未提交的改动,应先 commit 并 push,确保 release 分支上的所有代码都已推送到远程。 + #### 1. 
构建服务器拉取代码 ```bash -ssh us-asaki-root "cd /root/sub2api && git fetch fork && git checkout -B release/custom-0.1.69 fork/release/custom-0.1.69" +# 拉取最新代码并切换分支 +ssh us-asaki-root "cd /root/sub2api && git fetch origin && git checkout -B release/custom-0.1.69 origin/release/custom-0.1.69" + +# ⚠️ 验证版本号与步骤 0 一致 +ssh us-asaki-root "cat /root/sub2api/backend/cmd/server/VERSION" ``` +> **首次使用构建服务器?** 需要先初始化仓库,参见下方「构建服务器首次初始化」章节。 + #### 2. 构建服务器构建镜像 ```bash ssh us-asaki-root "cd /root/sub2api && docker build --no-cache -t sub2api:latest -f Dockerfile ." + +# ⚠️ 必须看到构建成功输出,如果失败需要先排查问题 ``` +> **常见构建问题**: +> - `buildx` 版本过旧导致 API 版本不兼容 → 更新 buildx:`curl -fsSL "https://github.com/docker/buildx/releases/latest/download/buildx-$(curl -fsSL https://api.github.com/repos/docker/buildx/releases/latest | grep tag_name | cut -d'"' -f4).linux-amd64" -o ~/.docker/cli-plugins/docker-buildx && chmod +x ~/.docker/cli-plugins/docker-buildx` +> - 磁盘空间不足 → `docker system prune -f` 清理无用镜像 + #### 3. 传输镜像到生产服务器并加载 ```bash # 导出镜像 → 通过管道传输 → 生产服务器加载 ssh us-asaki-root "docker save sub2api:latest" | ssh clicodeplus "docker load" + +# ⚠️ 必须看到 "Loaded image: sub2api:latest" 输出 ``` -#### 4. 更新镜像标签并重启服务 +#### 4. 
生产服务器同步代码、更新标签并重启 ```bash +# 同步代码(用于版本号确认和 deploy 配置) +ssh clicodeplus "cd /root/sub2api && git fetch fork && git checkout -B release/custom-0.1.69 fork/release/custom-0.1.69" + +# 更新镜像标签并重启 ssh clicodeplus "docker tag sub2api:latest weishaw/sub2api:latest" ssh clicodeplus "cd /root/sub2api/deploy && docker compose up -d --force-recreate sub2api" ``` @@ -199,12 +221,39 @@ ssh clicodeplus "docker logs sub2api --tail 20" # 确认版本号(必须与步骤 0 中设置的版本号一致) ssh clicodeplus "cat /root/sub2api/backend/cmd/server/VERSION" -# 检查容器状态 +# 检查容器状态(必须显示 healthy) ssh clicodeplus "docker ps | grep sub2api" ``` --- +### 构建服务器首次初始化 + +首次使用 `us-asaki-root` 作为构建服务器时,需要执行以下一次性操作: + +```bash +ssh us-asaki-root + +# 1) 克隆仓库 +cd /root +git clone https://github.com/touwaeriol/sub2api.git sub2api +cd sub2api + +# 2) 验证 Docker 和 buildx 版本 +docker version +docker buildx version +# 如果 buildx 版本过旧(< v0.14),执行更新: +# LATEST=$(curl -fsSL https://api.github.com/repos/docker/buildx/releases/latest | grep tag_name | cut -d'"' -f4) +# curl -fsSL "https://github.com/docker/buildx/releases/download/${LATEST}/buildx-${LATEST}.linux-amd64" -o ~/.docker/cli-plugins/docker-buildx +# chmod +x ~/.docker/cli-plugins/docker-buildx + +# 3) 验证构建能力 +docker build --no-cache -t sub2api:test -f Dockerfile . +docker rmi sub2api:test +``` + +--- + ## Beta 并行部署(不影响现网) 目标:在同一台服务器上并行启动一个 beta 实例(例如端口 `8084`),**严禁改动/重启**现网实例(默认目录 `/root/sub2api`)。 diff --git a/CLAUDE.md b/CLAUDE.md index 9532d448..7b261891 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -148,9 +148,9 @@ ssh clicodeplus "source /root/sub2api-beta/deploy/.env && PGPASSWORD=\"\$POSTGRE **重要:每次部署都必须递增版本号!** -#### 0. 递增版本号(本地操作) +#### 0. 
递增版本号并推送(本地操作) -每次部署前,先在本地递增小版本号: +每次部署前,先在本地递增小版本号并确保推送成功: ```bash # 查看当前版本号 @@ -162,30 +162,52 @@ echo "0.1.69.2" > backend/cmd/server/VERSION git add backend/cmd/server/VERSION git commit -m "chore: bump version to 0.1.69.2" git push origin release/custom-0.1.69 + +# ⚠️ 确认推送成功(必须看到分支更新输出,不能有 rejected 错误) ``` +> **检查点**:如果有其他未提交的改动,应先 commit 并 push,确保 release 分支上的所有代码都已推送到远程。 + #### 1. 构建服务器拉取代码 ```bash -ssh us-asaki-root "cd /root/sub2api && git fetch fork && git checkout -B release/custom-0.1.69 fork/release/custom-0.1.69" +# 拉取最新代码并切换分支 +ssh us-asaki-root "cd /root/sub2api && git fetch origin && git checkout -B release/custom-0.1.69 origin/release/custom-0.1.69" + +# ⚠️ 验证版本号与步骤 0 一致 +ssh us-asaki-root "cat /root/sub2api/backend/cmd/server/VERSION" ``` +> **首次使用构建服务器?** 需要先初始化仓库,参见下方「构建服务器首次初始化」章节。 + #### 2. 构建服务器构建镜像 ```bash ssh us-asaki-root "cd /root/sub2api && docker build --no-cache -t sub2api:latest -f Dockerfile ." + +# ⚠️ 必须看到构建成功输出,如果失败需要先排查问题 ``` +> **常见构建问题**: +> - `buildx` 版本过旧导致 API 版本不兼容 → 更新 buildx:`curl -fsSL "https://github.com/docker/buildx/releases/latest/download/buildx-$(curl -fsSL https://api.github.com/repos/docker/buildx/releases/latest | grep tag_name | cut -d'"' -f4).linux-amd64" -o ~/.docker/cli-plugins/docker-buildx && chmod +x ~/.docker/cli-plugins/docker-buildx` +> - 磁盘空间不足 → `docker system prune -f` 清理无用镜像 + #### 3. 传输镜像到生产服务器并加载 ```bash # 导出镜像 → 通过管道传输 → 生产服务器加载 ssh us-asaki-root "docker save sub2api:latest" | ssh clicodeplus "docker load" + +# ⚠️ 必须看到 "Loaded image: sub2api:latest" 输出 ``` -#### 4. 更新镜像标签并重启服务 +#### 4. 
生产服务器同步代码、更新标签并重启 ```bash +# 同步代码(用于版本号确认和 deploy 配置) +ssh clicodeplus "cd /root/sub2api && git fetch fork && git checkout -B release/custom-0.1.69 fork/release/custom-0.1.69" + +# 更新镜像标签并重启 ssh clicodeplus "docker tag sub2api:latest weishaw/sub2api:latest" ssh clicodeplus "cd /root/sub2api/deploy && docker compose up -d --force-recreate sub2api" ``` @@ -199,12 +221,39 @@ ssh clicodeplus "docker logs sub2api --tail 20" # 确认版本号(必须与步骤 0 中设置的版本号一致) ssh clicodeplus "cat /root/sub2api/backend/cmd/server/VERSION" -# 检查容器状态 +# 检查容器状态(必须显示 healthy) ssh clicodeplus "docker ps | grep sub2api" ``` --- +### 构建服务器首次初始化 + +首次使用 `us-asaki-root` 作为构建服务器时,需要执行以下一次性操作: + +```bash +ssh us-asaki-root + +# 1) 克隆仓库 +cd /root +git clone https://github.com/touwaeriol/sub2api.git sub2api +cd sub2api + +# 2) 验证 Docker 和 buildx 版本 +docker version +docker buildx version +# 如果 buildx 版本过旧(< v0.14),执行更新: +# LATEST=$(curl -fsSL https://api.github.com/repos/docker/buildx/releases/latest | grep tag_name | cut -d'"' -f4) +# curl -fsSL "https://github.com/docker/buildx/releases/download/${LATEST}/buildx-${LATEST}.linux-amd64" -o ~/.docker/cli-plugins/docker-buildx +# chmod +x ~/.docker/cli-plugins/docker-buildx + +# 3) 验证构建能力 +docker build --no-cache -t sub2api:test -f Dockerfile . 
+docker rmi sub2api:test +``` + +--- + ## Beta 并行部署(不影响现网) 目标:在同一台服务器上并行启动一个 beta 实例(例如端口 `8084`),**严禁改动/重启**现网实例(默认目录 `/root/sub2api`)。 From 6a1c28b70e78e8c053a468235b6bd92158a65d33 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 00:41:05 +0800 Subject: [PATCH 030/175] =?UTF-8?q?docs:=20=E4=BC=98=E5=8C=96=20beta=20?= =?UTF-8?q?=E9=83=A8=E7=BD=B2=E6=B5=81=E7=A8=8B=EF=BC=8C=E6=9E=84=E5=BB=BA?= =?UTF-8?q?=E6=9C=8D=E5=8A=A1=E5=99=A8=E5=85=B1=E7=94=A8=E4=BB=93=E5=BA=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - beta 和正式共用构建服务器 /root/sub2api 仓库,通过镜像标签区分 - 首次部署和更新 beta 流程统一使用共用仓库 + 分支切换 - 各步骤增加检查点和验证说明 --- AGENTS.md | 44 ++++++++++++++++++++++---------------------- CLAUDE.md | 44 ++++++++++++++++++++++---------------------- 2 files changed, 44 insertions(+), 44 deletions(-) diff --git a/AGENTS.md b/AGENTS.md index 7b261891..ccac56d1 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -277,24 +277,21 @@ ssh clicodeplus "docker ps --format 'table {{.Names}}\t{{.Image}}\t{{.Ports}}' | ### 首次部署步骤 +> **构建服务器说明**:正式和 beta 共用构建服务器上的 `/root/sub2api` 仓库,通过不同的镜像标签区分(`sub2api:latest` 用于正式,`sub2api:beta` 用于测试)。 + ```bash -# 0) 进入构建服务器 -ssh us-asaki-root +# 1) 构建服务器构建 beta 镜像(共用 /root/sub2api 仓库,切到目标分支后打 beta 标签) +ssh us-asaki-root "cd /root/sub2api && git fetch origin && git checkout -B release/custom-0.1.71 origin/release/custom-0.1.71" +ssh us-asaki-root "cd /root/sub2api && docker build --no-cache -t sub2api:beta -f Dockerfile ." -# 1) 克隆代码到新目录(示例使用你的 fork) -cd /root -git clone https://github.com/touwaeriol/sub2api.git sub2api-beta -cd /root/sub2api-beta -git checkout release/custom-0.1.71 +# ⚠️ 构建完成后如需恢复正式分支: +# ssh us-asaki-root "cd /root/sub2api && git checkout release/custom-<正式版本>" -# 2) 构建 beta 镜像 -docker build -t sub2api:beta -f Dockerfile . 
-exit - -# 3) 传输镜像到生产服务器 +# 2) 传输镜像到生产服务器 ssh us-asaki-root "docker save sub2api:beta" | ssh clicodeplus "docker load" +# ⚠️ 必须看到 "Loaded image: sub2api:beta" 输出 -# 4) 在生产服务器上准备 beta 环境 +# 3) 在生产服务器上准备 beta 环境 ssh clicodeplus # 克隆代码(仅用于 deploy 配置和版本号确认,不在此构建) @@ -303,7 +300,7 @@ git clone https://github.com/touwaeriol/sub2api.git sub2api-beta cd /root/sub2api-beta git checkout release/custom-0.1.71 -# 5) 准备 beta 的 .env(敏感信息只写这里) +# 4) 准备 beta 的 .env(敏感信息只写这里) cd /root/sub2api-beta/deploy # 推荐:从现网 .env 复制,保证除 DB 名/用户/端口外完全一致 @@ -314,7 +311,7 @@ perl -pi -e 's/^SERVER_PORT=.*/SERVER_PORT=8084/' ./.env perl -pi -e 's/^POSTGRES_USER=.*/POSTGRES_USER=beta/' ./.env perl -pi -e 's/^POSTGRES_DB=.*/POSTGRES_DB=beta/' ./.env -# 6) 写 compose override(避免与现网容器名冲突,镜像使用构建服务器传输的 sub2api:beta) +# 5) 写 compose override(避免与现网容器名冲突,镜像使用构建服务器传输的 sub2api:beta) cat > docker-compose.override.yml <<'YAML' services: sub2api: @@ -324,11 +321,11 @@ services: container_name: sub2api-beta-redis YAML -# 7) 启动 beta(独立 project,确保不影响现网) +# 6) 启动 beta(独立 project,确保不影响现网) cd /root/sub2api-beta/deploy docker compose -p sub2api-beta --env-file .env -f docker-compose.yml -f docker-compose.override.yml up -d -# 8) 验证 beta +# 7) 验证 beta curl -fsS http://127.0.0.1:8084/health docker logs sub2api-beta --tail 50 ``` @@ -345,19 +342,22 @@ docker logs sub2api-beta --tail 50 ### 更新 beta(构建服务器构建 + 传输 + 仅重启 beta 容器) ```bash -# 1) 构建服务器拉取代码并构建镜像 -ssh us-asaki-root "set -e; cd /root/sub2api-beta && git fetch --all --tags && git checkout -f release/custom-0.1.71 && git reset --hard origin/release/custom-0.1.71" -ssh us-asaki-root "cd /root/sub2api-beta && docker build -t sub2api:beta -f Dockerfile ." +# 1) 构建服务器拉取代码并构建镜像(共用 /root/sub2api 仓库) +ssh us-asaki-root "cd /root/sub2api && git fetch origin && git checkout -B release/custom-0.1.71 origin/release/custom-0.1.71" +ssh us-asaki-root "cd /root/sub2api && docker build --no-cache -t sub2api:beta -f Dockerfile ." 
+# ⚠️ 必须看到构建成功输出 # 2) 传输镜像到生产服务器 ssh us-asaki-root "docker save sub2api:beta" | ssh clicodeplus "docker load" +# ⚠️ 必须看到 "Loaded image: sub2api:beta" 输出 # 3) 生产服务器同步代码(用于版本号确认和 deploy 配置) ssh clicodeplus "set -e; cd /root/sub2api-beta && git fetch --all --tags && git checkout -f release/custom-0.1.71 && git reset --hard origin/release/custom-0.1.71" -# 4) 重启 beta 容器 +# 4) 重启 beta 容器并验证 ssh clicodeplus "cd /root/sub2api-beta/deploy && docker compose -p sub2api-beta --env-file .env -f docker-compose.yml -f docker-compose.override.yml up -d --no-deps --force-recreate sub2api" -ssh clicodeplus "curl -fsS http://127.0.0.1:8084/health" +ssh clicodeplus "sleep 5 && curl -fsS http://127.0.0.1:8084/health" +ssh clicodeplus "cat /root/sub2api-beta/backend/cmd/server/VERSION" ``` ### 停止/回滚 beta(只影响 beta) diff --git a/CLAUDE.md b/CLAUDE.md index 7b261891..ccac56d1 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -277,24 +277,21 @@ ssh clicodeplus "docker ps --format 'table {{.Names}}\t{{.Image}}\t{{.Ports}}' | ### 首次部署步骤 +> **构建服务器说明**:正式和 beta 共用构建服务器上的 `/root/sub2api` 仓库,通过不同的镜像标签区分(`sub2api:latest` 用于正式,`sub2api:beta` 用于测试)。 + ```bash -# 0) 进入构建服务器 -ssh us-asaki-root +# 1) 构建服务器构建 beta 镜像(共用 /root/sub2api 仓库,切到目标分支后打 beta 标签) +ssh us-asaki-root "cd /root/sub2api && git fetch origin && git checkout -B release/custom-0.1.71 origin/release/custom-0.1.71" +ssh us-asaki-root "cd /root/sub2api && docker build --no-cache -t sub2api:beta -f Dockerfile ." -# 1) 克隆代码到新目录(示例使用你的 fork) -cd /root -git clone https://github.com/touwaeriol/sub2api.git sub2api-beta -cd /root/sub2api-beta -git checkout release/custom-0.1.71 +# ⚠️ 构建完成后如需恢复正式分支: +# ssh us-asaki-root "cd /root/sub2api && git checkout release/custom-<正式版本>" -# 2) 构建 beta 镜像 -docker build -t sub2api:beta -f Dockerfile . 
-exit - -# 3) 传输镜像到生产服务器 +# 2) 传输镜像到生产服务器 ssh us-asaki-root "docker save sub2api:beta" | ssh clicodeplus "docker load" +# ⚠️ 必须看到 "Loaded image: sub2api:beta" 输出 -# 4) 在生产服务器上准备 beta 环境 +# 3) 在生产服务器上准备 beta 环境 ssh clicodeplus # 克隆代码(仅用于 deploy 配置和版本号确认,不在此构建) @@ -303,7 +300,7 @@ git clone https://github.com/touwaeriol/sub2api.git sub2api-beta cd /root/sub2api-beta git checkout release/custom-0.1.71 -# 5) 准备 beta 的 .env(敏感信息只写这里) +# 4) 准备 beta 的 .env(敏感信息只写这里) cd /root/sub2api-beta/deploy # 推荐:从现网 .env 复制,保证除 DB 名/用户/端口外完全一致 @@ -314,7 +311,7 @@ perl -pi -e 's/^SERVER_PORT=.*/SERVER_PORT=8084/' ./.env perl -pi -e 's/^POSTGRES_USER=.*/POSTGRES_USER=beta/' ./.env perl -pi -e 's/^POSTGRES_DB=.*/POSTGRES_DB=beta/' ./.env -# 6) 写 compose override(避免与现网容器名冲突,镜像使用构建服务器传输的 sub2api:beta) +# 5) 写 compose override(避免与现网容器名冲突,镜像使用构建服务器传输的 sub2api:beta) cat > docker-compose.override.yml <<'YAML' services: sub2api: @@ -324,11 +321,11 @@ services: container_name: sub2api-beta-redis YAML -# 7) 启动 beta(独立 project,确保不影响现网) +# 6) 启动 beta(独立 project,确保不影响现网) cd /root/sub2api-beta/deploy docker compose -p sub2api-beta --env-file .env -f docker-compose.yml -f docker-compose.override.yml up -d -# 8) 验证 beta +# 7) 验证 beta curl -fsS http://127.0.0.1:8084/health docker logs sub2api-beta --tail 50 ``` @@ -345,19 +342,22 @@ docker logs sub2api-beta --tail 50 ### 更新 beta(构建服务器构建 + 传输 + 仅重启 beta 容器) ```bash -# 1) 构建服务器拉取代码并构建镜像 -ssh us-asaki-root "set -e; cd /root/sub2api-beta && git fetch --all --tags && git checkout -f release/custom-0.1.71 && git reset --hard origin/release/custom-0.1.71" -ssh us-asaki-root "cd /root/sub2api-beta && docker build -t sub2api:beta -f Dockerfile ." +# 1) 构建服务器拉取代码并构建镜像(共用 /root/sub2api 仓库) +ssh us-asaki-root "cd /root/sub2api && git fetch origin && git checkout -B release/custom-0.1.71 origin/release/custom-0.1.71" +ssh us-asaki-root "cd /root/sub2api && docker build --no-cache -t sub2api:beta -f Dockerfile ." 
+# ⚠️ 必须看到构建成功输出 # 2) 传输镜像到生产服务器 ssh us-asaki-root "docker save sub2api:beta" | ssh clicodeplus "docker load" +# ⚠️ 必须看到 "Loaded image: sub2api:beta" 输出 # 3) 生产服务器同步代码(用于版本号确认和 deploy 配置) ssh clicodeplus "set -e; cd /root/sub2api-beta && git fetch --all --tags && git checkout -f release/custom-0.1.71 && git reset --hard origin/release/custom-0.1.71" -# 4) 重启 beta 容器 +# 4) 重启 beta 容器并验证 ssh clicodeplus "cd /root/sub2api-beta/deploy && docker compose -p sub2api-beta --env-file .env -f docker-compose.yml -f docker-compose.override.yml up -d --no-deps --force-recreate sub2api" -ssh clicodeplus "curl -fsS http://127.0.0.1:8084/health" +ssh clicodeplus "sleep 5 && curl -fsS http://127.0.0.1:8084/health" +ssh clicodeplus "cat /root/sub2api-beta/backend/cmd/server/VERSION" ``` ### 停止/回滚 beta(只影响 beta) From 86e600aa5248163518dc2b549939614910dfcda8 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 01:41:34 +0800 Subject: [PATCH 031/175] =?UTF-8?q?fix:=20=E5=B9=B6=E5=8F=91/=E6=8E=92?= =?UTF-8?q?=E9=98=9F=E9=9D=A2=E6=9D=BF=E6=94=AF=E6=8C=81=20platform/group?= =?UTF-8?q?=20=E8=BF=87=E6=BB=A4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 添加 platformFilter/groupIdFilter props 变化监听器,过滤条件变化时 立即重新加载数据(修复选择平台后显示"暂无数据"的问题) - 全栈为 getUserConcurrencyStats 添加 platform/group_id 过滤支持: 前端 API → Handler 解析 query params → Service 层过滤逻辑 - Service 层通过账号的 group 关联反查用户的 AllowedGroups, 与 GetConcurrencyStats 的过滤模式保持一致 --- .../handler/admin/ops_realtime_handler.go | 17 ++++- backend/internal/service/ops_concurrency.go | 70 ++++++++++++++++++- frontend/src/api/admin/ops.ts | 12 +++- .../ops/components/OpsConcurrencyCard.vue | 10 ++- 4 files changed, 103 insertions(+), 6 deletions(-) diff --git a/backend/internal/handler/admin/ops_realtime_handler.go b/backend/internal/handler/admin/ops_realtime_handler.go index c175dcd0..2d3cce4b 100644 --- a/backend/internal/handler/admin/ops_realtime_handler.go +++ 
b/backend/internal/handler/admin/ops_realtime_handler.go @@ -65,6 +65,10 @@ func (h *OpsHandler) GetConcurrencyStats(c *gin.Context) { // GetUserConcurrencyStats returns real-time concurrency usage for all active users. // GET /api/v1/admin/ops/user-concurrency +// +// Query params: +// - platform: optional, filter users by allowed platform +// - group_id: optional, filter users by allowed group func (h *OpsHandler) GetUserConcurrencyStats(c *gin.Context) { if h.opsService == nil { response.Error(c, http.StatusServiceUnavailable, "Ops service not available") @@ -84,7 +88,18 @@ func (h *OpsHandler) GetUserConcurrencyStats(c *gin.Context) { return } - users, collectedAt, err := h.opsService.GetUserConcurrencyStats(c.Request.Context()) + platformFilter := strings.TrimSpace(c.Query("platform")) + var groupID *int64 + if v := strings.TrimSpace(c.Query("group_id")); v != "" { + id, err := strconv.ParseInt(v, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid group_id") + return + } + groupID = &id + } + + users, collectedAt, err := h.opsService.GetUserConcurrencyStats(c.Request.Context(), platformFilter, groupID) if err != nil { response.ErrorFrom(c, err) return diff --git a/backend/internal/service/ops_concurrency.go b/backend/internal/service/ops_concurrency.go index f6541d08..faac2d5b 100644 --- a/backend/internal/service/ops_concurrency.go +++ b/backend/internal/service/ops_concurrency.go @@ -344,8 +344,16 @@ func (s *OpsService) getUsersLoadMapBestEffort(ctx context.Context, users []User return out } -// GetUserConcurrencyStats returns real-time concurrency usage for all active users. -func (s *OpsService) GetUserConcurrencyStats(ctx context.Context) (map[int64]*UserConcurrencyInfo, *time.Time, error) { +// GetUserConcurrencyStats returns real-time concurrency usage for active users. 
+// +// Optional filters: +// - platformFilter: only include users who have access to groups belonging to that platform +// - groupIDFilter: only include users who have access to that specific group +func (s *OpsService) GetUserConcurrencyStats( + ctx context.Context, + platformFilter string, + groupIDFilter *int64, +) (map[int64]*UserConcurrencyInfo, *time.Time, error) { if err := s.RequireMonitoringEnabled(ctx); err != nil { return nil, nil, err } @@ -355,6 +363,15 @@ func (s *OpsService) GetUserConcurrencyStats(ctx context.Context) (map[int64]*Us return nil, nil, err } + // Build a set of allowed group IDs when filtering is requested. + var allowedGroupIDs map[int64]struct{} + if platformFilter != "" || (groupIDFilter != nil && *groupIDFilter > 0) { + allowedGroupIDs, err = s.buildAllowedGroupIDsForFilter(ctx, platformFilter, groupIDFilter) + if err != nil { + return nil, nil, err + } + } + collectedAt := time.Now() loadMap := s.getUsersLoadMapBestEffort(ctx, users) @@ -365,6 +382,12 @@ func (s *OpsService) GetUserConcurrencyStats(ctx context.Context) (map[int64]*Us continue } + // Apply group/platform filter: skip users whose AllowedGroups + // have no intersection with the matching group IDs. + if allowedGroupIDs != nil && !userMatchesGroupFilter(u.AllowedGroups, allowedGroupIDs) { + continue + } + load := loadMap[u.ID] currentInUse := int64(0) waiting := int64(0) @@ -394,3 +417,46 @@ func (s *OpsService) GetUserConcurrencyStats(ctx context.Context) (map[int64]*Us return result, &collectedAt, nil } + +// buildAllowedGroupIDsForFilter returns the set of group IDs that match the given +// platform and/or group ID filter. It reuses listAllAccountsForOps (which already +// supports platform filtering at the DB level) to collect group IDs from accounts. +func (s *OpsService) buildAllowedGroupIDsForFilter(ctx context.Context, platformFilter string, groupIDFilter *int64) (map[int64]struct{}, error) { + // Fast path: only group ID filter, no platform filter needed. 
+ if platformFilter == "" && groupIDFilter != nil && *groupIDFilter > 0 { + return map[int64]struct{}{*groupIDFilter: {}}, nil + } + + // Use the same account-based approach as GetConcurrencyStats to collect group IDs. + accounts, err := s.listAllAccountsForOps(ctx, platformFilter) + if err != nil { + return nil, err + } + + groupIDs := make(map[int64]struct{}) + for _, acc := range accounts { + for _, grp := range acc.Groups { + if grp == nil || grp.ID <= 0 { + continue + } + // If groupIDFilter is set, only include that specific group. + if groupIDFilter != nil && *groupIDFilter > 0 && grp.ID != *groupIDFilter { + continue + } + groupIDs[grp.ID] = struct{}{} + } + } + + return groupIDs, nil +} + +// userMatchesGroupFilter returns true if the user's AllowedGroups contains +// at least one group ID in the allowed set. +func userMatchesGroupFilter(userGroups []int64, allowedGroupIDs map[int64]struct{}) bool { + for _, gid := range userGroups { + if _, ok := allowedGroupIDs[gid]; ok { + return true + } + } + return false +} diff --git a/frontend/src/api/admin/ops.ts b/frontend/src/api/admin/ops.ts index 9f980a12..523fbd00 100644 --- a/frontend/src/api/admin/ops.ts +++ b/frontend/src/api/admin/ops.ts @@ -366,8 +366,16 @@ export async function getConcurrencyStats(platform?: string, groupId?: number | return data } -export async function getUserConcurrencyStats(): Promise { - const { data } = await apiClient.get('/admin/ops/user-concurrency') +export async function getUserConcurrencyStats(platform?: string, groupId?: number | null): Promise { + const params: Record = {} + if (platform) { + params.platform = platform + } + if (typeof groupId === 'number' && groupId > 0) { + params.group_id = groupId + } + + const { data } = await apiClient.get('/admin/ops/user-concurrency', { params }) return data } diff --git a/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue b/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue index ca640ade..0956caa5 100644 
--- a/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue +++ b/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue @@ -265,7 +265,7 @@ async function loadData() { try { if (showByUser.value) { // 用户视图模式只加载用户并发数据 - const userData = await opsAPI.getUserConcurrencyStats() + const userData = await opsAPI.getUserConcurrencyStats(props.platformFilter, props.groupIdFilter) userConcurrency.value = userData } else { // 常规模式加载账号/平台/分组数据 @@ -301,6 +301,14 @@ watch( } ) +// 过滤条件变化时重新加载数据 +watch( + [() => props.platformFilter, () => props.groupIdFilter], + () => { + loadData() + } +) + function getLoadBarClass(loadPct: number): string { if (loadPct >= 90) return 'bg-red-500 dark:bg-red-600' if (loadPct >= 70) return 'bg-orange-500 dark:bg-orange-600' From 37c76a93ab07636d27615314b95473ffdef50a5f Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 01:56:52 +0800 Subject: [PATCH 032/175] chore: bump version to 0.1.79.3 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index a98898f3..a281565e 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.79.2 +0.1.79.3 From 79fba9c8d33f60e481299aec77006abef94a13e0 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 04:54:05 +0800 Subject: [PATCH 033/175] refactor: consolidate failover logic into FailoverState MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Merge FailoverRetry/FailoverSwitch into single FailoverContinue action - Extract HandleSelectionExhausted into FailoverState (was duplicated 3×) - Move helper functions (needForceCacheBilling, sleepWithContext) into failover_loop.go - Inline sleepFailoverDelay, replace sleepAntigravitySingleAccountBackoff with constant - Delete gateway_handler_single_account_retry_test.go (tested removed function) - Add 6 test cases for HandleSelectionExhausted --- 
backend/internal/handler/failover_loop.go | 49 ++++++- .../internal/handler/failover_loop_test.go | 129 ++++++++++++++---- backend/internal/handler/gateway_handler.go | 108 ++++----------- ...teway_handler_single_account_retry_test.go | 51 ------- .../internal/handler/gemini_v1beta_handler.go | 27 ++-- 5 files changed, 186 insertions(+), 178 deletions(-) delete mode 100644 backend/internal/handler/gateway_handler_single_account_retry_test.go diff --git a/backend/internal/handler/failover_loop.go b/backend/internal/handler/failover_loop.go index a161a866..1f8a7e9a 100644 --- a/backend/internal/handler/failover_loop.go +++ b/backend/internal/handler/failover_loop.go @@ -3,6 +3,7 @@ package handler import ( "context" "log" + "net/http" "time" "github.com/Wei-Shaw/sub2api/internal/service" @@ -18,10 +19,8 @@ type TempUnscheduler interface { type FailoverAction int const ( - // FailoverRetry 同账号重试(调用方应 continue 重新进入循环,不更换账号) - FailoverRetry FailoverAction = iota - // FailoverSwitch 切换账号(调用方应 continue 重新选择账号) - FailoverSwitch + // FailoverContinue 继续循环(同账号重试或切换账号,调用方统一 continue) + FailoverContinue FailoverAction = iota // FailoverExhausted 切换次数耗尽(调用方应返回错误响应) FailoverExhausted // FailoverCanceled context 已取消(调用方应直接 return) @@ -33,6 +32,10 @@ const ( maxSameAccountRetries = 2 // sameAccountRetryDelay 同账号重试间隔 sameAccountRetryDelay = 500 * time.Millisecond + // singleAccountBackoffDelay 单账号分组 503 退避重试固定延时。 + // Service 层在 SingleAccountRetry 模式下已做充分原地重试(最多 3 次、总等待 30s), + // Handler 层只需短暂间隔后重新进入 Service 层即可。 + singleAccountBackoffDelay = 2 * time.Second ) // FailoverState 跨循环迭代共享的 failover 状态 @@ -80,7 +83,7 @@ func (s *FailoverState) HandleFailoverError( if !sleepWithContext(ctx, sameAccountRetryDelay) { return FailoverCanceled } - return FailoverRetry + return FailoverContinue } // 同账号重试用尽,执行临时封禁 @@ -103,12 +106,44 @@ func (s *FailoverState) HandleFailoverError( // Antigravity 平台换号线性递增延时 if platform == service.PlatformAntigravity { - if !sleepFailoverDelay(ctx, 
s.SwitchCount) { + delay := time.Duration(s.SwitchCount-1) * time.Second + if !sleepWithContext(ctx, delay) { return FailoverCanceled } } - return FailoverSwitch + return FailoverContinue +} + +// HandleSelectionExhausted 处理选号失败(所有候选账号都在排除列表中)时的退避重试决策。 +// 针对 Antigravity 单账号分组的 503 (MODEL_CAPACITY_EXHAUSTED) 场景: +// 清除排除列表、等待退避后重新选号。 +// +// 返回 FailoverContinue 时,调用方应设置 SingleAccountRetry context 并 continue。 +// 返回 FailoverExhausted 时,调用方应返回错误响应。 +// 返回 FailoverCanceled 时,调用方应直接 return。 +func (s *FailoverState) HandleSelectionExhausted(ctx context.Context) FailoverAction { + if s.LastFailoverErr != nil && + s.LastFailoverErr.StatusCode == http.StatusServiceUnavailable && + s.SwitchCount <= s.MaxSwitches { + + log.Printf("Antigravity single-account 503 backoff: waiting %v before retry (attempt %d)", + singleAccountBackoffDelay, s.SwitchCount) + if !sleepWithContext(ctx, singleAccountBackoffDelay) { + return FailoverCanceled + } + log.Printf("Antigravity single-account 503 retry: clearing failed accounts, retry %d/%d", + s.SwitchCount, s.MaxSwitches) + s.FailedAccountIDs = make(map[int64]struct{}) + return FailoverContinue + } + return FailoverExhausted +} + +// needForceCacheBilling 判断 failover 时是否需要强制缓存计费。 +// 粘性会话切换账号、或上游明确标记时,将 input_tokens 转为 cache_read 计费。 +func needForceCacheBilling(hasBoundSession bool, failoverErr *service.UpstreamFailoverError) bool { + return hasBoundSession || (failoverErr != nil && failoverErr.ForceCacheBilling) } // sleepWithContext 等待指定时长,返回 false 表示 context 已取消。 diff --git a/backend/internal/handler/failover_loop_test.go b/backend/internal/handler/failover_loop_test.go index ff48e77e..5a41b2dd 100644 --- a/backend/internal/handler/failover_loop_test.go +++ b/backend/internal/handler/failover_loop_test.go @@ -135,7 +135,7 @@ func TestHandleFailoverError_BasicSwitch(t *testing.T) { action := fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) - require.Equal(t, FailoverSwitch, action) + require.Equal(t, 
FailoverContinue, action) require.Equal(t, 1, fs.SwitchCount) require.Contains(t, fs.FailedAccountIDs, int64(100)) require.Equal(t, err, fs.LastFailoverErr) @@ -153,7 +153,7 @@ func TestHandleFailoverError_BasicSwitch(t *testing.T) { action := fs.HandleFailoverError(context.Background(), mock, 100, service.PlatformAntigravity, err) elapsed := time.Since(start) - require.Equal(t, FailoverSwitch, action) + require.Equal(t, FailoverContinue, action) require.Equal(t, 1, fs.SwitchCount) require.Less(t, elapsed, 200*time.Millisecond, "第一次切换延迟应为 0") }) @@ -169,7 +169,7 @@ func TestHandleFailoverError_BasicSwitch(t *testing.T) { action := fs.HandleFailoverError(context.Background(), mock, 200, service.PlatformAntigravity, err) elapsed := time.Since(start) - require.Equal(t, FailoverSwitch, action) + require.Equal(t, FailoverContinue, action) require.Equal(t, 2, fs.SwitchCount) require.GreaterOrEqual(t, elapsed, 800*time.Millisecond, "第二次切换延迟应约 1s") require.Less(t, elapsed, 3*time.Second) @@ -182,13 +182,13 @@ func TestHandleFailoverError_BasicSwitch(t *testing.T) { // 第一次切换:0→1 err1 := newTestFailoverErr(500, false, false) action := fs.HandleFailoverError(context.Background(), mock, 100, "openai", err1) - require.Equal(t, FailoverSwitch, action) + require.Equal(t, FailoverContinue, action) require.Equal(t, 1, fs.SwitchCount) // 第二次切换:1→2 err2 := newTestFailoverErr(502, false, false) action = fs.HandleFailoverError(context.Background(), mock, 200, "openai", err2) - require.Equal(t, FailoverSwitch, action) + require.Equal(t, FailoverContinue, action) require.Equal(t, 2, fs.SwitchCount) // 第三次已耗尽:SwitchCount(2) >= MaxSwitches(2) @@ -272,7 +272,7 @@ func TestHandleFailoverError_CacheBilling(t *testing.T) { // --------------------------------------------------------------------------- func TestHandleFailoverError_SameAccountRetry(t *testing.T) { - t.Run("第一次重试返回FailoverRetry", func(t *testing.T) { + t.Run("第一次重试返回FailoverContinue", func(t *testing.T) { mock := 
&mockTempUnscheduler{} fs := NewFailoverState(3, false) err := newTestFailoverErr(400, true, false) @@ -281,7 +281,7 @@ func TestHandleFailoverError_SameAccountRetry(t *testing.T) { action := fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) elapsed := time.Since(start) - require.Equal(t, FailoverRetry, action) + require.Equal(t, FailoverContinue, action) require.Equal(t, 1, fs.SameAccountRetryCount[100]) require.Equal(t, 0, fs.SwitchCount, "同账号重试不应增加切换计数") require.NotContains(t, fs.FailedAccountIDs, int64(100), "同账号重试不应加入失败列表") @@ -291,19 +291,19 @@ func TestHandleFailoverError_SameAccountRetry(t *testing.T) { require.Less(t, elapsed, 2*time.Second) }) - t.Run("第二次重试仍返回FailoverRetry", func(t *testing.T) { + t.Run("第二次重试仍返回FailoverContinue", func(t *testing.T) { mock := &mockTempUnscheduler{} fs := NewFailoverState(3, false) err := newTestFailoverErr(400, true, false) // 第一次 action := fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) - require.Equal(t, FailoverRetry, action) + require.Equal(t, FailoverContinue, action) require.Equal(t, 1, fs.SameAccountRetryCount[100]) // 第二次 action = fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) - require.Equal(t, FailoverRetry, action) + require.Equal(t, FailoverContinue, action) require.Equal(t, 2, fs.SameAccountRetryCount[100]) require.Empty(t, mock.calls, "两次重试期间均不应调用 TempUnschedule") @@ -321,7 +321,7 @@ func TestHandleFailoverError_SameAccountRetry(t *testing.T) { // 第三次:重试已达到 maxSameAccountRetries(2),应切换账号 action := fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) - require.Equal(t, FailoverSwitch, action) + require.Equal(t, FailoverContinue, action) require.Equal(t, 1, fs.SwitchCount) require.Contains(t, fs.FailedAccountIDs, int64(100)) @@ -338,12 +338,12 @@ func TestHandleFailoverError_SameAccountRetry(t *testing.T) { // 账号 100 第一次重试 action := fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) - require.Equal(t, 
FailoverRetry, action) + require.Equal(t, FailoverContinue, action) require.Equal(t, 1, fs.SameAccountRetryCount[100]) // 账号 200 第一次重试(独立计数) action = fs.HandleFailoverError(context.Background(), mock, 200, "openai", err) - require.Equal(t, FailoverRetry, action) + require.Equal(t, FailoverContinue, action) require.Equal(t, 1, fs.SameAccountRetryCount[200]) require.Equal(t, 1, fs.SameAccountRetryCount[100], "账号 100 的计数不应受影响") }) @@ -358,11 +358,11 @@ func TestHandleFailoverError_SameAccountRetry(t *testing.T) { fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) // 第三次: 重试耗尽 → 切换 action := fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) - require.Equal(t, FailoverSwitch, action) + require.Equal(t, FailoverContinue, action) // 再次遇到账号 100,计数仍为 2,条件不满足 → 直接切换 action = fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) - require.Equal(t, FailoverSwitch, action) + require.Equal(t, FailoverContinue, action) require.Len(t, mock.calls, 2, "第二次耗尽也应调用 TempUnschedule") }) } @@ -470,7 +470,7 @@ func TestHandleFailoverError_FailedAccountIDs(t *testing.T) { fs := NewFailoverState(3, false) action := fs.HandleFailoverError(context.Background(), mock, 100, "openai", newTestFailoverErr(400, true, false)) - require.Equal(t, FailoverRetry, action) + require.Equal(t, FailoverContinue, action) require.NotContains(t, fs.FailedAccountIDs, int64(100)) }) @@ -524,27 +524,27 @@ func TestHandleFailoverError_IntegrationScenario(t *testing.T) { // 1. 
账号 100 遇到可重试错误,同账号重试 2 次 retryErr := newTestFailoverErr(400, true, false) action := fs.HandleFailoverError(context.Background(), mock, 100, "openai", retryErr) - require.Equal(t, FailoverRetry, action) + require.Equal(t, FailoverContinue, action) require.True(t, fs.ForceCacheBilling, "hasBoundSession=true 应设置 ForceCacheBilling") action = fs.HandleFailoverError(context.Background(), mock, 100, "openai", retryErr) - require.Equal(t, FailoverRetry, action) + require.Equal(t, FailoverContinue, action) // 2. 账号 100 重试耗尽 → TempUnschedule + 切换 action = fs.HandleFailoverError(context.Background(), mock, 100, "openai", retryErr) - require.Equal(t, FailoverSwitch, action) + require.Equal(t, FailoverContinue, action) require.Equal(t, 1, fs.SwitchCount) require.Len(t, mock.calls, 1) // 3. 账号 200 遇到不可重试错误 → 直接切换 switchErr := newTestFailoverErr(500, false, false) action = fs.HandleFailoverError(context.Background(), mock, 200, "openai", switchErr) - require.Equal(t, FailoverSwitch, action) + require.Equal(t, FailoverContinue, action) require.Equal(t, 2, fs.SwitchCount) // 4. 账号 300 遇到不可重试错误 → 再切换 action = fs.HandleFailoverError(context.Background(), mock, 300, "openai", switchErr) - require.Equal(t, FailoverSwitch, action) + require.Equal(t, FailoverContinue, action) require.Equal(t, 3, fs.SwitchCount) // 5. 
账号 400 → 已耗尽 (SwitchCount=3 >= MaxSwitches=3) @@ -568,14 +568,14 @@ func TestHandleFailoverError_IntegrationScenario(t *testing.T) { start := time.Now() action := fs.HandleFailoverError(context.Background(), mock, 100, service.PlatformAntigravity, err) elapsed := time.Since(start) - require.Equal(t, FailoverSwitch, action) + require.Equal(t, FailoverContinue, action) require.Less(t, elapsed, 200*time.Millisecond, "第一次切换延迟为 0") // 第二次切换:delay = 1s start = time.Now() action = fs.HandleFailoverError(context.Background(), mock, 200, service.PlatformAntigravity, err) elapsed = time.Since(start) - require.Equal(t, FailoverSwitch, action) + require.Equal(t, FailoverContinue, action) require.GreaterOrEqual(t, elapsed, 800*time.Millisecond, "第二次切换延迟约 1s") // 第三次:耗尽(无延迟,因为在检查延迟之前就返回了) @@ -618,7 +618,7 @@ func TestHandleFailoverError_EdgeCases(t *testing.T) { err := newTestFailoverErr(0, false, false) action := fs.HandleFailoverError(context.Background(), mock, 100, "openai", err) - require.Equal(t, FailoverSwitch, action) + require.Equal(t, FailoverContinue, action) }) t.Run("AccountID为0也能正常跟踪", func(t *testing.T) { @@ -627,7 +627,7 @@ func TestHandleFailoverError_EdgeCases(t *testing.T) { err := newTestFailoverErr(500, true, false) action := fs.HandleFailoverError(context.Background(), mock, 0, "openai", err) - require.Equal(t, FailoverRetry, action) + require.Equal(t, FailoverContinue, action) require.Equal(t, 1, fs.SameAccountRetryCount[0]) }) @@ -637,7 +637,7 @@ func TestHandleFailoverError_EdgeCases(t *testing.T) { err := newTestFailoverErr(500, true, false) action := fs.HandleFailoverError(context.Background(), mock, -1, "openai", err) - require.Equal(t, FailoverRetry, action) + require.Equal(t, FailoverContinue, action) require.Equal(t, 1, fs.SameAccountRetryCount[-1]) }) @@ -651,7 +651,82 @@ func TestHandleFailoverError_EdgeCases(t *testing.T) { action := fs.HandleFailoverError(context.Background(), mock, 100, "", err) elapsed := time.Since(start) - require.Equal(t, 
FailoverSwitch, action) + require.Equal(t, FailoverContinue, action) require.Less(t, elapsed, 200*time.Millisecond, "空平台不应触发 Antigravity 延迟") }) } + +// --------------------------------------------------------------------------- +// HandleSelectionExhausted 测试 +// --------------------------------------------------------------------------- + +func TestHandleSelectionExhausted(t *testing.T) { + t.Run("无LastFailoverErr时返回Exhausted", func(t *testing.T) { + fs := NewFailoverState(3, false) + // LastFailoverErr 为 nil + + action := fs.HandleSelectionExhausted(context.Background()) + require.Equal(t, FailoverExhausted, action) + }) + + t.Run("非503错误返回Exhausted", func(t *testing.T) { + fs := NewFailoverState(3, false) + fs.LastFailoverErr = newTestFailoverErr(500, false, false) + + action := fs.HandleSelectionExhausted(context.Background()) + require.Equal(t, FailoverExhausted, action) + }) + + t.Run("503且未耗尽_等待后返回Continue并清除失败列表", func(t *testing.T) { + fs := NewFailoverState(3, false) + fs.LastFailoverErr = newTestFailoverErr(503, false, false) + fs.FailedAccountIDs[100] = struct{}{} + fs.SwitchCount = 1 + + start := time.Now() + action := fs.HandleSelectionExhausted(context.Background()) + elapsed := time.Since(start) + + require.Equal(t, FailoverContinue, action) + require.Empty(t, fs.FailedAccountIDs, "应清除失败账号列表") + require.GreaterOrEqual(t, elapsed, 1500*time.Millisecond, "应等待约 2s") + require.Less(t, elapsed, 5*time.Second) + }) + + t.Run("503但SwitchCount已超过MaxSwitches_返回Exhausted", func(t *testing.T) { + fs := NewFailoverState(2, false) + fs.LastFailoverErr = newTestFailoverErr(503, false, false) + fs.SwitchCount = 3 // > MaxSwitches(2) + + start := time.Now() + action := fs.HandleSelectionExhausted(context.Background()) + elapsed := time.Since(start) + + require.Equal(t, FailoverExhausted, action) + require.Less(t, elapsed, 100*time.Millisecond, "不应等待") + }) + + t.Run("503但context已取消_返回Canceled", func(t *testing.T) { + fs := NewFailoverState(3, false) + 
fs.LastFailoverErr = newTestFailoverErr(503, false, false) + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + start := time.Now() + action := fs.HandleSelectionExhausted(ctx) + elapsed := time.Since(start) + + require.Equal(t, FailoverCanceled, action) + require.Less(t, elapsed, 100*time.Millisecond, "应立即返回") + }) + + t.Run("503且SwitchCount等于MaxSwitches_仍可重试", func(t *testing.T) { + fs := NewFailoverState(2, false) + fs.LastFailoverErr = newTestFailoverErr(503, false, false) + fs.SwitchCount = 2 // == MaxSwitches,条件是 <=,仍可重试 + + action := fs.HandleSelectionExhausted(context.Background()) + require.Equal(t, FailoverContinue, action) + }) +} diff --git a/backend/internal/handler/gateway_handler.go b/backend/internal/handler/gateway_handler.go index 3bb6def4..0cc86bb4 100644 --- a/backend/internal/handler/gateway_handler.go +++ b/backend/internal/handler/gateway_handler.go @@ -248,25 +248,22 @@ func (h *GatewayHandler) Messages(c *gin.Context) { h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts: "+err.Error(), streamStarted) return } - // Antigravity 单账号退避重试:分组内没有其他可用账号时, - // 对 503 错误不直接返回,而是清除排除列表、等待退避后重试同一个账号。 - // 谷歌上游 503 (MODEL_CAPACITY_EXHAUSTED) 通常是暂时性的,等几秒就能恢复。 - if fs.LastFailoverErr != nil && fs.LastFailoverErr.StatusCode == http.StatusServiceUnavailable && fs.SwitchCount <= fs.MaxSwitches { - if sleepAntigravitySingleAccountBackoff(c.Request.Context(), fs.SwitchCount) { - log.Printf("Antigravity single-account 503 retry: clearing failed accounts, retry %d/%d", fs.SwitchCount, fs.MaxSwitches) - fs.FailedAccountIDs = make(map[int64]struct{}) - // 设置 context 标记,让 Service 层预检查等待限流过期而非直接切换 - ctx := context.WithValue(c.Request.Context(), ctxkey.SingleAccountRetry, true) - c.Request = c.Request.WithContext(ctx) - continue + action := fs.HandleSelectionExhausted(c.Request.Context()) + switch action { + case FailoverContinue: + ctx := context.WithValue(c.Request.Context(), 
ctxkey.SingleAccountRetry, true) + c.Request = c.Request.WithContext(ctx) + continue + case FailoverCanceled: + return + default: // FailoverExhausted + if fs.LastFailoverErr != nil { + h.handleFailoverExhausted(c, fs.LastFailoverErr, service.PlatformGemini, streamStarted) + } else { + h.handleFailoverExhaustedSimple(c, 502, streamStarted) } + return } - if fs.LastFailoverErr != nil { - h.handleFailoverExhausted(c, fs.LastFailoverErr, service.PlatformGemini, streamStarted) - } else { - h.handleFailoverExhaustedSimple(c, 502, streamStarted) - } - return } account := selection.Account setOpsSelectedAccount(c, account.ID) @@ -357,7 +354,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { if errors.As(err, &failoverErr) { action := fs.HandleFailoverError(c.Request.Context(), h.gatewayService, account.ID, account.Platform, failoverErr) switch action { - case FailoverRetry, FailoverSwitch: + case FailoverContinue: continue case FailoverExhausted: h.handleFailoverExhausted(c, fs.LastFailoverErr, service.PlatformGemini, streamStarted) @@ -424,25 +421,22 @@ func (h *GatewayHandler) Messages(c *gin.Context) { h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts: "+err.Error(), streamStarted) return } - // Antigravity 单账号退避重试:分组内没有其他可用账号时, - // 对 503 错误不直接返回,而是清除排除列表、等待退避后重试同一个账号。 - // 谷歌上游 503 (MODEL_CAPACITY_EXHAUSTED) 通常是暂时性的,等几秒就能恢复。 - if fs.LastFailoverErr != nil && fs.LastFailoverErr.StatusCode == http.StatusServiceUnavailable && fs.SwitchCount <= fs.MaxSwitches { - if sleepAntigravitySingleAccountBackoff(c.Request.Context(), fs.SwitchCount) { - log.Printf("Antigravity single-account 503 retry: clearing failed accounts, retry %d/%d", fs.SwitchCount, fs.MaxSwitches) - fs.FailedAccountIDs = make(map[int64]struct{}) - // 设置 context 标记,让 Service 层预检查等待限流过期而非直接切换 - ctx := context.WithValue(c.Request.Context(), ctxkey.SingleAccountRetry, true) - c.Request = c.Request.WithContext(ctx) - continue + action := 
fs.HandleSelectionExhausted(c.Request.Context()) + switch action { + case FailoverContinue: + ctx := context.WithValue(c.Request.Context(), ctxkey.SingleAccountRetry, true) + c.Request = c.Request.WithContext(ctx) + continue + case FailoverCanceled: + return + default: // FailoverExhausted + if fs.LastFailoverErr != nil { + h.handleFailoverExhausted(c, fs.LastFailoverErr, platform, streamStarted) + } else { + h.handleFailoverExhaustedSimple(c, 502, streamStarted) } + return } - if fs.LastFailoverErr != nil { - h.handleFailoverExhausted(c, fs.LastFailoverErr, platform, streamStarted) - } else { - h.handleFailoverExhaustedSimple(c, 502, streamStarted) - } - return } account := selection.Account setOpsSelectedAccount(c, account.ID) @@ -566,7 +560,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { if errors.As(err, &failoverErr) { action := fs.HandleFailoverError(c.Request.Context(), h.gatewayService, account.ID, account.Platform, failoverErr) switch action { - case FailoverRetry, FailoverSwitch: + case FailoverContinue: continue case FailoverExhausted: h.handleFailoverExhausted(c, fs.LastFailoverErr, account.Platform, streamStarted) @@ -835,48 +829,6 @@ func (h *GatewayHandler) handleConcurrencyError(c *gin.Context, err error, slotT fmt.Sprintf("Concurrency limit exceeded for %s, please retry later", slotType), streamStarted) } -// needForceCacheBilling 判断 failover 时是否需要强制缓存计费 -// 粘性会话切换账号、或上游明确标记时,将 input_tokens 转为 cache_read 计费 -func needForceCacheBilling(hasBoundSession bool, failoverErr *service.UpstreamFailoverError) bool { - return hasBoundSession || (failoverErr != nil && failoverErr.ForceCacheBilling) -} - -// sleepFailoverDelay 账号切换线性递增延时:第1次0s、第2次1s、第3次2s… -// 返回 false 表示 context 已取消。 -func sleepFailoverDelay(ctx context.Context, switchCount int) bool { - delay := time.Duration(switchCount-1) * time.Second - if delay <= 0 { - return true - } - select { - case <-ctx.Done(): - return false - case <-time.After(delay): - return true - } -} - -// 
sleepAntigravitySingleAccountBackoff Antigravity 平台单账号分组的 503 退避重试延时。 -// 当分组内只有一个可用账号且上游返回 503(MODEL_CAPACITY_EXHAUSTED)时使用, -// 采用短固定延时策略。Service 层在 SingleAccountRetry 模式下已经做了充分的原地重试 -// (最多 3 次、总等待 30s),所以 Handler 层的退避只需短暂等待即可。 -// 返回 false 表示 context 已取消。 -func sleepAntigravitySingleAccountBackoff(ctx context.Context, retryCount int) bool { - // 固定短延时:2s - // Service 层已经在原地等待了足够长的时间(retryDelay × 重试次数), - // Handler 层只需短暂间隔后重新进入 Service 层即可。 - const delay = 2 * time.Second - - log.Printf("Antigravity single-account 503 backoff: waiting %v before retry (attempt %d)", delay, retryCount) - - select { - case <-ctx.Done(): - return false - case <-time.After(delay): - return true - } -} - func (h *GatewayHandler) handleFailoverExhausted(c *gin.Context, failoverErr *service.UpstreamFailoverError, platform string, streamStarted bool) { statusCode := failoverErr.StatusCode responseBody := failoverErr.ResponseBody diff --git a/backend/internal/handler/gateway_handler_single_account_retry_test.go b/backend/internal/handler/gateway_handler_single_account_retry_test.go deleted file mode 100644 index 96aa14c6..00000000 --- a/backend/internal/handler/gateway_handler_single_account_retry_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package handler - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -// --------------------------------------------------------------------------- -// sleepAntigravitySingleAccountBackoff 测试 -// --------------------------------------------------------------------------- - -func TestSleepAntigravitySingleAccountBackoff_ReturnsTrue(t *testing.T) { - ctx := context.Background() - start := time.Now() - ok := sleepAntigravitySingleAccountBackoff(ctx, 1) - elapsed := time.Since(start) - - require.True(t, ok, "should return true when context is not canceled") - // 固定延迟 2s - require.GreaterOrEqual(t, elapsed, 1500*time.Millisecond, "should wait approximately 2s") - require.Less(t, elapsed, 5*time.Second, "should not wait 
too long") -} - -func TestSleepAntigravitySingleAccountBackoff_ContextCanceled(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - cancel() // 立即取消 - - start := time.Now() - ok := sleepAntigravitySingleAccountBackoff(ctx, 1) - elapsed := time.Since(start) - - require.False(t, ok, "should return false when context is canceled") - require.Less(t, elapsed, 500*time.Millisecond, "should return immediately on cancel") -} - -func TestSleepAntigravitySingleAccountBackoff_FixedDelay(t *testing.T) { - // 验证不同 retryCount 都使用固定 2s 延迟 - ctx := context.Background() - - start := time.Now() - ok := sleepAntigravitySingleAccountBackoff(ctx, 5) - elapsed := time.Since(start) - - require.True(t, ok) - // 即使 retryCount=5,延迟仍然是固定的 2s - require.GreaterOrEqual(t, elapsed, 1500*time.Millisecond) - require.Less(t, elapsed, 5*time.Second) -} diff --git a/backend/internal/handler/gemini_v1beta_handler.go b/backend/internal/handler/gemini_v1beta_handler.go index 8c6303b1..51b77037 100644 --- a/backend/internal/handler/gemini_v1beta_handler.go +++ b/backend/internal/handler/gemini_v1beta_handler.go @@ -337,21 +337,18 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { googleError(c, http.StatusServiceUnavailable, "No available Gemini accounts: "+err.Error()) return } - // Antigravity 单账号退避重试:分组内没有其他可用账号时, - // 对 503 错误不直接返回,而是清除排除列表、等待退避后重试同一个账号。 - // 谷歌上游 503 (MODEL_CAPACITY_EXHAUSTED) 通常是暂时性的,等几秒就能恢复。 - if fs.LastFailoverErr != nil && fs.LastFailoverErr.StatusCode == http.StatusServiceUnavailable && fs.SwitchCount <= fs.MaxSwitches { - if sleepAntigravitySingleAccountBackoff(c.Request.Context(), fs.SwitchCount) { - log.Printf("Antigravity single-account 503 retry: clearing failed accounts, retry %d/%d", fs.SwitchCount, fs.MaxSwitches) - fs.FailedAccountIDs = make(map[int64]struct{}) - // 设置 context 标记,让 Service 层预检查等待限流过期而非直接切换 - ctx := context.WithValue(c.Request.Context(), ctxkey.SingleAccountRetry, true) - c.Request = c.Request.WithContext(ctx) - 
continue - } + action := fs.HandleSelectionExhausted(c.Request.Context()) + switch action { + case FailoverContinue: + ctx := context.WithValue(c.Request.Context(), ctxkey.SingleAccountRetry, true) + c.Request = c.Request.WithContext(ctx) + continue + case FailoverCanceled: + return + default: // FailoverExhausted + h.handleGeminiFailoverExhausted(c, fs.LastFailoverErr) + return } - h.handleGeminiFailoverExhausted(c, fs.LastFailoverErr) - return } account := selection.Account setOpsSelectedAccount(c, account.ID) @@ -441,7 +438,7 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { if errors.As(err, &failoverErr) { action := fs.HandleFailoverError(c.Request.Context(), h.gatewayService, account.ID, account.Platform, failoverErr) switch action { - case FailoverRetry, FailoverSwitch: + case FailoverContinue: continue case FailoverExhausted: h.handleGeminiFailoverExhausted(c, fs.LastFailoverErr) From 9ecb6211d69162a987bd1805f694a3ac76c54287 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 04:54:11 +0800 Subject: [PATCH 034/175] docs: update CLAUDE.md and .gitignore --- .gitignore | 7 +++++- CLAUDE.md | 67 +++++++++++++++++++++++++++++++++++++++++++----------- 2 files changed, 60 insertions(+), 14 deletions(-) diff --git a/.gitignore b/.gitignore index 2f2bfbdf..527ccd6c 100644 --- a/.gitignore +++ b/.gitignore @@ -130,4 +130,9 @@ deploy/docker-compose.override.yml .gocache/ vite.config.js docs/* -.serena/ \ No newline at end of file +.serena/ + +# =================== +# 压测工具 +# =================== +tools/loadtest/ \ No newline at end of file diff --git a/CLAUDE.md b/CLAUDE.md index ccac56d1..e855e961 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -109,20 +109,42 @@ git push origin main |--------|----------|------| | 构建服务器 | `us-asaki-root` | 拉取代码、`docker build` 构建镜像 | | 生产服务器 | `clicodeplus` | 加载镜像、运行服务、部署验证 | +| 数据库服务器 | `db-clicodeplus` | PostgreSQL 16 + Redis 7,所有环境共用 | + +> 数据库服务器运维手册:`db-clicodeplus:/root/README.md` ### 部署环境说明 -| 环境 | 
目录(生产服务器) | 端口 | 数据库 | 容器名 | -|------|------|------|--------|--------| -| 正式 | `/root/sub2api` | 8080 | `sub2api` | `sub2api` | -| Beta | `/root/sub2api-beta` | 8084 | `beta` | `sub2api-beta` | +| 环境 | 目录(生产服务器) | 端口 | 数据库 | Redis DB | 容器名 | +|------|------|------|--------|----------|--------| +| 正式 | `/root/sub2api` | 8080 | `sub2api` | 0 | `sub2api` | +| Beta | `/root/sub2api-beta` | 8084 | `beta` | 2 | `sub2api-beta` | +| OpenAI | `/root/sub2api-openai` | 8083 | `openai` | 3 | `sub2api-openai` | -### 外部数据库 +### 外部数据库与 Redis -正式和 Beta 环境**共用外部 PostgreSQL 数据库**(非容器内数据库),配置在 `.env` 文件中: -- `DATABASE_HOST`:外部数据库地址 -- `DATABASE_SSLMODE`:SSL 模式(通常为 `require`) -- `POSTGRES_USER` / `POSTGRES_DB`:用户名和数据库名 +所有环境(正式、Beta、OpenAI)共用 `db.clicodeplus.com` 上的 **PostgreSQL 16** 和 **Redis 7**,不使用容器内数据库或 Redis。 + +**PostgreSQL**(端口 5432,TLS 加密,scram-sha-256 认证): + +| 环境 | 用户名 | 数据库 | +|------|--------|--------| +| 正式 | `sub2api` | `sub2api` | +| Beta | `beta` | `beta` | +| OpenAI | `openai` | `openai` | + +**Redis**(端口 6379,密码认证): + +| 环境 | DB | +|------|-----| +| 正式 | 0 | +| Beta | 2 | +| OpenAI | 3 | + +**配置方式**: +- 数据库通过 `.env` 中的 `DATABASE_HOST`、`DATABASE_SSLMODE`、`POSTGRES_USER`、`POSTGRES_PASSWORD`、`POSTGRES_DB` 配置 +- Redis 通过 `docker-compose.override.yml` 覆盖 `REDIS_HOST`(因主 compose 文件硬编码为 `redis`),密码通过 `.env` 中的 `REDIS_PASSWORD` 配置 +- 各环境的 `docker-compose.override.yml` 已通过 `depends_on: !reset {}` 和 `redis: profiles: [disabled]` 去掉了对容器 Redis 的依赖 #### 数据库操作命令 @@ -311,14 +333,20 @@ perl -pi -e 's/^SERVER_PORT=.*/SERVER_PORT=8084/' ./.env perl -pi -e 's/^POSTGRES_USER=.*/POSTGRES_USER=beta/' ./.env perl -pi -e 's/^POSTGRES_DB=.*/POSTGRES_DB=beta/' ./.env -# 5) 写 compose override(避免与现网容器名冲突,镜像使用构建服务器传输的 sub2api:beta) +# 5) 写 compose override(避免与现网容器名冲突,镜像使用构建服务器传输的 sub2api:beta,Redis 使用外部服务) cat > docker-compose.override.yml <<'YAML' services: sub2api: image: sub2api:beta container_name: sub2api-beta + environment: + - DATABASE_HOST=${DATABASE_HOST:-postgres} + - 
DATABASE_SSLMODE=${DATABASE_SSLMODE:-disable} + - REDIS_HOST=db.clicodeplus.com + depends_on: !reset {} redis: - container_name: sub2api-beta-redis + profiles: + - disabled YAML # 6) 启动 beta(独立 project,确保不影响现网) @@ -332,10 +360,11 @@ docker logs sub2api-beta --tail 50 ### 数据库配置约定(beta) -- 数据库地址/SSL/密码:与现网一致(从现网 `.env` 复制即可)。 +- 数据库地址/SSL/密码:与现网一致(从现网 `.env` 复制即可),均指向 `db.clicodeplus.com`。 - 仅修改: - `POSTGRES_USER=beta` - `POSTGRES_DB=beta` + - `REDIS_DB=2` 注意:需要数据库侧已存在 `beta` 用户与 `beta` 数据库,并授予权限;否则容器会启动失败并不断重启。 @@ -415,7 +444,19 @@ git checkout -B release/custom-0.1.69 fork/release/custom-0.1.69 # 配置环境变量 cd deploy cp .env.example .env -vim .env # 配置 DATABASE_URL, REDIS_URL, JWT_SECRET 等 +vim .env # 配置 DATABASE_HOST=db.clicodeplus.com, POSTGRES_PASSWORD, REDIS_PASSWORD, JWT_SECRET 等 + +# 创建 override 文件(Redis 指向外部服务,去掉容器 Redis 依赖) +cat > docker-compose.override.yml <<'YAML' +services: + sub2api: + environment: + - REDIS_HOST=db.clicodeplus.com + depends_on: !reset {} + redis: + profiles: + - disabled +YAML ``` ### 5. 
生产服务器:更新镜像标签并启动服务 From b368bb6ea15ba3298d835d3d7c57c519d34bcb3f Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 04:55:17 +0800 Subject: [PATCH 035/175] chore: bump version to 0.1.79.4 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index a281565e..7180e218 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.79.3 +0.1.79.4 From 130112a84a69ec62576d84e2825eab0b1010f853 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 12:25:04 +0800 Subject: [PATCH 036/175] =?UTF-8?q?fix:=20=E8=A1=A5=E9=BD=90=20Antigravity?= =?UTF-8?q?=20OAuth=20=E8=B4=A6=E5=8F=B7=20project=5Fid=20=E8=8E=B7?= =?UTF-8?q?=E5=8F=96=E9=80=BB=E8=BE=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 部分账号 loadCodeAssist 不会立即返回 cloudaicompanionProject, 导致转发时 project 字段为空,上游返回 400 "Invalid project resource name projects/"。 - 新增 OnboardUser API:当 loadCodeAssist 未返回 project_id 时, 通过 onboardUser 完成账号初始化并获取 project_id - token 刷新时增加 onboard 兜底逻辑 - GetAccessToken 按需补齐:转发时发现 project_id 为空立即触发刷新 - 新增 resolveDefaultTierID 单元测试 --- .gitignore | 4 +- antigravity_projectid_fix.patch | 323 ++++++++++++++++++ backend/internal/pkg/antigravity/client.go | 124 +++++++ .../service/antigravity_oauth_service.go | 57 +++- .../service/antigravity_oauth_service_test.go | 64 ++++ .../service/antigravity_token_provider.go | 20 ++ 6 files changed, 590 insertions(+), 2 deletions(-) create mode 100644 antigravity_projectid_fix.patch create mode 100644 backend/internal/service/antigravity_oauth_service_test.go diff --git a/.gitignore b/.gitignore index 527ccd6c..4ee1894a 100644 --- a/.gitignore +++ b/.gitignore @@ -135,4 +135,6 @@ docs/* # =================== # 压测工具 # =================== -tools/loadtest/ \ No newline at end of file +tools/loadtest/ +# Antigravity Manager +Antigravity-Manager/ diff --git 
a/antigravity_projectid_fix.patch b/antigravity_projectid_fix.patch new file mode 100644 index 00000000..96ecf443 --- /dev/null +++ b/antigravity_projectid_fix.patch @@ -0,0 +1,323 @@ +diff --git a/backend/internal/pkg/antigravity/client.go b/backend/internal/pkg/antigravity/client.go +index a6279b11..3556da88 100644 +--- a/backend/internal/pkg/antigravity/client.go ++++ b/backend/internal/pkg/antigravity/client.go +@@ -115,6 +115,23 @@ type LoadCodeAssistResponse struct { + IneligibleTiers []*IneligibleTier `json:"ineligibleTiers,omitempty"` + } + ++// OnboardUserRequest onboardUser 请求 ++type OnboardUserRequest struct { ++ TierID string `json:"tierId"` ++ Metadata struct { ++ IDEType string `json:"ideType"` ++ Platform string `json:"platform,omitempty"` ++ PluginType string `json:"pluginType,omitempty"` ++ } `json:"metadata"` ++} ++ ++// OnboardUserResponse onboardUser 响应 ++type OnboardUserResponse struct { ++ Name string `json:"name,omitempty"` ++ Done bool `json:"done,omitempty"` ++ Response map[string]any `json:"response,omitempty"` ++} ++ + // GetTier 获取账户类型 + // 优先返回 paidTier(付费订阅级别),否则返回 currentTier + func (r *LoadCodeAssistResponse) GetTier() string { +@@ -361,6 +378,113 @@ func (c *Client) LoadCodeAssist(ctx context.Context, accessToken string) (*LoadC + return nil, nil, lastErr + } + ++// OnboardUser 触发账号 onboarding,并返回 project_id ++// 说明: ++// 1) 部分账号 loadCodeAssist 不会立即返回 cloudaicompanionProject; ++// 2) 这时需要调用 onboardUser 完成初始化,之后才能拿到 project_id。 ++func (c *Client) OnboardUser(ctx context.Context, accessToken, tierID string) (string, error) { ++ tierID = strings.TrimSpace(tierID) ++ if tierID == "" { ++ return "", fmt.Errorf("tier_id 为空") ++ } ++ ++ reqBody := OnboardUserRequest{TierID: tierID} ++ reqBody.Metadata.IDEType = "ANTIGRAVITY" ++ reqBody.Metadata.Platform = "PLATFORM_UNSPECIFIED" ++ reqBody.Metadata.PluginType = "GEMINI" ++ ++ bodyBytes, err := json.Marshal(reqBody) ++ if err != nil { ++ return "", fmt.Errorf("序列化请求失败: %w", err) ++ } ++ ++ 
availableURLs := BaseURLs ++ var lastErr error ++ ++ for urlIdx, baseURL := range availableURLs { ++ apiURL := baseURL + "/v1internal:onboardUser" ++ ++ for attempt := 1; attempt <= 5; attempt++ { ++ req, err := http.NewRequestWithContext(ctx, http.MethodPost, apiURL, strings.NewReader(string(bodyBytes))) ++ if err != nil { ++ lastErr = fmt.Errorf("创建请求失败: %w", err) ++ break ++ } ++ req.Header.Set("Authorization", "Bearer "+accessToken) ++ req.Header.Set("Content-Type", "application/json") ++ req.Header.Set("User-Agent", UserAgent) ++ ++ resp, err := c.httpClient.Do(req) ++ if err != nil { ++ lastErr = fmt.Errorf("onboardUser 请求失败: %w", err) ++ if shouldFallbackToNextURL(err, 0) && urlIdx < len(availableURLs)-1 { ++ log.Printf("[antigravity] onboardUser URL fallback: %s -> %s", baseURL, availableURLs[urlIdx+1]) ++ break ++ } ++ return "", lastErr ++ } ++ ++ respBodyBytes, err := io.ReadAll(resp.Body) ++ _ = resp.Body.Close() ++ if err != nil { ++ return "", fmt.Errorf("读取响应失败: %w", err) ++ } ++ ++ if shouldFallbackToNextURL(nil, resp.StatusCode) && urlIdx < len(availableURLs)-1 { ++ log.Printf("[antigravity] onboardUser URL fallback (HTTP %d): %s -> %s", resp.StatusCode, baseURL, availableURLs[urlIdx+1]) ++ break ++ } ++ ++ if resp.StatusCode != http.StatusOK { ++ lastErr = fmt.Errorf("onboardUser 失败 (HTTP %d): %s", resp.StatusCode, string(respBodyBytes)) ++ return "", lastErr ++ } ++ ++ var onboardResp OnboardUserResponse ++ if err := json.Unmarshal(respBodyBytes, &onboardResp); err != nil { ++ lastErr = fmt.Errorf("onboardUser 响应解析失败: %w", err) ++ return "", lastErr ++ } ++ ++ if onboardResp.Done { ++ if projectID := extractProjectIDFromOnboardResponse(onboardResp.Response); projectID != "" { ++ DefaultURLAvailability.MarkSuccess(baseURL) ++ return projectID, nil ++ } ++ lastErr = fmt.Errorf("onboardUser 完成但未返回 project_id") ++ return "", lastErr ++ } ++ ++ // done=false 时等待后重试(与 CLIProxyAPI 行为一致) ++ time.Sleep(2 * time.Second) ++ } ++ } ++ ++ if lastErr != nil { 
++ return "", lastErr ++ } ++ return "", fmt.Errorf("onboardUser 未返回 project_id") ++} ++ ++func extractProjectIDFromOnboardResponse(resp map[string]any) string { ++ if len(resp) == 0 { ++ return "" ++ } ++ ++ if v, ok := resp["cloudaicompanionProject"]; ok { ++ switch project := v.(type) { ++ case string: ++ return strings.TrimSpace(project) ++ case map[string]any: ++ if id, ok := project["id"].(string); ok { ++ return strings.TrimSpace(id) ++ } ++ } ++ } ++ ++ return "" ++} ++ + // ModelQuotaInfo 模型配额信息 + type ModelQuotaInfo struct { + RemainingFraction float64 `json:"remainingFraction"` +diff --git a/backend/internal/service/antigravity_oauth_service.go b/backend/internal/service/antigravity_oauth_service.go +index fa8379ed..86b7cc2e 100644 +--- a/backend/internal/service/antigravity_oauth_service.go ++++ b/backend/internal/service/antigravity_oauth_service.go +@@ -273,12 +273,20 @@ func (s *AntigravityOAuthService) loadProjectIDWithRetry(ctx context.Context, ac + } + + client := antigravity.NewClient(proxyURL) +- loadResp, _, err := client.LoadCodeAssist(ctx, accessToken) ++ loadResp, loadRaw, err := client.LoadCodeAssist(ctx, accessToken) + + if err == nil && loadResp != nil && loadResp.CloudAICompanionProject != "" { + return loadResp.CloudAICompanionProject, nil + } + ++ if err == nil { ++ if projectID, onboardErr := tryOnboardProjectID(ctx, client, accessToken, loadRaw); onboardErr == nil && projectID != "" { ++ return projectID, nil ++ } else if onboardErr != nil { ++ lastErr = onboardErr ++ } ++ } ++ + // 记录错误 + if err != nil { + lastErr = err +@@ -292,6 +300,53 @@ func (s *AntigravityOAuthService) loadProjectIDWithRetry(ctx context.Context, ac + return "", fmt.Errorf("获取 project_id 失败 (重试 %d 次后): %w", maxRetries, lastErr) + } + ++func tryOnboardProjectID(ctx context.Context, client *antigravity.Client, accessToken string, loadRaw map[string]any) (string, error) { ++ tierID := resolveDefaultTierID(loadRaw) ++ if tierID == "" { ++ return "", 
fmt.Errorf("loadCodeAssist 未返回可用的默认 tier") ++ } ++ ++ projectID, err := client.OnboardUser(ctx, accessToken, tierID) ++ if err != nil { ++ return "", fmt.Errorf("onboardUser 失败 (tier=%s): %w", tierID, err) ++ } ++ return projectID, nil ++} ++ ++func resolveDefaultTierID(loadRaw map[string]any) string { ++ if len(loadRaw) == 0 { ++ return "" ++ } ++ ++ rawTiers, ok := loadRaw["allowedTiers"] ++ if !ok { ++ return "" ++ } ++ ++ tiers, ok := rawTiers.([]any) ++ if !ok { ++ return "" ++ } ++ ++ for _, rawTier := range tiers { ++ tier, ok := rawTier.(map[string]any) ++ if !ok { ++ continue ++ } ++ if isDefault, _ := tier["isDefault"].(bool); !isDefault { ++ continue ++ } ++ if id, ok := tier["id"].(string); ok { ++ id = strings.TrimSpace(id) ++ if id != "" { ++ return id ++ } ++ } ++ } ++ ++ return "" ++} ++ + // BuildAccountCredentials 构建账户凭证 + func (s *AntigravityOAuthService) BuildAccountCredentials(tokenInfo *AntigravityTokenInfo) map[string]any { + creds := map[string]any{ +diff --git a/backend/internal/service/antigravity_token_provider.go b/backend/internal/service/antigravity_token_provider.go +index 94eca94d..dde3bb07 100644 +--- a/backend/internal/service/antigravity_token_provider.go ++++ b/backend/internal/service/antigravity_token_provider.go +@@ -102,6 +102,26 @@ func (p *AntigravityTokenProvider) GetAccessToken(ctx context.Context, account * + return "", errors.New("access_token not found in credentials") + } + ++ // 如果账号还没有 project_id,优先尝试在线补齐,避免请求 daily/sandbox 时出现 ++ // "Invalid project resource name projects/"。 ++ if strings.TrimSpace(account.GetCredential("project_id")) == "" && p.antigravityOAuthService != nil { ++ if tokenInfo, err := p.antigravityOAuthService.RefreshAccountToken(ctx, account); err == nil { ++ newCredentials := p.antigravityOAuthService.BuildAccountCredentials(tokenInfo) ++ for k, v := range account.Credentials { ++ if _, exists := newCredentials[k]; !exists { ++ newCredentials[k] = v ++ } ++ } ++ account.Credentials = 
newCredentials ++ if updateErr := p.accountRepo.Update(ctx, account); updateErr != nil { ++ log.Printf("[AntigravityTokenProvider] Failed to persist project_id补齐: %v", updateErr) ++ } ++ if refreshed := strings.TrimSpace(account.GetCredential("access_token")); refreshed != "" { ++ accessToken = refreshed ++ } ++ } ++ } ++ + // 3. 存入缓存(验证版本后再写入,避免异步刷新任务与请求线程的竞态条件) + if p.tokenCache != nil { + latestAccount, isStale := CheckTokenVersion(ctx, account, p.accountRepo) +diff --git a/backend/internal/service/antigravity_oauth_service_test.go b/backend/internal/service/antigravity_oauth_service_test.go +new file mode 100644 +index 00000000..e041c2b4 +--- /dev/null ++++ b/backend/internal/service/antigravity_oauth_service_test.go +@@ -0,0 +1,64 @@ ++package service ++ ++import ( ++ "testing" ++) ++ ++func TestResolveDefaultTierID(t *testing.T) { ++ t.Parallel() ++ ++ tests := []struct { ++ name string ++ loadRaw map[string]any ++ want string ++ }{ ++ { ++ name: "missing allowedTiers", ++ loadRaw: map[string]any{ ++ "paidTier": map[string]any{"id": "g1-pro-tier"}, ++ }, ++ want: "", ++ }, ++ { ++ name: "allowedTiers but no default", ++ loadRaw: map[string]any{ ++ "allowedTiers": []any{ ++ map[string]any{"id": "free-tier", "isDefault": false}, ++ map[string]any{"id": "standard-tier", "isDefault": false}, ++ }, ++ }, ++ want: "", ++ }, ++ { ++ name: "default tier found", ++ loadRaw: map[string]any{ ++ "allowedTiers": []any{ ++ map[string]any{"id": "free-tier", "isDefault": true}, ++ map[string]any{"id": "standard-tier", "isDefault": false}, ++ }, ++ }, ++ want: "free-tier", ++ }, ++ { ++ name: "default tier id with spaces", ++ loadRaw: map[string]any{ ++ "allowedTiers": []any{ ++ map[string]any{"id": " standard-tier ", "isDefault": true}, ++ }, ++ }, ++ want: "standard-tier", ++ }, ++ } ++ ++ for _, tc := range tests { ++ tc := tc ++ t.Run(tc.name, func(t *testing.T) { ++ t.Parallel() ++ ++ got := resolveDefaultTierID(tc.loadRaw) ++ if got != tc.want { ++ 
t.Fatalf("resolveDefaultTierID() = %q, want %q", got, tc.want) ++ } ++ }) ++ } ++} diff --git a/backend/internal/pkg/antigravity/client.go b/backend/internal/pkg/antigravity/client.go index a6279b11..3556da88 100644 --- a/backend/internal/pkg/antigravity/client.go +++ b/backend/internal/pkg/antigravity/client.go @@ -115,6 +115,23 @@ type LoadCodeAssistResponse struct { IneligibleTiers []*IneligibleTier `json:"ineligibleTiers,omitempty"` } +// OnboardUserRequest onboardUser 请求 +type OnboardUserRequest struct { + TierID string `json:"tierId"` + Metadata struct { + IDEType string `json:"ideType"` + Platform string `json:"platform,omitempty"` + PluginType string `json:"pluginType,omitempty"` + } `json:"metadata"` +} + +// OnboardUserResponse onboardUser 响应 +type OnboardUserResponse struct { + Name string `json:"name,omitempty"` + Done bool `json:"done,omitempty"` + Response map[string]any `json:"response,omitempty"` +} + // GetTier 获取账户类型 // 优先返回 paidTier(付费订阅级别),否则返回 currentTier func (r *LoadCodeAssistResponse) GetTier() string { @@ -361,6 +378,113 @@ func (c *Client) LoadCodeAssist(ctx context.Context, accessToken string) (*LoadC return nil, nil, lastErr } +// OnboardUser 触发账号 onboarding,并返回 project_id +// 说明: +// 1) 部分账号 loadCodeAssist 不会立即返回 cloudaicompanionProject; +// 2) 这时需要调用 onboardUser 完成初始化,之后才能拿到 project_id。 +func (c *Client) OnboardUser(ctx context.Context, accessToken, tierID string) (string, error) { + tierID = strings.TrimSpace(tierID) + if tierID == "" { + return "", fmt.Errorf("tier_id 为空") + } + + reqBody := OnboardUserRequest{TierID: tierID} + reqBody.Metadata.IDEType = "ANTIGRAVITY" + reqBody.Metadata.Platform = "PLATFORM_UNSPECIFIED" + reqBody.Metadata.PluginType = "GEMINI" + + bodyBytes, err := json.Marshal(reqBody) + if err != nil { + return "", fmt.Errorf("序列化请求失败: %w", err) + } + + availableURLs := BaseURLs + var lastErr error + + for urlIdx, baseURL := range availableURLs { + apiURL := baseURL + "/v1internal:onboardUser" + + for attempt := 1; 
attempt <= 5; attempt++ { + req, err := http.NewRequestWithContext(ctx, http.MethodPost, apiURL, strings.NewReader(string(bodyBytes))) + if err != nil { + lastErr = fmt.Errorf("创建请求失败: %w", err) + break + } + req.Header.Set("Authorization", "Bearer "+accessToken) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("User-Agent", UserAgent) + + resp, err := c.httpClient.Do(req) + if err != nil { + lastErr = fmt.Errorf("onboardUser 请求失败: %w", err) + if shouldFallbackToNextURL(err, 0) && urlIdx < len(availableURLs)-1 { + log.Printf("[antigravity] onboardUser URL fallback: %s -> %s", baseURL, availableURLs[urlIdx+1]) + break + } + return "", lastErr + } + + respBodyBytes, err := io.ReadAll(resp.Body) + _ = resp.Body.Close() + if err != nil { + return "", fmt.Errorf("读取响应失败: %w", err) + } + + if shouldFallbackToNextURL(nil, resp.StatusCode) && urlIdx < len(availableURLs)-1 { + log.Printf("[antigravity] onboardUser URL fallback (HTTP %d): %s -> %s", resp.StatusCode, baseURL, availableURLs[urlIdx+1]) + break + } + + if resp.StatusCode != http.StatusOK { + lastErr = fmt.Errorf("onboardUser 失败 (HTTP %d): %s", resp.StatusCode, string(respBodyBytes)) + return "", lastErr + } + + var onboardResp OnboardUserResponse + if err := json.Unmarshal(respBodyBytes, &onboardResp); err != nil { + lastErr = fmt.Errorf("onboardUser 响应解析失败: %w", err) + return "", lastErr + } + + if onboardResp.Done { + if projectID := extractProjectIDFromOnboardResponse(onboardResp.Response); projectID != "" { + DefaultURLAvailability.MarkSuccess(baseURL) + return projectID, nil + } + lastErr = fmt.Errorf("onboardUser 完成但未返回 project_id") + return "", lastErr + } + + // done=false 时等待后重试(与 CLIProxyAPI 行为一致) + time.Sleep(2 * time.Second) + } + } + + if lastErr != nil { + return "", lastErr + } + return "", fmt.Errorf("onboardUser 未返回 project_id") +} + +func extractProjectIDFromOnboardResponse(resp map[string]any) string { + if len(resp) == 0 { + return "" + } + + if v, ok := 
resp["cloudaicompanionProject"]; ok { + switch project := v.(type) { + case string: + return strings.TrimSpace(project) + case map[string]any: + if id, ok := project["id"].(string); ok { + return strings.TrimSpace(id) + } + } + } + + return "" +} + // ModelQuotaInfo 模型配额信息 type ModelQuotaInfo struct { RemainingFraction float64 `json:"remainingFraction"` diff --git a/backend/internal/service/antigravity_oauth_service.go b/backend/internal/service/antigravity_oauth_service.go index fa8379ed..86b7cc2e 100644 --- a/backend/internal/service/antigravity_oauth_service.go +++ b/backend/internal/service/antigravity_oauth_service.go @@ -273,12 +273,20 @@ func (s *AntigravityOAuthService) loadProjectIDWithRetry(ctx context.Context, ac } client := antigravity.NewClient(proxyURL) - loadResp, _, err := client.LoadCodeAssist(ctx, accessToken) + loadResp, loadRaw, err := client.LoadCodeAssist(ctx, accessToken) if err == nil && loadResp != nil && loadResp.CloudAICompanionProject != "" { return loadResp.CloudAICompanionProject, nil } + if err == nil { + if projectID, onboardErr := tryOnboardProjectID(ctx, client, accessToken, loadRaw); onboardErr == nil && projectID != "" { + return projectID, nil + } else if onboardErr != nil { + lastErr = onboardErr + } + } + // 记录错误 if err != nil { lastErr = err @@ -292,6 +300,53 @@ func (s *AntigravityOAuthService) loadProjectIDWithRetry(ctx context.Context, ac return "", fmt.Errorf("获取 project_id 失败 (重试 %d 次后): %w", maxRetries, lastErr) } +func tryOnboardProjectID(ctx context.Context, client *antigravity.Client, accessToken string, loadRaw map[string]any) (string, error) { + tierID := resolveDefaultTierID(loadRaw) + if tierID == "" { + return "", fmt.Errorf("loadCodeAssist 未返回可用的默认 tier") + } + + projectID, err := client.OnboardUser(ctx, accessToken, tierID) + if err != nil { + return "", fmt.Errorf("onboardUser 失败 (tier=%s): %w", tierID, err) + } + return projectID, nil +} + +func resolveDefaultTierID(loadRaw map[string]any) string { + if 
len(loadRaw) == 0 { + return "" + } + + rawTiers, ok := loadRaw["allowedTiers"] + if !ok { + return "" + } + + tiers, ok := rawTiers.([]any) + if !ok { + return "" + } + + for _, rawTier := range tiers { + tier, ok := rawTier.(map[string]any) + if !ok { + continue + } + if isDefault, _ := tier["isDefault"].(bool); !isDefault { + continue + } + if id, ok := tier["id"].(string); ok { + id = strings.TrimSpace(id) + if id != "" { + return id + } + } + } + + return "" +} + // BuildAccountCredentials 构建账户凭证 func (s *AntigravityOAuthService) BuildAccountCredentials(tokenInfo *AntigravityTokenInfo) map[string]any { creds := map[string]any{ diff --git a/backend/internal/service/antigravity_oauth_service_test.go b/backend/internal/service/antigravity_oauth_service_test.go new file mode 100644 index 00000000..e041c2b4 --- /dev/null +++ b/backend/internal/service/antigravity_oauth_service_test.go @@ -0,0 +1,64 @@ +package service + +import ( + "testing" +) + +func TestResolveDefaultTierID(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + loadRaw map[string]any + want string + }{ + { + name: "missing allowedTiers", + loadRaw: map[string]any{ + "paidTier": map[string]any{"id": "g1-pro-tier"}, + }, + want: "", + }, + { + name: "allowedTiers but no default", + loadRaw: map[string]any{ + "allowedTiers": []any{ + map[string]any{"id": "free-tier", "isDefault": false}, + map[string]any{"id": "standard-tier", "isDefault": false}, + }, + }, + want: "", + }, + { + name: "default tier found", + loadRaw: map[string]any{ + "allowedTiers": []any{ + map[string]any{"id": "free-tier", "isDefault": true}, + map[string]any{"id": "standard-tier", "isDefault": false}, + }, + }, + want: "free-tier", + }, + { + name: "default tier id with spaces", + loadRaw: map[string]any{ + "allowedTiers": []any{ + map[string]any{"id": " standard-tier ", "isDefault": true}, + }, + }, + want: "standard-tier", + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) 
{ + t.Parallel() + + got := resolveDefaultTierID(tc.loadRaw) + if got != tc.want { + t.Fatalf("resolveDefaultTierID() = %q, want %q", got, tc.want) + } + }) + } +} diff --git a/backend/internal/service/antigravity_token_provider.go b/backend/internal/service/antigravity_token_provider.go index 1eb740f9..e0ada9f1 100644 --- a/backend/internal/service/antigravity_token_provider.go +++ b/backend/internal/service/antigravity_token_provider.go @@ -113,6 +113,26 @@ func (p *AntigravityTokenProvider) GetAccessToken(ctx context.Context, account * return "", errors.New("access_token not found in credentials") } + // 如果账号还没有 project_id,优先尝试在线补齐,避免请求 daily/sandbox 时出现 + // "Invalid project resource name projects/"。 + if strings.TrimSpace(account.GetCredential("project_id")) == "" && p.antigravityOAuthService != nil { + if tokenInfo, err := p.antigravityOAuthService.RefreshAccountToken(ctx, account); err == nil { + newCredentials := p.antigravityOAuthService.BuildAccountCredentials(tokenInfo) + for k, v := range account.Credentials { + if _, exists := newCredentials[k]; !exists { + newCredentials[k] = v + } + } + account.Credentials = newCredentials + if updateErr := p.accountRepo.Update(ctx, account); updateErr != nil { + log.Printf("[AntigravityTokenProvider] Failed to persist project_id补齐: %v", updateErr) + } + if refreshed := strings.TrimSpace(account.GetCredential("access_token")); refreshed != "" { + accessToken = refreshed + } + } + } + // 3. 
存入缓存(验证版本后再写入,避免异步刷新任务与请求线程的竞态条件) if p.tokenCache != nil { latestAccount, isStale := CheckTokenVersion(ctx, account, p.accountRepo) From 90e4328885e8758d1f96c67a6504cb2317c9ba50 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 12:25:15 +0800 Subject: [PATCH 037/175] chore: remove patch file from tracking --- .gitignore | 1 + antigravity_projectid_fix.patch | 323 -------------------------------- 2 files changed, 1 insertion(+), 323 deletions(-) delete mode 100644 antigravity_projectid_fix.patch diff --git a/.gitignore b/.gitignore index 4ee1894a..925912fa 100644 --- a/.gitignore +++ b/.gitignore @@ -138,3 +138,4 @@ docs/* tools/loadtest/ # Antigravity Manager Antigravity-Manager/ +antigravity_projectid_fix.patch diff --git a/antigravity_projectid_fix.patch b/antigravity_projectid_fix.patch deleted file mode 100644 index 96ecf443..00000000 --- a/antigravity_projectid_fix.patch +++ /dev/null @@ -1,323 +0,0 @@ -diff --git a/backend/internal/pkg/antigravity/client.go b/backend/internal/pkg/antigravity/client.go -index a6279b11..3556da88 100644 ---- a/backend/internal/pkg/antigravity/client.go -+++ b/backend/internal/pkg/antigravity/client.go -@@ -115,6 +115,23 @@ type LoadCodeAssistResponse struct { - IneligibleTiers []*IneligibleTier `json:"ineligibleTiers,omitempty"` - } - -+// OnboardUserRequest onboardUser 请求 -+type OnboardUserRequest struct { -+ TierID string `json:"tierId"` -+ Metadata struct { -+ IDEType string `json:"ideType"` -+ Platform string `json:"platform,omitempty"` -+ PluginType string `json:"pluginType,omitempty"` -+ } `json:"metadata"` -+} -+ -+// OnboardUserResponse onboardUser 响应 -+type OnboardUserResponse struct { -+ Name string `json:"name,omitempty"` -+ Done bool `json:"done,omitempty"` -+ Response map[string]any `json:"response,omitempty"` -+} -+ - // GetTier 获取账户类型 - // 优先返回 paidTier(付费订阅级别),否则返回 currentTier - func (r *LoadCodeAssistResponse) GetTier() string { -@@ -361,6 +378,113 @@ func (c *Client) LoadCodeAssist(ctx 
context.Context, accessToken string) (*LoadC - return nil, nil, lastErr - } - -+// OnboardUser 触发账号 onboarding,并返回 project_id -+// 说明: -+// 1) 部分账号 loadCodeAssist 不会立即返回 cloudaicompanionProject; -+// 2) 这时需要调用 onboardUser 完成初始化,之后才能拿到 project_id。 -+func (c *Client) OnboardUser(ctx context.Context, accessToken, tierID string) (string, error) { -+ tierID = strings.TrimSpace(tierID) -+ if tierID == "" { -+ return "", fmt.Errorf("tier_id 为空") -+ } -+ -+ reqBody := OnboardUserRequest{TierID: tierID} -+ reqBody.Metadata.IDEType = "ANTIGRAVITY" -+ reqBody.Metadata.Platform = "PLATFORM_UNSPECIFIED" -+ reqBody.Metadata.PluginType = "GEMINI" -+ -+ bodyBytes, err := json.Marshal(reqBody) -+ if err != nil { -+ return "", fmt.Errorf("序列化请求失败: %w", err) -+ } -+ -+ availableURLs := BaseURLs -+ var lastErr error -+ -+ for urlIdx, baseURL := range availableURLs { -+ apiURL := baseURL + "/v1internal:onboardUser" -+ -+ for attempt := 1; attempt <= 5; attempt++ { -+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, apiURL, strings.NewReader(string(bodyBytes))) -+ if err != nil { -+ lastErr = fmt.Errorf("创建请求失败: %w", err) -+ break -+ } -+ req.Header.Set("Authorization", "Bearer "+accessToken) -+ req.Header.Set("Content-Type", "application/json") -+ req.Header.Set("User-Agent", UserAgent) -+ -+ resp, err := c.httpClient.Do(req) -+ if err != nil { -+ lastErr = fmt.Errorf("onboardUser 请求失败: %w", err) -+ if shouldFallbackToNextURL(err, 0) && urlIdx < len(availableURLs)-1 { -+ log.Printf("[antigravity] onboardUser URL fallback: %s -> %s", baseURL, availableURLs[urlIdx+1]) -+ break -+ } -+ return "", lastErr -+ } -+ -+ respBodyBytes, err := io.ReadAll(resp.Body) -+ _ = resp.Body.Close() -+ if err != nil { -+ return "", fmt.Errorf("读取响应失败: %w", err) -+ } -+ -+ if shouldFallbackToNextURL(nil, resp.StatusCode) && urlIdx < len(availableURLs)-1 { -+ log.Printf("[antigravity] onboardUser URL fallback (HTTP %d): %s -> %s", resp.StatusCode, baseURL, availableURLs[urlIdx+1]) -+ break -+ } 
-+ -+ if resp.StatusCode != http.StatusOK { -+ lastErr = fmt.Errorf("onboardUser 失败 (HTTP %d): %s", resp.StatusCode, string(respBodyBytes)) -+ return "", lastErr -+ } -+ -+ var onboardResp OnboardUserResponse -+ if err := json.Unmarshal(respBodyBytes, &onboardResp); err != nil { -+ lastErr = fmt.Errorf("onboardUser 响应解析失败: %w", err) -+ return "", lastErr -+ } -+ -+ if onboardResp.Done { -+ if projectID := extractProjectIDFromOnboardResponse(onboardResp.Response); projectID != "" { -+ DefaultURLAvailability.MarkSuccess(baseURL) -+ return projectID, nil -+ } -+ lastErr = fmt.Errorf("onboardUser 完成但未返回 project_id") -+ return "", lastErr -+ } -+ -+ // done=false 时等待后重试(与 CLIProxyAPI 行为一致) -+ time.Sleep(2 * time.Second) -+ } -+ } -+ -+ if lastErr != nil { -+ return "", lastErr -+ } -+ return "", fmt.Errorf("onboardUser 未返回 project_id") -+} -+ -+func extractProjectIDFromOnboardResponse(resp map[string]any) string { -+ if len(resp) == 0 { -+ return "" -+ } -+ -+ if v, ok := resp["cloudaicompanionProject"]; ok { -+ switch project := v.(type) { -+ case string: -+ return strings.TrimSpace(project) -+ case map[string]any: -+ if id, ok := project["id"].(string); ok { -+ return strings.TrimSpace(id) -+ } -+ } -+ } -+ -+ return "" -+} -+ - // ModelQuotaInfo 模型配额信息 - type ModelQuotaInfo struct { - RemainingFraction float64 `json:"remainingFraction"` -diff --git a/backend/internal/service/antigravity_oauth_service.go b/backend/internal/service/antigravity_oauth_service.go -index fa8379ed..86b7cc2e 100644 ---- a/backend/internal/service/antigravity_oauth_service.go -+++ b/backend/internal/service/antigravity_oauth_service.go -@@ -273,12 +273,20 @@ func (s *AntigravityOAuthService) loadProjectIDWithRetry(ctx context.Context, ac - } - - client := antigravity.NewClient(proxyURL) -- loadResp, _, err := client.LoadCodeAssist(ctx, accessToken) -+ loadResp, loadRaw, err := client.LoadCodeAssist(ctx, accessToken) - - if err == nil && loadResp != nil && loadResp.CloudAICompanionProject != 
"" { - return loadResp.CloudAICompanionProject, nil - } - -+ if err == nil { -+ if projectID, onboardErr := tryOnboardProjectID(ctx, client, accessToken, loadRaw); onboardErr == nil && projectID != "" { -+ return projectID, nil -+ } else if onboardErr != nil { -+ lastErr = onboardErr -+ } -+ } -+ - // 记录错误 - if err != nil { - lastErr = err -@@ -292,6 +300,53 @@ func (s *AntigravityOAuthService) loadProjectIDWithRetry(ctx context.Context, ac - return "", fmt.Errorf("获取 project_id 失败 (重试 %d 次后): %w", maxRetries, lastErr) - } - -+func tryOnboardProjectID(ctx context.Context, client *antigravity.Client, accessToken string, loadRaw map[string]any) (string, error) { -+ tierID := resolveDefaultTierID(loadRaw) -+ if tierID == "" { -+ return "", fmt.Errorf("loadCodeAssist 未返回可用的默认 tier") -+ } -+ -+ projectID, err := client.OnboardUser(ctx, accessToken, tierID) -+ if err != nil { -+ return "", fmt.Errorf("onboardUser 失败 (tier=%s): %w", tierID, err) -+ } -+ return projectID, nil -+} -+ -+func resolveDefaultTierID(loadRaw map[string]any) string { -+ if len(loadRaw) == 0 { -+ return "" -+ } -+ -+ rawTiers, ok := loadRaw["allowedTiers"] -+ if !ok { -+ return "" -+ } -+ -+ tiers, ok := rawTiers.([]any) -+ if !ok { -+ return "" -+ } -+ -+ for _, rawTier := range tiers { -+ tier, ok := rawTier.(map[string]any) -+ if !ok { -+ continue -+ } -+ if isDefault, _ := tier["isDefault"].(bool); !isDefault { -+ continue -+ } -+ if id, ok := tier["id"].(string); ok { -+ id = strings.TrimSpace(id) -+ if id != "" { -+ return id -+ } -+ } -+ } -+ -+ return "" -+} -+ - // BuildAccountCredentials 构建账户凭证 - func (s *AntigravityOAuthService) BuildAccountCredentials(tokenInfo *AntigravityTokenInfo) map[string]any { - creds := map[string]any{ -diff --git a/backend/internal/service/antigravity_token_provider.go b/backend/internal/service/antigravity_token_provider.go -index 94eca94d..dde3bb07 100644 ---- a/backend/internal/service/antigravity_token_provider.go -+++ 
b/backend/internal/service/antigravity_token_provider.go -@@ -102,6 +102,26 @@ func (p *AntigravityTokenProvider) GetAccessToken(ctx context.Context, account * - return "", errors.New("access_token not found in credentials") - } - -+ // 如果账号还没有 project_id,优先尝试在线补齐,避免请求 daily/sandbox 时出现 -+ // "Invalid project resource name projects/"。 -+ if strings.TrimSpace(account.GetCredential("project_id")) == "" && p.antigravityOAuthService != nil { -+ if tokenInfo, err := p.antigravityOAuthService.RefreshAccountToken(ctx, account); err == nil { -+ newCredentials := p.antigravityOAuthService.BuildAccountCredentials(tokenInfo) -+ for k, v := range account.Credentials { -+ if _, exists := newCredentials[k]; !exists { -+ newCredentials[k] = v -+ } -+ } -+ account.Credentials = newCredentials -+ if updateErr := p.accountRepo.Update(ctx, account); updateErr != nil { -+ log.Printf("[AntigravityTokenProvider] Failed to persist project_id补齐: %v", updateErr) -+ } -+ if refreshed := strings.TrimSpace(account.GetCredential("access_token")); refreshed != "" { -+ accessToken = refreshed -+ } -+ } -+ } -+ - // 3. 
存入缓存(验证版本后再写入,避免异步刷新任务与请求线程的竞态条件) - if p.tokenCache != nil { - latestAccount, isStale := CheckTokenVersion(ctx, account, p.accountRepo) -diff --git a/backend/internal/service/antigravity_oauth_service_test.go b/backend/internal/service/antigravity_oauth_service_test.go -new file mode 100644 -index 00000000..e041c2b4 ---- /dev/null -+++ b/backend/internal/service/antigravity_oauth_service_test.go -@@ -0,0 +1,64 @@ -+package service -+ -+import ( -+ "testing" -+) -+ -+func TestResolveDefaultTierID(t *testing.T) { -+ t.Parallel() -+ -+ tests := []struct { -+ name string -+ loadRaw map[string]any -+ want string -+ }{ -+ { -+ name: "missing allowedTiers", -+ loadRaw: map[string]any{ -+ "paidTier": map[string]any{"id": "g1-pro-tier"}, -+ }, -+ want: "", -+ }, -+ { -+ name: "allowedTiers but no default", -+ loadRaw: map[string]any{ -+ "allowedTiers": []any{ -+ map[string]any{"id": "free-tier", "isDefault": false}, -+ map[string]any{"id": "standard-tier", "isDefault": false}, -+ }, -+ }, -+ want: "", -+ }, -+ { -+ name: "default tier found", -+ loadRaw: map[string]any{ -+ "allowedTiers": []any{ -+ map[string]any{"id": "free-tier", "isDefault": true}, -+ map[string]any{"id": "standard-tier", "isDefault": false}, -+ }, -+ }, -+ want: "free-tier", -+ }, -+ { -+ name: "default tier id with spaces", -+ loadRaw: map[string]any{ -+ "allowedTiers": []any{ -+ map[string]any{"id": " standard-tier ", "isDefault": true}, -+ }, -+ }, -+ want: "standard-tier", -+ }, -+ } -+ -+ for _, tc := range tests { -+ tc := tc -+ t.Run(tc.name, func(t *testing.T) { -+ t.Parallel() -+ -+ got := resolveDefaultTierID(tc.loadRaw) -+ if got != tc.want { -+ t.Fatalf("resolveDefaultTierID() = %q, want %q", got, tc.want) -+ } -+ }) -+ } -+} From 4661c2f90fd158cc1aebe724ed905964549faa72 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 12:35:26 +0800 Subject: [PATCH 038/175] chore: bump version to 0.1.79.5 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 7180e218..3e8643b1 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.79.4 +0.1.79.5 From 5b6da04a028929608b800f2dd24bd4f5aadca39b Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 12:43:45 +0800 Subject: [PATCH 039/175] =?UTF-8?q?docs:=20=E6=9B=B4=E6=96=B0=20CI=20?= =?UTF-8?q?=E6=A3=80=E6=9F=A5=E6=B5=81=E7=A8=8B=EF=BC=8C=E5=BC=BA=E8=B0=83?= =?UTF-8?q?=E6=9C=AC=E5=9C=B0=E5=85=88=E6=89=A7=E8=A1=8C=E5=85=A8=E9=83=A8?= =?UTF-8?q?=E6=A3=80=E6=9F=A5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- AGENTS.md | 35 +++++++++++++++++++++++++++-------- CLAUDE.md | 35 +++++++++++++++++++++++++++-------- 2 files changed, 54 insertions(+), 16 deletions(-) diff --git a/AGENTS.md b/AGENTS.md index ccac56d1..577d61c4 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -798,18 +798,37 @@ PR 目标是上游官方仓库,**只包含通用功能改动**(bug fix、新 推送到我们自己的 `develop` 或 `main` 分支时,包含所有改动(定制化 + 通用功能)。 -**推送流程**: -1. 本地运行 `cd backend && make test-unit` 确保单元测试通过 -2. 本地运行 `cd backend && gofmt -l ./...` 确保格式正确 -3. 推送后确认 CI 和 Security Scan 两个 workflow 的 4 个 job 全部绿色 ✅ -4. 任何 job 失败必须立即修复,**禁止在 CI 未通过的状态下继续后续操作** +**推送前必须在本地执行全部 CI 检查**(不要等 GitHub Actions): + +```bash +# 确保 Go 工具链可用(macOS homebrew) +export PATH="/opt/homebrew/bin:$HOME/go/bin:$PATH" + +# 1. 单元测试(必须) +cd backend && make test-unit + +# 2. 集成测试(推荐,需要 Docker) +make test-integration + +# 3. golangci-lint 静态检查(必须) +golangci-lint run --timeout=5m + +# 4. gofmt 格式检查(必须) +gofmt -l ./... +# 如果有输出,运行 gofmt -w 修复 +``` + +**推送后确认**: +1. 使用 `gh run list --repo touwaeriol/sub2api --branch ` 检查 GitHub Actions 状态 +2. 确认 CI 和 Security Scan 两个 workflow 的 4 个 job 全部绿色 ✅ +3. 任何 job 失败必须立即修复,**禁止在 CI 未通过的状态下继续后续操作** ### 发布版本 -1. 确保 `main` 分支最新提交的 4 个 CI job 全部通过 +1. 本地执行上述全部 CI 检查通过 2. 递增 `backend/cmd/server/VERSION`,提交并推送 -3. 打 tag 推送后,确认 tag 触发的 3 个 workflow(CI、Security Scan、Release)全部通过 -4. 
**Release workflow 失败时禁止部署** — 必须先修复问题,删除旧 tag,重新打 tag +3. 推送后确认 GitHub Actions 的 4 个 CI job 全部通过 +4. **CI 未通过时禁止部署** — 必须先修复问题 5. 使用 `gh run list --repo touwaeriol/sub2api --limit 10` 确认状态 ### 常见 CI 失败原因及修复 diff --git a/CLAUDE.md b/CLAUDE.md index e855e961..c896482e 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -839,18 +839,37 @@ PR 目标是上游官方仓库,**只包含通用功能改动**(bug fix、新 推送到我们自己的 `develop` 或 `main` 分支时,包含所有改动(定制化 + 通用功能)。 -**推送流程**: -1. 本地运行 `cd backend && make test-unit` 确保单元测试通过 -2. 本地运行 `cd backend && gofmt -l ./...` 确保格式正确 -3. 推送后确认 CI 和 Security Scan 两个 workflow 的 4 个 job 全部绿色 ✅ -4. 任何 job 失败必须立即修复,**禁止在 CI 未通过的状态下继续后续操作** +**推送前必须在本地执行全部 CI 检查**(不要等 GitHub Actions): + +```bash +# 确保 Go 工具链可用(macOS homebrew) +export PATH="/opt/homebrew/bin:$HOME/go/bin:$PATH" + +# 1. 单元测试(必须) +cd backend && make test-unit + +# 2. 集成测试(推荐,需要 Docker) +make test-integration + +# 3. golangci-lint 静态检查(必须) +golangci-lint run --timeout=5m + +# 4. gofmt 格式检查(必须) +gofmt -l ./... +# 如果有输出,运行 gofmt -w 修复 +``` + +**推送后确认**: +1. 使用 `gh run list --repo touwaeriol/sub2api --branch ` 检查 GitHub Actions 状态 +2. 确认 CI 和 Security Scan 两个 workflow 的 4 个 job 全部绿色 ✅ +3. 任何 job 失败必须立即修复,**禁止在 CI 未通过的状态下继续后续操作** ### 发布版本 -1. 确保 `main` 分支最新提交的 4 个 CI job 全部通过 +1. 本地执行上述全部 CI 检查通过 2. 递增 `backend/cmd/server/VERSION`,提交并推送 -3. 打 tag 推送后,确认 tag 触发的 3 个 workflow(CI、Security Scan、Release)全部通过 -4. **Release workflow 失败时禁止部署** — 必须先修复问题,删除旧 tag,重新打 tag +3. 推送后确认 GitHub Actions 的 4 个 CI job 全部通过 +4. **CI 未通过时禁止部署** — 必须先修复问题 5. 
使用 `gh run list --repo touwaeriol/sub2api --limit 10` 确认状态 ### 常见 CI 失败原因及修复 From 78a9705fadd54a4568583905f9dc3b9193c2ef1a Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 12:45:52 +0800 Subject: [PATCH 040/175] fix: resolve ineffassign lint error in onboard project_id logic --- backend/internal/service/antigravity_oauth_service.go | 1 + 1 file changed, 1 insertion(+) diff --git a/backend/internal/service/antigravity_oauth_service.go b/backend/internal/service/antigravity_oauth_service.go index 86b7cc2e..f878a392 100644 --- a/backend/internal/service/antigravity_oauth_service.go +++ b/backend/internal/service/antigravity_oauth_service.go @@ -284,6 +284,7 @@ func (s *AntigravityOAuthService) loadProjectIDWithRetry(ctx context.Context, ac return projectID, nil } else if onboardErr != nil { lastErr = onboardErr + continue } } From a1e2ffd58690b147ff48c8bb4849bd75cdbee84c Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 13:00:31 +0800 Subject: [PATCH 041/175] refactor: optimize project_id fill to lightweight approach MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Replace heavy RefreshAccountToken with lightweight tryFillProjectID (loadCodeAssist → onboardUser → fallback), consistent with Antigravity-Manager's behavior - Add sync.Map cooldown/dedup (60s) to prevent repeated fill attempts - Add fallback project_id "bamboo-precept-lgxtn" matching AM - Extract mergeCredentials helper to eliminate duplication - Use slog structured logging instead of log.Printf - Fix time.Sleep in OnboardUser to context-aware select - Fix strings.NewReader(string(bodyBytes)) → bytes.NewReader(bodyBytes) - Remove redundant tc := tc in test (Go 1.22+) - Add nil guard in persistProjectID for test safety --- backend/internal/pkg/antigravity/client.go | 12 +- .../service/antigravity_oauth_service_test.go | 1 - .../service/antigravity_token_provider.go | 108 +++++++++++++----- 3 files changed, 90 insertions(+), 31 
deletions(-) diff --git a/backend/internal/pkg/antigravity/client.go b/backend/internal/pkg/antigravity/client.go index 3556da88..9a0488a4 100644 --- a/backend/internal/pkg/antigravity/client.go +++ b/backend/internal/pkg/antigravity/client.go @@ -326,7 +326,7 @@ func (c *Client) LoadCodeAssist(ctx context.Context, accessToken string) (*LoadC var lastErr error for urlIdx, baseURL := range availableURLs { apiURL := baseURL + "/v1internal:loadCodeAssist" - req, err := http.NewRequestWithContext(ctx, http.MethodPost, apiURL, strings.NewReader(string(bodyBytes))) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, apiURL, bytes.NewReader(bodyBytes)) if err != nil { lastErr = fmt.Errorf("创建请求失败: %w", err) continue @@ -405,7 +405,7 @@ func (c *Client) OnboardUser(ctx context.Context, accessToken, tierID string) (s apiURL := baseURL + "/v1internal:onboardUser" for attempt := 1; attempt <= 5; attempt++ { - req, err := http.NewRequestWithContext(ctx, http.MethodPost, apiURL, strings.NewReader(string(bodyBytes))) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, apiURL, bytes.NewReader(bodyBytes)) if err != nil { lastErr = fmt.Errorf("创建请求失败: %w", err) break @@ -456,7 +456,11 @@ func (c *Client) OnboardUser(ctx context.Context, accessToken, tierID string) (s } // done=false 时等待后重试(与 CLIProxyAPI 行为一致) - time.Sleep(2 * time.Second) + select { + case <-ctx.Done(): + return "", ctx.Err() + case <-time.After(2 * time.Second): + } } } @@ -521,7 +525,7 @@ func (c *Client) FetchAvailableModels(ctx context.Context, accessToken, projectI var lastErr error for urlIdx, baseURL := range availableURLs { apiURL := baseURL + "/v1internal:fetchAvailableModels" - req, err := http.NewRequestWithContext(ctx, http.MethodPost, apiURL, strings.NewReader(string(bodyBytes))) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, apiURL, bytes.NewReader(bodyBytes)) if err != nil { lastErr = fmt.Errorf("创建请求失败: %w", err) continue diff --git 
a/backend/internal/service/antigravity_oauth_service_test.go b/backend/internal/service/antigravity_oauth_service_test.go index e041c2b4..0325d9bc 100644 --- a/backend/internal/service/antigravity_oauth_service_test.go +++ b/backend/internal/service/antigravity_oauth_service_test.go @@ -51,7 +51,6 @@ func TestResolveDefaultTierID(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() diff --git a/backend/internal/service/antigravity_token_provider.go b/backend/internal/service/antigravity_token_provider.go index e0ada9f1..774c1c75 100644 --- a/backend/internal/service/antigravity_token_provider.go +++ b/backend/internal/service/antigravity_token_provider.go @@ -3,16 +3,24 @@ package service import ( "context" "errors" - "log" "log/slog" "strconv" "strings" + "sync" "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/antigravity" ) const ( antigravityTokenRefreshSkew = 3 * time.Minute antigravityTokenCacheSkew = 5 * time.Minute + + // projectIDFillCooldown 同一账号 project_id 补齐失败后的冷却时间 + projectIDFillCooldown = 60 * time.Second + + // fallbackProjectID 所有获取方式都失败时的兜底值(与 Antigravity-Manager 一致) + fallbackProjectID = "bamboo-precept-lgxtn" ) // AntigravityTokenCache Token 缓存接口(复用 GeminiTokenCache 接口定义) @@ -23,6 +31,9 @@ type AntigravityTokenProvider struct { accountRepo AccountRepository tokenCache AntigravityTokenCache antigravityOAuthService *AntigravityOAuthService + + // projectIDFillAttempts 记录每个账号最近一次 project_id 补齐尝试时间,用于冷却去重 + projectIDFillAttempts sync.Map // map[int64]time.Time } func NewAntigravityTokenProvider( @@ -94,14 +105,10 @@ func (p *AntigravityTokenProvider) GetAccessToken(ctx context.Context, account * return "", err } newCredentials := p.antigravityOAuthService.BuildAccountCredentials(tokenInfo) - for k, v := range account.Credentials { - if _, exists := newCredentials[k]; !exists { - newCredentials[k] = v - } - } + mergeCredentials(newCredentials, account.Credentials) account.Credentials = 
newCredentials if updateErr := p.accountRepo.Update(ctx, account); updateErr != nil { - log.Printf("[AntigravityTokenProvider] Failed to update account credentials: %v", updateErr) + slog.Error("failed to update account credentials after token refresh", "account_id", account.ID, "error", updateErr) } expiresAt = account.GetCredentialAsTime("expires_at") } @@ -113,27 +120,12 @@ func (p *AntigravityTokenProvider) GetAccessToken(ctx context.Context, account * return "", errors.New("access_token not found in credentials") } - // 如果账号还没有 project_id,优先尝试在线补齐,避免请求 daily/sandbox 时出现 - // "Invalid project resource name projects/"。 - if strings.TrimSpace(account.GetCredential("project_id")) == "" && p.antigravityOAuthService != nil { - if tokenInfo, err := p.antigravityOAuthService.RefreshAccountToken(ctx, account); err == nil { - newCredentials := p.antigravityOAuthService.BuildAccountCredentials(tokenInfo) - for k, v := range account.Credentials { - if _, exists := newCredentials[k]; !exists { - newCredentials[k] = v - } - } - account.Credentials = newCredentials - if updateErr := p.accountRepo.Update(ctx, account); updateErr != nil { - log.Printf("[AntigravityTokenProvider] Failed to persist project_id补齐: %v", updateErr) - } - if refreshed := strings.TrimSpace(account.GetCredential("access_token")); refreshed != "" { - accessToken = refreshed - } - } + // 3. 如果缺少 project_id,轻量补齐(不刷新 token) + if strings.TrimSpace(account.GetCredential("project_id")) == "" { + p.tryFillProjectID(ctx, account, accessToken) } - // 3. 存入缓存(验证版本后再写入,避免异步刷新任务与请求线程的竞态条件) + // 4. 
存入缓存(验证版本后再写入,避免异步刷新任务与请求线程的竞态条件) if p.tokenCache != nil { latestAccount, isStale := CheckTokenVersion(ctx, account, p.accountRepo) if isStale && latestAccount != nil { @@ -164,6 +156,70 @@ func (p *AntigravityTokenProvider) GetAccessToken(ctx context.Context, account * return accessToken, nil } +// tryFillProjectID 轻量级 project_id 补齐(与 Antigravity-Manager 保持一致) +// 只调用 loadCodeAssist + onboardUser,不刷新 token。 +// 带冷却去重:同一账号 60s 内不重复尝试。 +func (p *AntigravityTokenProvider) tryFillProjectID(ctx context.Context, account *Account, accessToken string) { + // 冷却检查:60s 内不重复尝试 + if lastAttempt, ok := p.projectIDFillAttempts.Load(account.ID); ok { + if time.Since(lastAttempt.(time.Time)) < projectIDFillCooldown { + return + } + } + p.projectIDFillAttempts.Store(account.ID, time.Now()) + + proxyURL := "" + if account.ProxyID != nil && account.Proxy != nil { + proxyURL = account.Proxy.URL() + } + + client := antigravity.NewClient(proxyURL) + + // Step 1: loadCodeAssist(单次调用,不重试) + loadResp, loadRaw, err := client.LoadCodeAssist(ctx, accessToken) + if err == nil && loadResp != nil && loadResp.CloudAICompanionProject != "" { + p.persistProjectID(ctx, account, loadResp.CloudAICompanionProject) + p.projectIDFillAttempts.Delete(account.ID) // 成功后清除冷却 + return + } + + // Step 2: onboardUser(loadCodeAssist 成功但未返回 project_id 时) + if err == nil { + if projectID, onboardErr := tryOnboardProjectID(ctx, client, accessToken, loadRaw); onboardErr == nil && projectID != "" { + p.persistProjectID(ctx, account, projectID) + p.projectIDFillAttempts.Delete(account.ID) + return + } + } + + // Step 3: 兜底值(与 Antigravity-Manager 一致) + slog.Warn("project_id fill failed, using fallback", + "account_id", account.ID, + "fallback", fallbackProjectID, + ) + p.persistProjectID(ctx, account, fallbackProjectID) +} + +// persistProjectID 将 project_id 写入账号凭证并持久化 +func (p *AntigravityTokenProvider) persistProjectID(ctx context.Context, account *Account, projectID string) { + account.Credentials["project_id"] = 
projectID + if p.accountRepo == nil { + return + } + if updateErr := p.accountRepo.Update(ctx, account); updateErr != nil { + slog.Error("failed to persist project_id", "account_id", account.ID, "error", updateErr) + } +} + +// mergeCredentials 将 old 中不存在于 new 的字段合并到 new +func mergeCredentials(newCreds, oldCreds map[string]any) { + for k, v := range oldCreds { + if _, exists := newCreds[k]; !exists { + newCreds[k] = v + } + } +} + func AntigravityTokenCacheKey(account *Account) string { projectID := strings.TrimSpace(account.GetCredential("project_id")) if projectID != "" { From f2917aeaf86961f563c611d7e685985873ce67b9 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 13:06:41 +0800 Subject: [PATCH 042/175] fix: use safe type assertion for errcheck lint compliance --- backend/internal/service/antigravity_token_provider.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/internal/service/antigravity_token_provider.go b/backend/internal/service/antigravity_token_provider.go index 774c1c75..4dbc8e39 100644 --- a/backend/internal/service/antigravity_token_provider.go +++ b/backend/internal/service/antigravity_token_provider.go @@ -162,7 +162,7 @@ func (p *AntigravityTokenProvider) GetAccessToken(ctx context.Context, account * func (p *AntigravityTokenProvider) tryFillProjectID(ctx context.Context, account *Account, accessToken string) { // 冷却检查:60s 内不重复尝试 if lastAttempt, ok := p.projectIDFillAttempts.Load(account.ID); ok { - if time.Since(lastAttempt.(time.Time)) < projectIDFillCooldown { + if t, ok := lastAttempt.(time.Time); ok && time.Since(t) < projectIDFillCooldown { return } } From 92745f7534df0a21c803641293bda40f0ddecc11 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 13:08:49 +0800 Subject: [PATCH 043/175] chore: bump version to 0.1.79.6 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 
3e8643b1..684cdbbc 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.79.5 +0.1.79.6 From f12da659628e49a716a457a0e3aeeef1a8ee27d5 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 14:39:02 +0800 Subject: [PATCH 044/175] docs: add Admin API reference to CLAUDE.md and AGENTS.md --- AGENTS.md | 372 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ CLAUDE.md | 372 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 744 insertions(+) diff --git a/AGENTS.md b/AGENTS.md index 577d61c4..0202e94f 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -509,6 +509,378 @@ docker stats sub2api --- +## Admin API 接口文档 + +### 认证方式 + +所有 Admin API 通过 `x-api-key` 请求头传递 Admin API Key 认证。 + +``` +x-api-key: admin-xxx +``` + +> **使用说明**:用户提供 admin token 后,直接将其作为 `x-api-key` 的值使用。Token 格式为 `admin-` + 64 位十六进制字符,在管理后台 `设置 > Admin API Key` 中生成。**请勿将实际 token 写入文档或代码中。** + +### 环境地址 + +| 环境 | 基础地址 | 说明 | +|------|----------|------| +| 正式 | `https://clicodeplus.com` | 生产环境 | +| Beta | `http://<服务器IP>:8084` | 仅内网访问 | +| OpenAI | `http://<服务器IP>:8083` | 仅内网访问 | + +> 以下接口文档中,`${BASE}` 代表环境基础地址,`${KEY}` 代表用户提供的 admin token。 + +--- + +### 1. 
账号管理 + +#### 1.1 获取账号列表 + +``` +GET /api/v1/admin/accounts +``` + +**查询参数**: + +| 参数 | 类型 | 必填 | 说明 | +|------|------|------|------| +| `platform` | string | 否 | 平台筛选:`antigravity` / `anthropic` / `openai` / `gemini` | +| `type` | string | 否 | 账号类型:`oauth` / `api_key` / `cookie` | +| `status` | string | 否 | 状态:`active` / `disabled` / `error` | +| `search` | string | 否 | 搜索关键词(名称、备注) | +| `page` | int | 否 | 页码,默认 1 | +| `page_size` | int | 否 | 每页数量,默认 20 | + +```bash +curl -s "${BASE}/api/v1/admin/accounts?platform=antigravity&page=1&page_size=100" \ + -H "x-api-key: ${KEY}" +``` + +**响应**: +```json +{ + "code": 0, + "message": "success", + "data": { + "items": [{"id": 1, "name": "xxx@gmail.com", "platform": "antigravity", "status": "active", ...}], + "total": 66 + } +} +``` + +#### 1.2 获取账号详情 + +``` +GET /api/v1/admin/accounts/:id +``` + +```bash +curl -s "${BASE}/api/v1/admin/accounts/1" -H "x-api-key: ${KEY}" +``` + +#### 1.3 测试账号连接 + +``` +POST /api/v1/admin/accounts/:id/test +``` + +**请求体**(JSON,可选): + +| 字段 | 类型 | 必填 | 说明 | +|------|------|------|------| +| `model_id` | string | 否 | 指定测试模型,如 `claude-opus-4-6`;不传则使用默认模型 | + +**响应格式**:SSE(Server-Sent Events)流 + +```bash +curl -N -X POST "${BASE}/api/v1/admin/accounts/1/test" \ + -H "x-api-key: ${KEY}" \ + -H "Content-Type: application/json" \ + -d '{"model_id": "claude-opus-4-6"}' +``` + +**SSE 事件类型**: + +| type | 字段 | 说明 | +|------|------|------| +| `test_start` | `model` | 测试开始,返回测试模型名 | +| `content` | `text` | 模型响应内容(流式文本片段) | +| `test_end` | `success`, `error` | 测试结束,`success=true` 表示成功 | +| `error` | `text` | 错误信息 | + +#### 1.4 清除账号限流 + +``` +POST /api/v1/admin/accounts/:id/clear-rate-limit +``` + +```bash +curl -X POST "${BASE}/api/v1/admin/accounts/1/clear-rate-limit" \ + -H "x-api-key: ${KEY}" +``` + +#### 1.5 清除账号错误状态 + +``` +POST /api/v1/admin/accounts/:id/clear-error +``` + +```bash +curl -X POST "${BASE}/api/v1/admin/accounts/1/clear-error" \ + -H "x-api-key: ${KEY}" +``` + +#### 1.6 获取账号可用模型 + +``` 
+GET /api/v1/admin/accounts/:id/models +``` + +```bash +curl -s "${BASE}/api/v1/admin/accounts/1/models" -H "x-api-key: ${KEY}" +``` + +#### 1.7 刷新 OAuth Token + +``` +POST /api/v1/admin/accounts/:id/refresh +``` + +```bash +curl -X POST "${BASE}/api/v1/admin/accounts/1/refresh" -H "x-api-key: ${KEY}" +``` + +#### 1.8 刷新账号等级 + +``` +POST /api/v1/admin/accounts/:id/refresh-tier +``` + +```bash +curl -X POST "${BASE}/api/v1/admin/accounts/1/refresh-tier" -H "x-api-key: ${KEY}" +``` + +#### 1.9 获取账号统计 + +``` +GET /api/v1/admin/accounts/:id/stats +``` + +```bash +curl -s "${BASE}/api/v1/admin/accounts/1/stats" -H "x-api-key: ${KEY}" +``` + +#### 1.10 获取账号用量 + +``` +GET /api/v1/admin/accounts/:id/usage +``` + +```bash +curl -s "${BASE}/api/v1/admin/accounts/1/usage" -H "x-api-key: ${KEY}" +``` + +#### 1.11 批量测试账号(脚本) + +批量测试指定平台所有账号的指定模型连通性: + +```bash +# 用户需提供:BASE(环境地址)、KEY(admin token)、MODEL(测试模型) +ACCOUNT_IDS=$(curl -s "${BASE}/api/v1/admin/accounts?platform=antigravity&page=1&page_size=100" \ + -H "x-api-key: ${KEY}" | python3 -c " +import json, sys +data = json.load(sys.stdin) +for item in data['data']['items']: + print(f\"{item['id']}|{item['name']}\") +") + +while IFS='|' read -r ID NAME; do + echo "测试账号 ID=${ID} (${NAME})..." + RESPONSE=$(curl -s --max-time 60 -N \ + -X POST "${BASE}/api/v1/admin/accounts/${ID}/test" \ + -H "x-api-key: ${KEY}" \ + -H "Content-Type: application/json" \ + -d "{\"model_id\": \"${MODEL}\"}" 2>&1) + if echo "$RESPONSE" | grep -q '"success":true'; then + echo " ✅ 成功" + elif echo "$RESPONSE" | grep -q '"type":"content"'; then + echo " ✅ 成功(有内容响应)" + else + ERROR_MSG=$(echo "$RESPONSE" | grep -o '"error":"[^"]*"' | tail -1) + echo " ❌ 失败: ${ERROR_MSG}" + fi +done <<< "$ACCOUNT_IDS" +``` + +--- + +### 2. 
运维监控 + +#### 2.1 并发统计 + +``` +GET /api/v1/admin/ops/concurrency +``` + +```bash +curl -s "${BASE}/api/v1/admin/ops/concurrency" -H "x-api-key: ${KEY}" +``` + +#### 2.2 账号可用性 + +``` +GET /api/v1/admin/ops/account-availability +``` + +```bash +curl -s "${BASE}/api/v1/admin/ops/account-availability" -H "x-api-key: ${KEY}" +``` + +#### 2.3 实时流量摘要 + +``` +GET /api/v1/admin/ops/realtime-traffic +``` + +```bash +curl -s "${BASE}/api/v1/admin/ops/realtime-traffic" -H "x-api-key: ${KEY}" +``` + +#### 2.4 请求错误列表 + +``` +GET /api/v1/admin/ops/request-errors +``` + +**查询参数**:`page`、`page_size` + +```bash +curl -s "${BASE}/api/v1/admin/ops/request-errors?page=1&page_size=50" \ + -H "x-api-key: ${KEY}" +``` + +#### 2.5 上游错误列表 + +``` +GET /api/v1/admin/ops/upstream-errors +``` + +```bash +curl -s "${BASE}/api/v1/admin/ops/upstream-errors?page=1&page_size=50" \ + -H "x-api-key: ${KEY}" +``` + +#### 2.6 仪表板概览 + +``` +GET /api/v1/admin/ops/dashboard/overview +``` + +```bash +curl -s "${BASE}/api/v1/admin/ops/dashboard/overview" -H "x-api-key: ${KEY}" +``` + +--- + +### 3. 系统设置 + +#### 3.1 获取系统设置 + +``` +GET /api/v1/admin/settings +``` + +```bash +curl -s "${BASE}/api/v1/admin/settings" -H "x-api-key: ${KEY}" +``` + +#### 3.2 更新系统设置 + +``` +PUT /api/v1/admin/settings +``` + +```bash +curl -X PUT "${BASE}/api/v1/admin/settings" \ + -H "x-api-key: ${KEY}" \ + -H "Content-Type: application/json" \ + -d '{ ... }' +``` + +#### 3.3 Admin API Key 状态(脱敏) + +``` +GET /api/v1/admin/settings/admin-api-key +``` + +```bash +curl -s "${BASE}/api/v1/admin/settings/admin-api-key" -H "x-api-key: ${KEY}" +``` + +--- + +### 4. 
用户管理 + +#### 4.1 用户列表 + +``` +GET /api/v1/admin/users +``` + +```bash +curl -s "${BASE}/api/v1/admin/users?page=1&page_size=20" -H "x-api-key: ${KEY}" +``` + +#### 4.2 用户详情 + +``` +GET /api/v1/admin/users/:id +``` + +```bash +curl -s "${BASE}/api/v1/admin/users/1" -H "x-api-key: ${KEY}" +``` + +#### 4.3 更新用户余额 + +``` +POST /api/v1/admin/users/:id/balance +``` + +```bash +curl -X POST "${BASE}/api/v1/admin/users/1/balance" \ + -H "x-api-key: ${KEY}" \ + -H "Content-Type: application/json" \ + -d '{"amount": 100, "reason": "充值"}' +``` + +--- + +### 5. 分组管理 + +#### 5.1 分组列表 + +``` +GET /api/v1/admin/groups +``` + +```bash +curl -s "${BASE}/api/v1/admin/groups" -H "x-api-key: ${KEY}" +``` + +#### 5.2 所有分组(不分页) + +``` +GET /api/v1/admin/groups/all +``` + +```bash +curl -s "${BASE}/api/v1/admin/groups/all" -H "x-api-key: ${KEY}" +``` + +--- + ## 注意事项 1. **前端必须打包进镜像**:使用 `docker build` 在构建服务器(`us-asaki-root`)上构建,Dockerfile 会自动编译前端并 embed 到后端二进制中,构建完成后通过 `docker save | docker load` 传输到生产服务器(`clicodeplus`) diff --git a/CLAUDE.md b/CLAUDE.md index c896482e..737cdf19 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -550,6 +550,378 @@ docker stats sub2api --- +## Admin API 接口文档 + +### 认证方式 + +所有 Admin API 通过 `x-api-key` 请求头传递 Admin API Key 认证。 + +``` +x-api-key: admin-xxx +``` + +> **使用说明**:用户提供 admin token 后,直接将其作为 `x-api-key` 的值使用。Token 格式为 `admin-` + 64 位十六进制字符,在管理后台 `设置 > Admin API Key` 中生成。**请勿将实际 token 写入文档或代码中。** + +### 环境地址 + +| 环境 | 基础地址 | 说明 | +|------|----------|------| +| 正式 | `https://clicodeplus.com` | 生产环境 | +| Beta | `http://<服务器IP>:8084` | 仅内网访问 | +| OpenAI | `http://<服务器IP>:8083` | 仅内网访问 | + +> 以下接口文档中,`${BASE}` 代表环境基础地址,`${KEY}` 代表用户提供的 admin token。 + +--- + +### 1. 
账号管理 + +#### 1.1 获取账号列表 + +``` +GET /api/v1/admin/accounts +``` + +**查询参数**: + +| 参数 | 类型 | 必填 | 说明 | +|------|------|------|------| +| `platform` | string | 否 | 平台筛选:`antigravity` / `anthropic` / `openai` / `gemini` | +| `type` | string | 否 | 账号类型:`oauth` / `api_key` / `cookie` | +| `status` | string | 否 | 状态:`active` / `disabled` / `error` | +| `search` | string | 否 | 搜索关键词(名称、备注) | +| `page` | int | 否 | 页码,默认 1 | +| `page_size` | int | 否 | 每页数量,默认 20 | + +```bash +curl -s "${BASE}/api/v1/admin/accounts?platform=antigravity&page=1&page_size=100" \ + -H "x-api-key: ${KEY}" +``` + +**响应**: +```json +{ + "code": 0, + "message": "success", + "data": { + "items": [{"id": 1, "name": "xxx@gmail.com", "platform": "antigravity", "status": "active", ...}], + "total": 66 + } +} +``` + +#### 1.2 获取账号详情 + +``` +GET /api/v1/admin/accounts/:id +``` + +```bash +curl -s "${BASE}/api/v1/admin/accounts/1" -H "x-api-key: ${KEY}" +``` + +#### 1.3 测试账号连接 + +``` +POST /api/v1/admin/accounts/:id/test +``` + +**请求体**(JSON,可选): + +| 字段 | 类型 | 必填 | 说明 | +|------|------|------|------| +| `model_id` | string | 否 | 指定测试模型,如 `claude-opus-4-6`;不传则使用默认模型 | + +**响应格式**:SSE(Server-Sent Events)流 + +```bash +curl -N -X POST "${BASE}/api/v1/admin/accounts/1/test" \ + -H "x-api-key: ${KEY}" \ + -H "Content-Type: application/json" \ + -d '{"model_id": "claude-opus-4-6"}' +``` + +**SSE 事件类型**: + +| type | 字段 | 说明 | +|------|------|------| +| `test_start` | `model` | 测试开始,返回测试模型名 | +| `content` | `text` | 模型响应内容(流式文本片段) | +| `test_end` | `success`, `error` | 测试结束,`success=true` 表示成功 | +| `error` | `text` | 错误信息 | + +#### 1.4 清除账号限流 + +``` +POST /api/v1/admin/accounts/:id/clear-rate-limit +``` + +```bash +curl -X POST "${BASE}/api/v1/admin/accounts/1/clear-rate-limit" \ + -H "x-api-key: ${KEY}" +``` + +#### 1.5 清除账号错误状态 + +``` +POST /api/v1/admin/accounts/:id/clear-error +``` + +```bash +curl -X POST "${BASE}/api/v1/admin/accounts/1/clear-error" \ + -H "x-api-key: ${KEY}" +``` + +#### 1.6 获取账号可用模型 + +``` 
+GET /api/v1/admin/accounts/:id/models +``` + +```bash +curl -s "${BASE}/api/v1/admin/accounts/1/models" -H "x-api-key: ${KEY}" +``` + +#### 1.7 刷新 OAuth Token + +``` +POST /api/v1/admin/accounts/:id/refresh +``` + +```bash +curl -X POST "${BASE}/api/v1/admin/accounts/1/refresh" -H "x-api-key: ${KEY}" +``` + +#### 1.8 刷新账号等级 + +``` +POST /api/v1/admin/accounts/:id/refresh-tier +``` + +```bash +curl -X POST "${BASE}/api/v1/admin/accounts/1/refresh-tier" -H "x-api-key: ${KEY}" +``` + +#### 1.9 获取账号统计 + +``` +GET /api/v1/admin/accounts/:id/stats +``` + +```bash +curl -s "${BASE}/api/v1/admin/accounts/1/stats" -H "x-api-key: ${KEY}" +``` + +#### 1.10 获取账号用量 + +``` +GET /api/v1/admin/accounts/:id/usage +``` + +```bash +curl -s "${BASE}/api/v1/admin/accounts/1/usage" -H "x-api-key: ${KEY}" +``` + +#### 1.11 批量测试账号(脚本) + +批量测试指定平台所有账号的指定模型连通性: + +```bash +# 用户需提供:BASE(环境地址)、KEY(admin token)、MODEL(测试模型) +ACCOUNT_IDS=$(curl -s "${BASE}/api/v1/admin/accounts?platform=antigravity&page=1&page_size=100" \ + -H "x-api-key: ${KEY}" | python3 -c " +import json, sys +data = json.load(sys.stdin) +for item in data['data']['items']: + print(f\"{item['id']}|{item['name']}\") +") + +while IFS='|' read -r ID NAME; do + echo "测试账号 ID=${ID} (${NAME})..." + RESPONSE=$(curl -s --max-time 60 -N \ + -X POST "${BASE}/api/v1/admin/accounts/${ID}/test" \ + -H "x-api-key: ${KEY}" \ + -H "Content-Type: application/json" \ + -d "{\"model_id\": \"${MODEL}\"}" 2>&1) + if echo "$RESPONSE" | grep -q '"success":true'; then + echo " ✅ 成功" + elif echo "$RESPONSE" | grep -q '"type":"content"'; then + echo " ✅ 成功(有内容响应)" + else + ERROR_MSG=$(echo "$RESPONSE" | grep -o '"error":"[^"]*"' | tail -1) + echo " ❌ 失败: ${ERROR_MSG}" + fi +done <<< "$ACCOUNT_IDS" +``` + +--- + +### 2. 
运维监控 + +#### 2.1 并发统计 + +``` +GET /api/v1/admin/ops/concurrency +``` + +```bash +curl -s "${BASE}/api/v1/admin/ops/concurrency" -H "x-api-key: ${KEY}" +``` + +#### 2.2 账号可用性 + +``` +GET /api/v1/admin/ops/account-availability +``` + +```bash +curl -s "${BASE}/api/v1/admin/ops/account-availability" -H "x-api-key: ${KEY}" +``` + +#### 2.3 实时流量摘要 + +``` +GET /api/v1/admin/ops/realtime-traffic +``` + +```bash +curl -s "${BASE}/api/v1/admin/ops/realtime-traffic" -H "x-api-key: ${KEY}" +``` + +#### 2.4 请求错误列表 + +``` +GET /api/v1/admin/ops/request-errors +``` + +**查询参数**:`page`、`page_size` + +```bash +curl -s "${BASE}/api/v1/admin/ops/request-errors?page=1&page_size=50" \ + -H "x-api-key: ${KEY}" +``` + +#### 2.5 上游错误列表 + +``` +GET /api/v1/admin/ops/upstream-errors +``` + +```bash +curl -s "${BASE}/api/v1/admin/ops/upstream-errors?page=1&page_size=50" \ + -H "x-api-key: ${KEY}" +``` + +#### 2.6 仪表板概览 + +``` +GET /api/v1/admin/ops/dashboard/overview +``` + +```bash +curl -s "${BASE}/api/v1/admin/ops/dashboard/overview" -H "x-api-key: ${KEY}" +``` + +--- + +### 3. 系统设置 + +#### 3.1 获取系统设置 + +``` +GET /api/v1/admin/settings +``` + +```bash +curl -s "${BASE}/api/v1/admin/settings" -H "x-api-key: ${KEY}" +``` + +#### 3.2 更新系统设置 + +``` +PUT /api/v1/admin/settings +``` + +```bash +curl -X PUT "${BASE}/api/v1/admin/settings" \ + -H "x-api-key: ${KEY}" \ + -H "Content-Type: application/json" \ + -d '{ ... }' +``` + +#### 3.3 Admin API Key 状态(脱敏) + +``` +GET /api/v1/admin/settings/admin-api-key +``` + +```bash +curl -s "${BASE}/api/v1/admin/settings/admin-api-key" -H "x-api-key: ${KEY}" +``` + +--- + +### 4. 
用户管理 + +#### 4.1 用户列表 + +``` +GET /api/v1/admin/users +``` + +```bash +curl -s "${BASE}/api/v1/admin/users?page=1&page_size=20" -H "x-api-key: ${KEY}" +``` + +#### 4.2 用户详情 + +``` +GET /api/v1/admin/users/:id +``` + +```bash +curl -s "${BASE}/api/v1/admin/users/1" -H "x-api-key: ${KEY}" +``` + +#### 4.3 更新用户余额 + +``` +POST /api/v1/admin/users/:id/balance +``` + +```bash +curl -X POST "${BASE}/api/v1/admin/users/1/balance" \ + -H "x-api-key: ${KEY}" \ + -H "Content-Type: application/json" \ + -d '{"amount": 100, "reason": "充值"}' +``` + +--- + +### 5. 分组管理 + +#### 5.1 分组列表 + +``` +GET /api/v1/admin/groups +``` + +```bash +curl -s "${BASE}/api/v1/admin/groups" -H "x-api-key: ${KEY}" +``` + +#### 5.2 所有分组(不分页) + +``` +GET /api/v1/admin/groups/all +``` + +```bash +curl -s "${BASE}/api/v1/admin/groups/all" -H "x-api-key: ${KEY}" +``` + +--- + ## 注意事项 1. **前端必须打包进镜像**:使用 `docker build` 在构建服务器(`us-asaki-root`)上构建,Dockerfile 会自动编译前端并 embed 到后端二进制中,构建完成后通过 `docker save | docker load` 传输到生产服务器(`clicodeplus`) From c722212e126319e7a65c4c1b19f0558e632b9ede Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 14:39:14 +0800 Subject: [PATCH 045/175] fix: distinguish client disconnection from upstream retry failure When client disconnects during upstream request, the error was incorrectly reported as "Upstream request failed after retries". Now checks context cancellation first and returns "Client disconnected before upstream response" instead. 
--- backend/internal/service/antigravity_gateway_service.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 0d054c49..5ca7b3f3 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -1372,6 +1372,10 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, ForceCacheBilling: switchErr.IsStickySession, } } + // 区分客户端取消和真正的上游失败,返回更准确的错误消息 + if c.Request.Context().Err() != nil { + return nil, s.writeClaudeError(c, http.StatusBadGateway, "client_disconnected", "Client disconnected before upstream response") + } return nil, s.writeClaudeError(c, http.StatusBadGateway, "upstream_error", "Upstream request failed after retries") } resp := result.resp @@ -2044,6 +2048,10 @@ func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Co ForceCacheBilling: switchErr.IsStickySession, } } + // 区分客户端取消和真正的上游失败,返回更准确的错误消息 + if c.Request.Context().Err() != nil { + return nil, s.writeGoogleError(c, http.StatusBadGateway, "Client disconnected before upstream response") + } return nil, s.writeGoogleError(c, http.StatusBadGateway, "Upstream request failed after retries") } resp := result.resp From 44693d0dfb1bfd9fd00ce583bb932f0628a0840d Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 14:39:29 +0800 Subject: [PATCH 046/175] chore: bump version to 0.1.79.7 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 684cdbbc..0aa59ad9 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.79.6 +0.1.79.7 From d11b295729799fd5ce7f092b73a2815ec2b64e73 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 16:21:22 +0800 Subject: [PATCH 047/175] chore: bump version to 0.1.80.1 --- 
backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 0aa59ad9..4f8bcf69 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.79.7 +0.1.80.1 From caaed775aacfe68f5c753585fbfb5b0570cddacf Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 18:51:51 +0800 Subject: [PATCH 048/175] =?UTF-8?q?ui:=20=E6=A8=A1=E5=9E=8B=E9=99=90?= =?UTF-8?q?=E6=B5=81=E6=A0=87=E7=AD=BE=E6=AF=8F=E8=A1=8C=E6=9C=80=E5=A4=9A?= =?UTF-8?q?=E6=98=BE=E7=A4=BA3=E4=B8=AA=EF=BC=8C=E8=B6=85=E5=87=BA?= =?UTF-8?q?=E8=87=AA=E5=8A=A8=E6=8D=A2=E8=A1=8C?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- frontend/src/components/account/AccountStatusIndicator.vue | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/frontend/src/components/account/AccountStatusIndicator.vue b/frontend/src/components/account/AccountStatusIndicator.vue index 5fe96a1d..af32ea0c 100644 --- a/frontend/src/components/account/AccountStatusIndicator.vue +++ b/frontend/src/components/account/AccountStatusIndicator.vue @@ -77,7 +77,7 @@ - +
From 850e26776318f1c59cbca114999ac292d962a355 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 20:49:31 +0800 Subject: [PATCH 049/175] chore: bump version to 0.1.81.1 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 4f8bcf69..f6a03b1f 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.80.1 +0.1.81.1 From 807d0018ef72251c15d1a0e4234eab2f2612ff6a Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 22:17:15 +0800 Subject: [PATCH 050/175] =?UTF-8?q?docs:=20=E8=A1=A5=E5=85=85=E8=B4=A6?= =?UTF-8?q?=E5=8F=B7=E6=9B=B4=E6=96=B0/=E6=89=B9=E9=87=8F=E6=9B=B4?= =?UTF-8?q?=E6=96=B0=20API=20=E6=96=87=E6=A1=A3=EF=BC=8C=E6=B7=BB=E5=8A=A0?= =?UTF-8?q?=20API=20=E6=93=8D=E4=BD=9C=E6=B5=81=E7=A8=8B=E8=A7=84=E8=8C=83?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 新增 1.11 更新单个账号(PUT)和 1.12 批量更新账号(POST)接口文档 - 添加 API 操作流程规范:遇到新需求先探索接口、更新文档、再执行操作 --- AGENTS.md | 77 ++++++++++++++++++++++++++++++++++++++++++++++++++++++- CLAUDE.md | 77 ++++++++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 152 insertions(+), 2 deletions(-) diff --git a/AGENTS.md b/AGENTS.md index 0202e94f..3fdd88c7 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -511,6 +511,18 @@ docker stats sub2api ## Admin API 接口文档 +### ⚠️ API 操作流程规范 + +当收到操作正式环境 Web 界面的新需求,但文档中未记录对应 API 接口时,**必须按以下流程执行**: + +1. **探索接口**:通过代码库搜索路由定义(`backend/internal/server/routes/`)、Handler(`backend/internal/handler/admin/`)和请求结构体,确定正确的 API 端点、请求方法、请求体格式 +2. **更新文档**:将新发现的接口补充到本文档的 Admin API 接口文档章节中,包含端点、参数说明和 curl 示例 +3. 
**执行操作**:根据最新文档中记录的接口完成用户需求 + +> **目的**:避免每次遇到相同需求都重复探索代码库,确保 API 文档持续完善,后续操作可直接查阅文档执行。 + +--- + ### 认证方式 所有 Admin API 通过 `x-api-key` 请求头传递 Admin API Key 认证。 @@ -681,7 +693,70 @@ GET /api/v1/admin/accounts/:id/usage curl -s "${BASE}/api/v1/admin/accounts/1/usage" -H "x-api-key: ${KEY}" ``` -#### 1.11 批量测试账号(脚本) +#### 1.11 更新单个账号 + +``` +PUT /api/v1/admin/accounts/:id +``` + +**请求体**(JSON,所有字段均为可选,仅传需要更新的字段): + +| 字段 | 类型 | 说明 | +|------|------|------| +| `name` | string | 账号名称 | +| `notes` | *string | 备注 | +| `type` | string | 类型:`oauth` / `setup-token` / `apikey` / `upstream` | +| `credentials` | object | 凭证信息 | +| `extra` | object | 额外配置 | +| `proxy_id` | *int64 | 代理 ID | +| `concurrency` | *int | 并发数 | +| `priority` | *int | 优先级(默认 50) | +| `rate_multiplier` | *float64 | 速率倍数 | +| `status` | string | 状态:`active` / `inactive` | +| `group_ids` | *[]int64 | 分组 ID 列表 | +| `expires_at` | *int64 | 过期时间戳 | +| `auto_pause_on_expired` | *bool | 过期后自动暂停 | + +> 使用指针类型(`*`)的字段可以区分"未提供"和"设置为零值"。 + +```bash +# 示例:更新账号优先级为 100 +curl -X PUT "${BASE}/api/v1/admin/accounts/1" \ + -H "x-api-key: ${KEY}" \ + -H "Content-Type: application/json" \ + -d '{"priority": 100}' +``` + +#### 1.12 批量更新账号 + +``` +POST /api/v1/admin/accounts/bulk-update +``` + +**请求体**(JSON): + +| 字段 | 类型 | 必填 | 说明 | +|------|------|------|------| +| `account_ids` | []int64 | **是** | 要更新的账号 ID 列表 | +| `priority` | *int | 否 | 优先级 | +| `concurrency` | *int | 否 | 并发数 | +| `rate_multiplier` | *float64 | 否 | 速率倍数 | +| `status` | string | 否 | 状态:`active` / `inactive` / `error` | +| `schedulable` | *bool | 否 | 是否可调度 | +| `group_ids` | *[]int64 | 否 | 分组 ID 列表 | +| `proxy_id` | *int64 | 否 | 代理 ID | +| `credentials` | object | 否 | 凭证信息(批量覆盖) | +| `extra` | object | 否 | 额外配置(批量覆盖) | + +```bash +# 示例:批量设置多个账号优先级为 100 +curl -X POST "${BASE}/api/v1/admin/accounts/bulk-update" \ + -H "x-api-key: ${KEY}" \ + -H "Content-Type: application/json" \ + -d '{"account_ids": [1, 2, 3], "priority": 100}' +``` + +#### 1.13 批量测试账号(脚本) 
批量测试指定平台所有账号的指定模型连通性: diff --git a/CLAUDE.md b/CLAUDE.md index 737cdf19..4d96070e 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -552,6 +552,18 @@ docker stats sub2api ## Admin API 接口文档 +### ⚠️ API 操作流程规范 + +当收到操作正式环境 Web 界面的新需求,但文档中未记录对应 API 接口时,**必须按以下流程执行**: + +1. **探索接口**:通过代码库搜索路由定义(`backend/internal/server/routes/`)、Handler(`backend/internal/handler/admin/`)和请求结构体,确定正确的 API 端点、请求方法、请求体格式 +2. **更新文档**:将新发现的接口补充到本文档的 Admin API 接口文档章节中,包含端点、参数说明和 curl 示例 +3. **执行操作**:根据最新文档中记录的接口完成用户需求 + +> **目的**:避免每次遇到相同需求都重复探索代码库,确保 API 文档持续完善,后续操作可直接查阅文档执行。 + +--- + ### 认证方式 所有 Admin API 通过 `x-api-key` 请求头传递 Admin API Key 认证。 @@ -722,7 +734,70 @@ GET /api/v1/admin/accounts/:id/usage curl -s "${BASE}/api/v1/admin/accounts/1/usage" -H "x-api-key: ${KEY}" ``` -#### 1.11 批量测试账号(脚本) +#### 1.11 更新单个账号 + +``` +PUT /api/v1/admin/accounts/:id +``` + +**请求体**(JSON,所有字段均为可选,仅传需要更新的字段): + +| 字段 | 类型 | 说明 | +|------|------|------| +| `name` | string | 账号名称 | +| `notes` | *string | 备注 | +| `type` | string | 类型:`oauth` / `setup-token` / `apikey` / `upstream` | +| `credentials` | object | 凭证信息 | +| `extra` | object | 额外配置 | +| `proxy_id` | *int64 | 代理 ID | +| `concurrency` | *int | 并发数 | +| `priority` | *int | 优先级(默认 50) | +| `rate_multiplier` | *float64 | 速率倍数 | +| `status` | string | 状态:`active` / `inactive` | +| `group_ids` | *[]int64 | 分组 ID 列表 | +| `expires_at` | *int64 | 过期时间戳 | +| `auto_pause_on_expired` | *bool | 过期后自动暂停 | + +> 使用指针类型(`*`)的字段可以区分"未提供"和"设置为零值"。 + +```bash +# 示例:更新账号优先级为 100 +curl -X PUT "${BASE}/api/v1/admin/accounts/1" \ + -H "x-api-key: ${KEY}" \ + -H "Content-Type: application/json" \ + -d '{"priority": 100}' +``` + +#### 1.12 批量更新账号 + +``` +POST /api/v1/admin/accounts/bulk-update +``` + +**请求体**(JSON): + +| 字段 | 类型 | 必填 | 说明 | +|------|------|------|------| +| `account_ids` | []int64 | **是** | 要更新的账号 ID 列表 | +| `priority` | *int | 否 | 优先级 | +| `concurrency` | *int | 否 | 并发数 | +| `rate_multiplier` | *float64 | 否 | 速率倍数 | +| `status` | string | 否 | 状态:`active` / 
`inactive` / `error` | +| `schedulable` | *bool | 否 | 是否可调度 | +| `group_ids` | *[]int64 | 否 | 分组 ID 列表 | +| `proxy_id` | *int64 | 否 | 代理 ID | +| `credentials` | object | 否 | 凭证信息(批量覆盖) | +| `extra` | object | 否 | 额外配置(批量覆盖) | + +```bash +# 示例:批量设置多个账号优先级为 100 +curl -X POST "${BASE}/api/v1/admin/accounts/bulk-update" \ + -H "x-api-key: ${KEY}" \ + -H "Content-Type: application/json" \ + -d '{"account_ids": [1, 2, 3], "priority": 100}' +``` + +#### 1.13 批量测试账号(脚本) 批量测试指定平台所有账号的指定模型连通性: From a03c361b041bbb3c7f51f783130ab7e28dab284f Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 22:31:07 +0800 Subject: [PATCH 051/175] feat: add gemini model mapping whitelist for apikey and bulk edit --- .../service/gateway_multiplatform_test.go | 79 +++++++++++++++++++ backend/internal/service/gateway_service.go | 4 - .../account/BulkEditAccountModal.vue | 52 +++++++----- .../components/account/CreateAccountModal.vue | 32 +------- .../components/account/EditAccountModal.vue | 32 +------- frontend/src/types/index.ts | 1 + 6 files changed, 116 insertions(+), 84 deletions(-) diff --git a/backend/internal/service/gateway_multiplatform_test.go b/backend/internal/service/gateway_multiplatform_test.go index b4b93ace..e257b1a5 100644 --- a/backend/internal/service/gateway_multiplatform_test.go +++ b/backend/internal/service/gateway_multiplatform_test.go @@ -890,6 +890,55 @@ func TestGatewayService_SelectAccountForModelWithPlatform_GeminiPreferOAuth(t *t require.Equal(t, int64(2), acc.ID) } +func TestGatewayService_SelectAccountForModelWithPlatform_GeminiAPIKeyModelMappingFilter(t *testing.T) { + ctx := context.Background() + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + { + ID: 1, + Platform: PlatformGemini, + Type: AccountTypeAPIKey, + Priority: 1, + Status: StatusActive, + Schedulable: true, + Credentials: map[string]any{"model_mapping": map[string]any{"gemini-2.5-pro": "gemini-2.5-pro"}}, + }, + { + ID: 2, + Platform: PlatformGemini, + Type: 
AccountTypeAPIKey, + Priority: 2, + Status: StatusActive, + Schedulable: true, + Credentials: map[string]any{"model_mapping": map[string]any{"gemini-2.5-flash": "gemini-2.5-flash"}}, + }, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + acc, err := svc.selectAccountForModelWithPlatform(ctx, nil, "", "gemini-2.5-flash", nil, PlatformGemini) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(2), acc.ID, "应过滤不支持请求模型的 APIKey 账号") + + acc, err = svc.selectAccountForModelWithPlatform(ctx, nil, "", "gemini-3-pro-preview", nil, PlatformGemini) + require.Error(t, err) + require.Nil(t, acc) + require.Contains(t, err.Error(), "supporting model") +} + func TestGatewayService_SelectAccountForModelWithPlatform_StickyInGroup(t *testing.T) { ctx := context.Background() groupID := int64(50) @@ -1065,6 +1114,36 @@ func TestGatewayService_isModelSupportedByAccount(t *testing.T) { model: "claude-3-5-sonnet-20241022", expected: true, }, + { + name: "Gemini平台-无映射配置-支持所有模型", + account: &Account{Platform: PlatformGemini, Type: AccountTypeAPIKey}, + model: "gemini-2.5-flash", + expected: true, + }, + { + name: "Gemini平台-有映射配置-只支持配置的模型", + account: &Account{ + Platform: PlatformGemini, + Type: AccountTypeAPIKey, + Credentials: map[string]any{ + "model_mapping": map[string]any{"gemini-2.5-pro": "gemini-2.5-pro"}, + }, + }, + model: "gemini-2.5-flash", + expected: false, + }, + { + name: "Gemini平台-有映射配置-支持配置的模型", + account: &Account{ + Platform: PlatformGemini, + Type: AccountTypeAPIKey, + Credentials: map[string]any{ + "model_mapping": map[string]any{"gemini-2.5-pro": "gemini-2.5-pro"}, + }, + }, + model: "gemini-2.5-pro", + expected: true, + }, } for _, tt := range tests { diff --git a/backend/internal/service/gateway_service.go 
b/backend/internal/service/gateway_service.go index 56af4610..3141f71d 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -2547,10 +2547,6 @@ func (s *GatewayService) isModelSupportedByAccount(account *Account, requestedMo if account.Platform == PlatformAnthropic && account.Type != AccountTypeAPIKey { requestedModel = claude.NormalizeModelID(requestedModel) } - // Gemini API Key 账户直接透传,由上游判断模型是否支持 - if account.Platform == PlatformGemini && account.Type == AccountTypeAPIKey { - return true - } // 其他平台使用账户的模型支持检查 return account.IsModelSupported(requestedModel) } diff --git a/frontend/src/components/account/BulkEditAccountModal.vue b/frontend/src/components/account/BulkEditAccountModal.vue index 912eabb3..879a255c 100644 --- a/frontend/src/components/account/BulkEditAccountModal.vue +++ b/frontend/src/components/account/BulkEditAccountModal.vue @@ -654,6 +654,7 @@ import Select from '@/components/common/Select.vue' import ProxySelector from '@/components/common/ProxySelector.vue' import GroupSelector from '@/components/common/GroupSelector.vue' import Icon from '@/components/icons/Icon.vue' +import { buildModelMappingObject as buildModelMappingPayload } from '@/composables/useModelWhitelist' interface Props { show: boolean @@ -705,7 +706,7 @@ const rateMultiplier = ref(1) const status = ref<'active' | 'inactive'>('active') const groupIds = ref([]) -// All models list (combined Anthropic + OpenAI) +// All models list (combined Anthropic + OpenAI + Gemini) const allModels = [ { value: 'claude-opus-4-6', label: 'Claude Opus 4.6' }, { value: 'claude-opus-4-5-20251101', label: 'Claude Opus 4.5' }, @@ -722,10 +723,15 @@ const allModels = [ { value: 'gpt-5.1-codex', label: 'GPT-5.1 Codex' }, { value: 'gpt-5.1-2025-11-13', label: 'GPT-5.1' }, { value: 'gpt-5.1-codex-mini', label: 'GPT-5.1 Codex Mini' }, - { value: 'gpt-5-2025-08-07', label: 'GPT-5' } + { value: 'gpt-5-2025-08-07', label: 'GPT-5' }, + { value: 
'gemini-2.0-flash', label: 'Gemini 2.0 Flash' }, + { value: 'gemini-2.5-flash', label: 'Gemini 2.5 Flash' }, + { value: 'gemini-2.5-pro', label: 'Gemini 2.5 Pro' }, + { value: 'gemini-3-flash-preview', label: 'Gemini 3 Flash Preview' }, + { value: 'gemini-3-pro-preview', label: 'Gemini 3 Pro Preview' } ] -// Preset mappings (combined Anthropic + OpenAI) +// Preset mappings (combined Anthropic + OpenAI + Gemini) const presetMappings = [ { label: 'Sonnet 4', @@ -777,6 +783,24 @@ const presetMappings = [ from: 'gpt-5.1-codex-max', to: 'gpt-5.1-codex', color: 'bg-pink-100 text-pink-700 hover:bg-pink-200 dark:bg-pink-900/30 dark:text-pink-400' + }, + { + label: 'Gemini Flash 2.0', + from: 'gemini-2.0-flash', + to: 'gemini-2.0-flash', + color: 'bg-cyan-100 text-cyan-700 hover:bg-cyan-200 dark:bg-cyan-900/30 dark:text-cyan-400' + }, + { + label: 'Gemini 2.5 Flash', + from: 'gemini-2.5-flash', + to: 'gemini-2.5-flash', + color: 'bg-teal-100 text-teal-700 hover:bg-teal-200 dark:bg-teal-900/30 dark:text-teal-400' + }, + { + label: 'Gemini 2.5 Pro', + from: 'gemini-2.5-pro', + to: 'gemini-2.5-pro', + color: 'bg-sky-100 text-sky-700 hover:bg-sky-200 dark:bg-sky-900/30 dark:text-sky-400' } ] @@ -866,23 +890,11 @@ const removeErrorCode = (code: number) => { } const buildModelMappingObject = (): Record | null => { - const mapping: Record = {} - - if (modelRestrictionMode.value === 'whitelist') { - for (const model of allowedModels.value) { - mapping[model] = model - } - } else { - for (const m of modelMappings.value) { - const from = m.from.trim() - const to = m.to.trim() - if (from && to) { - mapping[from] = to - } - } - } - - return Object.keys(mapping).length > 0 ? 
mapping : null + return buildModelMappingPayload( + modelRestrictionMode.value, + allowedModels.value, + modelMappings.value + ) } const buildUpdatePayload = (): Record | null => { diff --git a/frontend/src/components/account/CreateAccountModal.vue b/frontend/src/components/account/CreateAccountModal.vue index af06abca..2d7d745c 100644 --- a/frontend/src/components/account/CreateAccountModal.vue +++ b/frontend/src/components/account/CreateAccountModal.vue @@ -862,8 +862,8 @@

{{ t('admin.accounts.gemini.tier.aiStudioHint') }}

- -
+ +
@@ -1135,34 +1135,6 @@
- -
-
-
- - - -
-

- {{ t('admin.accounts.gemini.modelPassthrough') }} -

-

- {{ t('admin.accounts.gemini.modelPassthroughDesc') }} -

-
-
-
-
diff --git a/frontend/src/components/account/EditAccountModal.vue b/frontend/src/components/account/EditAccountModal.vue index 60575f56..3395e82e 100644 --- a/frontend/src/components/account/EditAccountModal.vue +++ b/frontend/src/components/account/EditAccountModal.vue @@ -65,8 +65,8 @@

{{ t('admin.accounts.leaveEmptyToKeep') }}

- -
+ +
@@ -338,34 +338,6 @@
- -
-
-
- - - -
-

- {{ t('admin.accounts.gemini.modelPassthrough') }} -

-

- {{ t('admin.accounts.gemini.modelPassthroughDesc') }} -

-
-
-
-
diff --git a/frontend/src/types/index.ts b/frontend/src/types/index.ts index 189d8af1..a250820b 100644 --- a/frontend/src/types/index.ts +++ b/frontend/src/types/index.ts @@ -515,6 +515,7 @@ export interface ProxyAccountSummary { export interface GeminiCredentials { // API Key authentication api_key?: string + model_mapping?: Record // OAuth authentication access_token?: string From a747c63b8e2181c857d9c37cf8f309b66d5f38d4 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 22:31:07 +0800 Subject: [PATCH 052/175] feat: add gemini model mapping whitelist for apikey and bulk edit --- .../service/gateway_multiplatform_test.go | 79 +++++++++++++++++++ backend/internal/service/gateway_service.go | 4 - .../account/BulkEditAccountModal.vue | 52 +++++++----- .../components/account/CreateAccountModal.vue | 32 +------- .../components/account/EditAccountModal.vue | 32 +------- frontend/src/types/index.ts | 1 + 6 files changed, 116 insertions(+), 84 deletions(-) diff --git a/backend/internal/service/gateway_multiplatform_test.go b/backend/internal/service/gateway_multiplatform_test.go index b4b93ace..e257b1a5 100644 --- a/backend/internal/service/gateway_multiplatform_test.go +++ b/backend/internal/service/gateway_multiplatform_test.go @@ -890,6 +890,55 @@ func TestGatewayService_SelectAccountForModelWithPlatform_GeminiPreferOAuth(t *t require.Equal(t, int64(2), acc.ID) } +func TestGatewayService_SelectAccountForModelWithPlatform_GeminiAPIKeyModelMappingFilter(t *testing.T) { + ctx := context.Background() + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + { + ID: 1, + Platform: PlatformGemini, + Type: AccountTypeAPIKey, + Priority: 1, + Status: StatusActive, + Schedulable: true, + Credentials: map[string]any{"model_mapping": map[string]any{"gemini-2.5-pro": "gemini-2.5-pro"}}, + }, + { + ID: 2, + Platform: PlatformGemini, + Type: AccountTypeAPIKey, + Priority: 2, + Status: StatusActive, + Schedulable: true, + Credentials: 
map[string]any{"model_mapping": map[string]any{"gemini-2.5-flash": "gemini-2.5-flash"}}, + }, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + acc, err := svc.selectAccountForModelWithPlatform(ctx, nil, "", "gemini-2.5-flash", nil, PlatformGemini) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(2), acc.ID, "应过滤不支持请求模型的 APIKey 账号") + + acc, err = svc.selectAccountForModelWithPlatform(ctx, nil, "", "gemini-3-pro-preview", nil, PlatformGemini) + require.Error(t, err) + require.Nil(t, acc) + require.Contains(t, err.Error(), "supporting model") +} + func TestGatewayService_SelectAccountForModelWithPlatform_StickyInGroup(t *testing.T) { ctx := context.Background() groupID := int64(50) @@ -1065,6 +1114,36 @@ func TestGatewayService_isModelSupportedByAccount(t *testing.T) { model: "claude-3-5-sonnet-20241022", expected: true, }, + { + name: "Gemini平台-无映射配置-支持所有模型", + account: &Account{Platform: PlatformGemini, Type: AccountTypeAPIKey}, + model: "gemini-2.5-flash", + expected: true, + }, + { + name: "Gemini平台-有映射配置-只支持配置的模型", + account: &Account{ + Platform: PlatformGemini, + Type: AccountTypeAPIKey, + Credentials: map[string]any{ + "model_mapping": map[string]any{"gemini-2.5-pro": "gemini-2.5-pro"}, + }, + }, + model: "gemini-2.5-flash", + expected: false, + }, + { + name: "Gemini平台-有映射配置-支持配置的模型", + account: &Account{ + Platform: PlatformGemini, + Type: AccountTypeAPIKey, + Credentials: map[string]any{ + "model_mapping": map[string]any{"gemini-2.5-pro": "gemini-2.5-pro"}, + }, + }, + model: "gemini-2.5-pro", + expected: true, + }, } for _, tt := range tests { diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 56af4610..3141f71d 100644 --- 
a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -2547,10 +2547,6 @@ func (s *GatewayService) isModelSupportedByAccount(account *Account, requestedMo if account.Platform == PlatformAnthropic && account.Type != AccountTypeAPIKey { requestedModel = claude.NormalizeModelID(requestedModel) } - // Gemini API Key 账户直接透传,由上游判断模型是否支持 - if account.Platform == PlatformGemini && account.Type == AccountTypeAPIKey { - return true - } // 其他平台使用账户的模型支持检查 return account.IsModelSupported(requestedModel) } diff --git a/frontend/src/components/account/BulkEditAccountModal.vue b/frontend/src/components/account/BulkEditAccountModal.vue index 912eabb3..879a255c 100644 --- a/frontend/src/components/account/BulkEditAccountModal.vue +++ b/frontend/src/components/account/BulkEditAccountModal.vue @@ -654,6 +654,7 @@ import Select from '@/components/common/Select.vue' import ProxySelector from '@/components/common/ProxySelector.vue' import GroupSelector from '@/components/common/GroupSelector.vue' import Icon from '@/components/icons/Icon.vue' +import { buildModelMappingObject as buildModelMappingPayload } from '@/composables/useModelWhitelist' interface Props { show: boolean @@ -705,7 +706,7 @@ const rateMultiplier = ref(1) const status = ref<'active' | 'inactive'>('active') const groupIds = ref([]) -// All models list (combined Anthropic + OpenAI) +// All models list (combined Anthropic + OpenAI + Gemini) const allModels = [ { value: 'claude-opus-4-6', label: 'Claude Opus 4.6' }, { value: 'claude-opus-4-5-20251101', label: 'Claude Opus 4.5' }, @@ -722,10 +723,15 @@ const allModels = [ { value: 'gpt-5.1-codex', label: 'GPT-5.1 Codex' }, { value: 'gpt-5.1-2025-11-13', label: 'GPT-5.1' }, { value: 'gpt-5.1-codex-mini', label: 'GPT-5.1 Codex Mini' }, - { value: 'gpt-5-2025-08-07', label: 'GPT-5' } + { value: 'gpt-5-2025-08-07', label: 'GPT-5' }, + { value: 'gemini-2.0-flash', label: 'Gemini 2.0 Flash' }, + { value: 'gemini-2.5-flash', label: 
'Gemini 2.5 Flash' }, + { value: 'gemini-2.5-pro', label: 'Gemini 2.5 Pro' }, + { value: 'gemini-3-flash-preview', label: 'Gemini 3 Flash Preview' }, + { value: 'gemini-3-pro-preview', label: 'Gemini 3 Pro Preview' } ] -// Preset mappings (combined Anthropic + OpenAI) +// Preset mappings (combined Anthropic + OpenAI + Gemini) const presetMappings = [ { label: 'Sonnet 4', @@ -777,6 +783,24 @@ const presetMappings = [ from: 'gpt-5.1-codex-max', to: 'gpt-5.1-codex', color: 'bg-pink-100 text-pink-700 hover:bg-pink-200 dark:bg-pink-900/30 dark:text-pink-400' + }, + { + label: 'Gemini Flash 2.0', + from: 'gemini-2.0-flash', + to: 'gemini-2.0-flash', + color: 'bg-cyan-100 text-cyan-700 hover:bg-cyan-200 dark:bg-cyan-900/30 dark:text-cyan-400' + }, + { + label: 'Gemini 2.5 Flash', + from: 'gemini-2.5-flash', + to: 'gemini-2.5-flash', + color: 'bg-teal-100 text-teal-700 hover:bg-teal-200 dark:bg-teal-900/30 dark:text-teal-400' + }, + { + label: 'Gemini 2.5 Pro', + from: 'gemini-2.5-pro', + to: 'gemini-2.5-pro', + color: 'bg-sky-100 text-sky-700 hover:bg-sky-200 dark:bg-sky-900/30 dark:text-sky-400' } ] @@ -866,23 +890,11 @@ const removeErrorCode = (code: number) => { } const buildModelMappingObject = (): Record | null => { - const mapping: Record = {} - - if (modelRestrictionMode.value === 'whitelist') { - for (const model of allowedModels.value) { - mapping[model] = model - } - } else { - for (const m of modelMappings.value) { - const from = m.from.trim() - const to = m.to.trim() - if (from && to) { - mapping[from] = to - } - } - } - - return Object.keys(mapping).length > 0 ? 
mapping : null + return buildModelMappingPayload( + modelRestrictionMode.value, + allowedModels.value, + modelMappings.value + ) } const buildUpdatePayload = (): Record | null => { diff --git a/frontend/src/components/account/CreateAccountModal.vue b/frontend/src/components/account/CreateAccountModal.vue index af06abca..2d7d745c 100644 --- a/frontend/src/components/account/CreateAccountModal.vue +++ b/frontend/src/components/account/CreateAccountModal.vue @@ -862,8 +862,8 @@

{{ t('admin.accounts.gemini.tier.aiStudioHint') }}

- -
+ +
@@ -1135,34 +1135,6 @@
- -
-
-
- - - -
-

- {{ t('admin.accounts.gemini.modelPassthrough') }} -

-

- {{ t('admin.accounts.gemini.modelPassthroughDesc') }} -

-
-
-
-
diff --git a/frontend/src/components/account/EditAccountModal.vue b/frontend/src/components/account/EditAccountModal.vue index 60575f56..3395e82e 100644 --- a/frontend/src/components/account/EditAccountModal.vue +++ b/frontend/src/components/account/EditAccountModal.vue @@ -65,8 +65,8 @@

{{ t('admin.accounts.leaveEmptyToKeep') }}

- -
+ +
@@ -338,34 +338,6 @@
- -
-
-
- - - -
-

- {{ t('admin.accounts.gemini.modelPassthrough') }} -

-

- {{ t('admin.accounts.gemini.modelPassthroughDesc') }} -

-
-
-
-
diff --git a/frontend/src/types/index.ts b/frontend/src/types/index.ts index 189d8af1..a250820b 100644 --- a/frontend/src/types/index.ts +++ b/frontend/src/types/index.ts @@ -515,6 +515,7 @@ export interface ProxyAccountSummary { export interface GeminiCredentials { // API Key authentication api_key?: string + model_mapping?: Record // OAuth authentication access_token?: string From 0c33d18a4db3b331ad932fda9ca7da69c68d27f8 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 22:31:27 +0800 Subject: [PATCH 053/175] chore: bump version to 0.1.81.2 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index f6a03b1f..867fbb60 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.81.1 +0.1.81.2 From 7f03319646d4b693d881282d92504c9cd5302b48 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 23:06:59 +0800 Subject: [PATCH 054/175] =?UTF-8?q?Revert=20"fix:=20=E5=B9=B6=E5=8F=91/?= =?UTF-8?q?=E6=8E=92=E9=98=9F=E9=9D=A2=E6=9D=BF=E6=94=AF=E6=8C=81=20platfo?= =?UTF-8?q?rm/group=20=E8=BF=87=E6=BB=A4"?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 86e600aa5248163518dc2b549939614910dfcda8. 
--- .../handler/admin/ops_realtime_handler.go | 17 +---- backend/internal/service/ops_concurrency.go | 70 +------------------ frontend/src/api/admin/ops.ts | 12 +--- .../ops/components/OpsConcurrencyCard.vue | 10 +-- 4 files changed, 6 insertions(+), 103 deletions(-) diff --git a/backend/internal/handler/admin/ops_realtime_handler.go b/backend/internal/handler/admin/ops_realtime_handler.go index 2d3cce4b..c175dcd0 100644 --- a/backend/internal/handler/admin/ops_realtime_handler.go +++ b/backend/internal/handler/admin/ops_realtime_handler.go @@ -65,10 +65,6 @@ func (h *OpsHandler) GetConcurrencyStats(c *gin.Context) { // GetUserConcurrencyStats returns real-time concurrency usage for all active users. // GET /api/v1/admin/ops/user-concurrency -// -// Query params: -// - platform: optional, filter users by allowed platform -// - group_id: optional, filter users by allowed group func (h *OpsHandler) GetUserConcurrencyStats(c *gin.Context) { if h.opsService == nil { response.Error(c, http.StatusServiceUnavailable, "Ops service not available") @@ -88,18 +84,7 @@ func (h *OpsHandler) GetUserConcurrencyStats(c *gin.Context) { return } - platformFilter := strings.TrimSpace(c.Query("platform")) - var groupID *int64 - if v := strings.TrimSpace(c.Query("group_id")); v != "" { - id, err := strconv.ParseInt(v, 10, 64) - if err != nil || id <= 0 { - response.BadRequest(c, "Invalid group_id") - return - } - groupID = &id - } - - users, collectedAt, err := h.opsService.GetUserConcurrencyStats(c.Request.Context(), platformFilter, groupID) + users, collectedAt, err := h.opsService.GetUserConcurrencyStats(c.Request.Context()) if err != nil { response.ErrorFrom(c, err) return diff --git a/backend/internal/service/ops_concurrency.go b/backend/internal/service/ops_concurrency.go index faac2d5b..f6541d08 100644 --- a/backend/internal/service/ops_concurrency.go +++ b/backend/internal/service/ops_concurrency.go @@ -344,16 +344,8 @@ func (s *OpsService) getUsersLoadMapBestEffort(ctx 
context.Context, users []User return out } -// GetUserConcurrencyStats returns real-time concurrency usage for active users. -// -// Optional filters: -// - platformFilter: only include users who have access to groups belonging to that platform -// - groupIDFilter: only include users who have access to that specific group -func (s *OpsService) GetUserConcurrencyStats( - ctx context.Context, - platformFilter string, - groupIDFilter *int64, -) (map[int64]*UserConcurrencyInfo, *time.Time, error) { +// GetUserConcurrencyStats returns real-time concurrency usage for all active users. +func (s *OpsService) GetUserConcurrencyStats(ctx context.Context) (map[int64]*UserConcurrencyInfo, *time.Time, error) { if err := s.RequireMonitoringEnabled(ctx); err != nil { return nil, nil, err } @@ -363,15 +355,6 @@ func (s *OpsService) GetUserConcurrencyStats( return nil, nil, err } - // Build a set of allowed group IDs when filtering is requested. - var allowedGroupIDs map[int64]struct{} - if platformFilter != "" || (groupIDFilter != nil && *groupIDFilter > 0) { - allowedGroupIDs, err = s.buildAllowedGroupIDsForFilter(ctx, platformFilter, groupIDFilter) - if err != nil { - return nil, nil, err - } - } - collectedAt := time.Now() loadMap := s.getUsersLoadMapBestEffort(ctx, users) @@ -382,12 +365,6 @@ func (s *OpsService) GetUserConcurrencyStats( continue } - // Apply group/platform filter: skip users whose AllowedGroups - // have no intersection with the matching group IDs. - if allowedGroupIDs != nil && !userMatchesGroupFilter(u.AllowedGroups, allowedGroupIDs) { - continue - } - load := loadMap[u.ID] currentInUse := int64(0) waiting := int64(0) @@ -417,46 +394,3 @@ func (s *OpsService) GetUserConcurrencyStats( return result, &collectedAt, nil } - -// buildAllowedGroupIDsForFilter returns the set of group IDs that match the given -// platform and/or group ID filter. 
It reuses listAllAccountsForOps (which already -// supports platform filtering at the DB level) to collect group IDs from accounts. -func (s *OpsService) buildAllowedGroupIDsForFilter(ctx context.Context, platformFilter string, groupIDFilter *int64) (map[int64]struct{}, error) { - // Fast path: only group ID filter, no platform filter needed. - if platformFilter == "" && groupIDFilter != nil && *groupIDFilter > 0 { - return map[int64]struct{}{*groupIDFilter: {}}, nil - } - - // Use the same account-based approach as GetConcurrencyStats to collect group IDs. - accounts, err := s.listAllAccountsForOps(ctx, platformFilter) - if err != nil { - return nil, err - } - - groupIDs := make(map[int64]struct{}) - for _, acc := range accounts { - for _, grp := range acc.Groups { - if grp == nil || grp.ID <= 0 { - continue - } - // If groupIDFilter is set, only include that specific group. - if groupIDFilter != nil && *groupIDFilter > 0 && grp.ID != *groupIDFilter { - continue - } - groupIDs[grp.ID] = struct{}{} - } - } - - return groupIDs, nil -} - -// userMatchesGroupFilter returns true if the user's AllowedGroups contains -// at least one group ID in the allowed set. 
-func userMatchesGroupFilter(userGroups []int64, allowedGroupIDs map[int64]struct{}) bool { - for _, gid := range userGroups { - if _, ok := allowedGroupIDs[gid]; ok { - return true - } - } - return false -} diff --git a/frontend/src/api/admin/ops.ts b/frontend/src/api/admin/ops.ts index 523fbd00..9f980a12 100644 --- a/frontend/src/api/admin/ops.ts +++ b/frontend/src/api/admin/ops.ts @@ -366,16 +366,8 @@ export async function getConcurrencyStats(platform?: string, groupId?: number | return data } -export async function getUserConcurrencyStats(platform?: string, groupId?: number | null): Promise { - const params: Record = {} - if (platform) { - params.platform = platform - } - if (typeof groupId === 'number' && groupId > 0) { - params.group_id = groupId - } - - const { data } = await apiClient.get('/admin/ops/user-concurrency', { params }) +export async function getUserConcurrencyStats(): Promise { + const { data } = await apiClient.get('/admin/ops/user-concurrency') return data } diff --git a/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue b/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue index 0956caa5..ca640ade 100644 --- a/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue +++ b/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue @@ -265,7 +265,7 @@ async function loadData() { try { if (showByUser.value) { // 用户视图模式只加载用户并发数据 - const userData = await opsAPI.getUserConcurrencyStats(props.platformFilter, props.groupIdFilter) + const userData = await opsAPI.getUserConcurrencyStats() userConcurrency.value = userData } else { // 常规模式加载账号/平台/分组数据 @@ -301,14 +301,6 @@ watch( } ) -// 过滤条件变化时重新加载数据 -watch( - [() => props.platformFilter, () => props.groupIdFilter], - () => { - loadData() - } -) - function getLoadBarClass(loadPct: number): string { if (loadPct >= 90) return 'bg-red-500 dark:bg-red-600' if (loadPct >= 70) return 'bg-orange-500 dark:bg-orange-600' From 51e903c34e092761ece3fa659bc303aec94ef6c3 Mon Sep 17 
00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 23:26:20 +0800 Subject: [PATCH 055/175] =?UTF-8?q?Revert=20"fix:=20=E5=B9=B6=E5=8F=91/?= =?UTF-8?q?=E6=8E=92=E9=98=9F=E9=9D=A2=E6=9D=BF=E6=94=AF=E6=8C=81=20platfo?= =?UTF-8?q?rm/group=20=E8=BF=87=E6=BB=A4"?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 86e600aa5248163518dc2b549939614910dfcda8. --- .../handler/admin/ops_realtime_handler.go | 17 +---- backend/internal/service/ops_concurrency.go | 70 +------------------ frontend/src/api/admin/ops.ts | 12 +--- .../ops/components/OpsConcurrencyCard.vue | 10 +-- 4 files changed, 6 insertions(+), 103 deletions(-) diff --git a/backend/internal/handler/admin/ops_realtime_handler.go b/backend/internal/handler/admin/ops_realtime_handler.go index 2d3cce4b..c175dcd0 100644 --- a/backend/internal/handler/admin/ops_realtime_handler.go +++ b/backend/internal/handler/admin/ops_realtime_handler.go @@ -65,10 +65,6 @@ func (h *OpsHandler) GetConcurrencyStats(c *gin.Context) { // GetUserConcurrencyStats returns real-time concurrency usage for all active users. 
// GET /api/v1/admin/ops/user-concurrency -// -// Query params: -// - platform: optional, filter users by allowed platform -// - group_id: optional, filter users by allowed group func (h *OpsHandler) GetUserConcurrencyStats(c *gin.Context) { if h.opsService == nil { response.Error(c, http.StatusServiceUnavailable, "Ops service not available") @@ -88,18 +84,7 @@ func (h *OpsHandler) GetUserConcurrencyStats(c *gin.Context) { return } - platformFilter := strings.TrimSpace(c.Query("platform")) - var groupID *int64 - if v := strings.TrimSpace(c.Query("group_id")); v != "" { - id, err := strconv.ParseInt(v, 10, 64) - if err != nil || id <= 0 { - response.BadRequest(c, "Invalid group_id") - return - } - groupID = &id - } - - users, collectedAt, err := h.opsService.GetUserConcurrencyStats(c.Request.Context(), platformFilter, groupID) + users, collectedAt, err := h.opsService.GetUserConcurrencyStats(c.Request.Context()) if err != nil { response.ErrorFrom(c, err) return diff --git a/backend/internal/service/ops_concurrency.go b/backend/internal/service/ops_concurrency.go index faac2d5b..f6541d08 100644 --- a/backend/internal/service/ops_concurrency.go +++ b/backend/internal/service/ops_concurrency.go @@ -344,16 +344,8 @@ func (s *OpsService) getUsersLoadMapBestEffort(ctx context.Context, users []User return out } -// GetUserConcurrencyStats returns real-time concurrency usage for active users. -// -// Optional filters: -// - platformFilter: only include users who have access to groups belonging to that platform -// - groupIDFilter: only include users who have access to that specific group -func (s *OpsService) GetUserConcurrencyStats( - ctx context.Context, - platformFilter string, - groupIDFilter *int64, -) (map[int64]*UserConcurrencyInfo, *time.Time, error) { +// GetUserConcurrencyStats returns real-time concurrency usage for all active users. 
+func (s *OpsService) GetUserConcurrencyStats(ctx context.Context) (map[int64]*UserConcurrencyInfo, *time.Time, error) { if err := s.RequireMonitoringEnabled(ctx); err != nil { return nil, nil, err } @@ -363,15 +355,6 @@ func (s *OpsService) GetUserConcurrencyStats( return nil, nil, err } - // Build a set of allowed group IDs when filtering is requested. - var allowedGroupIDs map[int64]struct{} - if platformFilter != "" || (groupIDFilter != nil && *groupIDFilter > 0) { - allowedGroupIDs, err = s.buildAllowedGroupIDsForFilter(ctx, platformFilter, groupIDFilter) - if err != nil { - return nil, nil, err - } - } - collectedAt := time.Now() loadMap := s.getUsersLoadMapBestEffort(ctx, users) @@ -382,12 +365,6 @@ func (s *OpsService) GetUserConcurrencyStats( continue } - // Apply group/platform filter: skip users whose AllowedGroups - // have no intersection with the matching group IDs. - if allowedGroupIDs != nil && !userMatchesGroupFilter(u.AllowedGroups, allowedGroupIDs) { - continue - } - load := loadMap[u.ID] currentInUse := int64(0) waiting := int64(0) @@ -417,46 +394,3 @@ func (s *OpsService) GetUserConcurrencyStats( return result, &collectedAt, nil } - -// buildAllowedGroupIDsForFilter returns the set of group IDs that match the given -// platform and/or group ID filter. It reuses listAllAccountsForOps (which already -// supports platform filtering at the DB level) to collect group IDs from accounts. -func (s *OpsService) buildAllowedGroupIDsForFilter(ctx context.Context, platformFilter string, groupIDFilter *int64) (map[int64]struct{}, error) { - // Fast path: only group ID filter, no platform filter needed. - if platformFilter == "" && groupIDFilter != nil && *groupIDFilter > 0 { - return map[int64]struct{}{*groupIDFilter: {}}, nil - } - - // Use the same account-based approach as GetConcurrencyStats to collect group IDs. 
- accounts, err := s.listAllAccountsForOps(ctx, platformFilter) - if err != nil { - return nil, err - } - - groupIDs := make(map[int64]struct{}) - for _, acc := range accounts { - for _, grp := range acc.Groups { - if grp == nil || grp.ID <= 0 { - continue - } - // If groupIDFilter is set, only include that specific group. - if groupIDFilter != nil && *groupIDFilter > 0 && grp.ID != *groupIDFilter { - continue - } - groupIDs[grp.ID] = struct{}{} - } - } - - return groupIDs, nil -} - -// userMatchesGroupFilter returns true if the user's AllowedGroups contains -// at least one group ID in the allowed set. -func userMatchesGroupFilter(userGroups []int64, allowedGroupIDs map[int64]struct{}) bool { - for _, gid := range userGroups { - if _, ok := allowedGroupIDs[gid]; ok { - return true - } - } - return false -} diff --git a/frontend/src/api/admin/ops.ts b/frontend/src/api/admin/ops.ts index 523fbd00..9f980a12 100644 --- a/frontend/src/api/admin/ops.ts +++ b/frontend/src/api/admin/ops.ts @@ -366,16 +366,8 @@ export async function getConcurrencyStats(platform?: string, groupId?: number | return data } -export async function getUserConcurrencyStats(platform?: string, groupId?: number | null): Promise { - const params: Record = {} - if (platform) { - params.platform = platform - } - if (typeof groupId === 'number' && groupId > 0) { - params.group_id = groupId - } - - const { data } = await apiClient.get('/admin/ops/user-concurrency', { params }) +export async function getUserConcurrencyStats(): Promise { + const { data } = await apiClient.get('/admin/ops/user-concurrency') return data } diff --git a/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue b/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue index 0956caa5..ca640ade 100644 --- a/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue +++ b/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue @@ -265,7 +265,7 @@ async function loadData() { try { if (showByUser.value) { // 
用户视图模式只加载用户并发数据 - const userData = await opsAPI.getUserConcurrencyStats(props.platformFilter, props.groupIdFilter) + const userData = await opsAPI.getUserConcurrencyStats() userConcurrency.value = userData } else { // 常规模式加载账号/平台/分组数据 @@ -301,14 +301,6 @@ watch( } ) -// 过滤条件变化时重新加载数据 -watch( - [() => props.platformFilter, () => props.groupIdFilter], - () => { - loadData() - } -) - function getLoadBarClass(loadPct: number): string { if (loadPct >= 90) return 'bg-red-500 dark:bg-red-600' if (loadPct >= 70) return 'bg-orange-500 dark:bg-orange-600' From 9af4a55176d38d19519aa939fc5ad44d99382149 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 23:35:26 +0800 Subject: [PATCH 056/175] =?UTF-8?q?fix:=20gofmt=20=E6=A0=BC=E5=BC=8F?= =?UTF-8?q?=E4=BF=AE=E5=A4=8D=20gateway=5Fcache=5Fintegration=5Ftest.go?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/repository/gateway_cache_integration_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/backend/internal/repository/gateway_cache_integration_test.go b/backend/internal/repository/gateway_cache_integration_test.go index 2fdaa3d1..0eebc33f 100644 --- a/backend/internal/repository/gateway_cache_integration_test.go +++ b/backend/internal/repository/gateway_cache_integration_test.go @@ -104,7 +104,6 @@ func (s *GatewayCacheSuite) TestGetSessionAccountID_CorruptedValue() { require.False(s.T(), errors.Is(err, redis.Nil), "expected parsing error, not redis.Nil") } - func TestGatewayCacheSuite(t *testing.T) { suite.Run(t, new(GatewayCacheSuite)) } From 3a9f1c579617f996be5096e1d4ab73701c315464 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 23:37:37 +0800 Subject: [PATCH 057/175] chore: bump version to 0.1.81.3 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 867fbb60..ad55bdc8 100644 --- a/backend/cmd/server/VERSION +++ 
b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.81.2 +0.1.81.3 From daf7bf3e8b4cbe42a1be2334241e52ba36324565 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 11 Feb 2026 23:50:06 +0800 Subject: [PATCH 058/175] =?UTF-8?q?docs:=20Admin=20API=20Key=20=E6=94=B9?= =?UTF-8?q?=E4=B8=BA=E4=BB=8E=20.env=20=E6=96=87=E4=BB=B6=E8=AF=BB?= =?UTF-8?q?=E5=8F=96=EF=BC=8C=E9=81=BF=E5=85=8D=E6=98=8E=E6=96=87=E4=BC=A0?= =?UTF-8?q?=E9=80=92?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- AGENTS.md | 4 ++-- CLAUDE.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/AGENTS.md b/AGENTS.md index 3fdd88c7..85592334 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -531,7 +531,7 @@ docker stats sub2api x-api-key: admin-xxx ``` -> **使用说明**:用户提供 admin token 后,直接将其作为 `x-api-key` 的值使用。Token 格式为 `admin-` + 64 位十六进制字符,在管理后台 `设置 > Admin API Key` 中生成。**请勿将实际 token 写入文档或代码中。** +> **使用说明**:Admin API Key 统一存放在项目根目录 `.env` 文件的 `ADMIN_API_KEY` 变量中(该文件已被 `.gitignore` 排除,不会提交到代码库)。操作前先从 `.env` 读取密钥;若密钥失效(返回 401),应提示用户提供新的密钥并更新到 `.env` 中。Token 格式为 `admin-` + 64 位十六进制字符,在管理后台 `设置 > Admin API Key` 中生成。**请勿将实际 token 写入文档或代码中。** ### 环境地址 @@ -541,7 +541,7 @@ x-api-key: admin-xxx | Beta | `http://<服务器IP>:8084` | 仅内网访问 | | OpenAI | `http://<服务器IP>:8083` | 仅内网访问 | -> 以下接口文档中,`${BASE}` 代表环境基础地址,`${KEY}` 代表用户提供的 admin token。 +> 以下接口文档中,`${BASE}` 代表环境基础地址,`${KEY}` 代表 `.env` 中的 `ADMIN_API_KEY`。操作前执行 `source .env` 或 `export KEY=$ADMIN_API_KEY` 加载。 --- diff --git a/CLAUDE.md b/CLAUDE.md index 4d96070e..e8e9c93f 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -572,7 +572,7 @@ docker stats sub2api x-api-key: admin-xxx ``` -> **使用说明**:用户提供 admin token 后,直接将其作为 `x-api-key` 的值使用。Token 格式为 `admin-` + 64 位十六进制字符,在管理后台 `设置 > Admin API Key` 中生成。**请勿将实际 token 写入文档或代码中。** +> **使用说明**:Admin API Key 统一存放在项目根目录 `.env` 文件的 `ADMIN_API_KEY` 变量中(该文件已被 `.gitignore` 排除,不会提交到代码库)。操作前先从 `.env` 读取密钥;若密钥失效(返回 401),应提示用户提供新的密钥并更新到 `.env` 中。Token 格式为 `admin-` + 64 位十六进制字符,在管理后台 `设置 > 
Admin API Key` 中生成。**请勿将实际 token 写入文档或代码中。** ### 环境地址 @@ -582,7 +582,7 @@ x-api-key: admin-xxx | Beta | `http://<服务器IP>:8084` | 仅内网访问 | | OpenAI | `http://<服务器IP>:8083` | 仅内网访问 | -> 以下接口文档中,`${BASE}` 代表环境基础地址,`${KEY}` 代表用户提供的 admin token。 +> 以下接口文档中,`${BASE}` 代表环境基础地址,`${KEY}` 代表 `.env` 中的 `ADMIN_API_KEY`。操作前执行 `source .env` 或 `export KEY=$ADMIN_API_KEY` 加载。 --- From 34936189d8ab5238e9bef369bc8fa2efe34d1790 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Thu, 12 Feb 2026 02:33:20 +0800 Subject: [PATCH 059/175] =?UTF-8?q?fix(antigravity):=20=E5=9B=BA=E5=AE=9A?= =?UTF-8?q?=E6=8C=89=E6=98=A0=E5=B0=84=E6=A8=A1=E5=9E=8B=E8=AE=A1=E8=B4=B9?= =?UTF-8?q?=E5=B9=B6=E8=A1=A5=E5=85=85=E5=9B=9E=E5=BD=92=E6=B5=8B=E8=AF=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 当账号配置了 model_mapping 时,确保计费使用映射后的实际模型, 而非用户请求的原始模型名,避免计费不准确。 --- .github/workflows/security-scan.yml | 2 +- .../service/antigravity_gateway_service.go | 12 +- .../antigravity_gateway_service_test.go | 138 ++++++++++++++++++ 3 files changed, 145 insertions(+), 7 deletions(-) diff --git a/.github/workflows/security-scan.yml b/.github/workflows/security-scan.yml index 05dd1d1a..d0852eac 100644 --- a/.github/workflows/security-scan.yml +++ b/.github/workflows/security-scan.yml @@ -32,7 +32,7 @@ jobs: working-directory: backend run: | go install github.com/securego/gosec/v2/cmd/gosec@latest - gosec -severity high -confidence high ./... + gosec -severity high -confidence high -exclude=G704 ./... 
frontend-security: runs-on: ubuntu-latest diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 97245082..522f7fa4 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -85,7 +85,6 @@ var ( ) const ( - antigravityBillingModelEnv = "GATEWAY_ANTIGRAVITY_BILL_WITH_MAPPED_MODEL" antigravityForwardBaseURLEnv = "GATEWAY_ANTIGRAVITY_FORWARD_BASE_URL" antigravityFallbackSecondsEnv = "GATEWAY_ANTIGRAVITY_FALLBACK_COOLDOWN_SECONDS" ) @@ -1311,6 +1310,7 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, // 应用 thinking 模式自动后缀:如果 thinking 开启且目标是 claude-sonnet-4-5,自动改为 thinking 版本 thinkingEnabled := claudeReq.Thinking != nil && (claudeReq.Thinking.Type == "enabled" || claudeReq.Thinking.Type == "adaptive") mappedModel = applyThinkingModelSuffix(mappedModel, thinkingEnabled) + billingModel := mappedModel // 获取 access_token if s.tokenProvider == nil { @@ -1624,7 +1624,7 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, return &ForwardResult{ RequestID: requestID, Usage: *usage, - Model: originalModel, // 使用原始模型用于计费和日志 + Model: billingModel, // 使用映射模型用于计费和日志 Stream: claudeReq.Stream, Duration: time.Since(startTime), FirstTokenMs: firstTokenMs, @@ -1978,6 +1978,7 @@ func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Co if mappedModel == "" { return nil, s.writeGoogleError(c, http.StatusForbidden, fmt.Sprintf("model %s not in whitelist", originalModel)) } + billingModel := mappedModel // 获取 access_token if s.tokenProvider == nil { @@ -2207,7 +2208,7 @@ handleSuccess: return &ForwardResult{ RequestID: requestID, Usage: *usage, - Model: originalModel, + Model: billingModel, Stream: stream, Duration: time.Since(startTime), FirstTokenMs: firstTokenMs, @@ -3883,7 +3884,6 @@ func (s *AntigravityGatewayService) ForwardUpstream(ctx 
context.Context, c *gin. return nil, fmt.Errorf("missing model") } originalModel := claudeReq.Model - billingModel := originalModel // 构建上游请求 URL upstreamURL := baseURL + "/v1/messages" @@ -3936,7 +3936,7 @@ func (s *AntigravityGatewayService) ForwardUpstream(ctx context.Context, c *gin. _, _ = c.Writer.Write(respBody) return &ForwardResult{ - Model: billingModel, + Model: originalModel, }, nil } @@ -3977,7 +3977,7 @@ func (s *AntigravityGatewayService) ForwardUpstream(ctx context.Context, c *gin. log.Printf("%s status=success duration_ms=%d", prefix, duration.Milliseconds()) return &ForwardResult{ - Model: billingModel, + Model: originalModel, Stream: claudeReq.Stream, Duration: duration, FirstTokenMs: firstTokenMs, diff --git a/backend/internal/service/antigravity_gateway_service_test.go b/backend/internal/service/antigravity_gateway_service_test.go index b312e5ca..95d8f41b 100644 --- a/backend/internal/service/antigravity_gateway_service_test.go +++ b/backend/internal/service/antigravity_gateway_service_test.go @@ -133,6 +133,36 @@ func (s *httpUpstreamStub) DoWithTLS(_ *http.Request, _ string, _ int64, _ int, return s.resp, s.err } +type antigravitySettingRepoStub struct{} + +func (s *antigravitySettingRepoStub) Get(ctx context.Context, key string) (*Setting, error) { + panic("unexpected Get call") +} + +func (s *antigravitySettingRepoStub) GetValue(ctx context.Context, key string) (string, error) { + return "", ErrSettingNotFound +} + +func (s *antigravitySettingRepoStub) Set(ctx context.Context, key, value string) error { + panic("unexpected Set call") +} + +func (s *antigravitySettingRepoStub) GetMultiple(ctx context.Context, keys []string) (map[string]string, error) { + panic("unexpected GetMultiple call") +} + +func (s *antigravitySettingRepoStub) SetMultiple(ctx context.Context, settings map[string]string) error { + panic("unexpected SetMultiple call") +} + +func (s *antigravitySettingRepoStub) GetAll(ctx context.Context) (map[string]string, error) { + 
panic("unexpected GetAll call") +} + +func (s *antigravitySettingRepoStub) Delete(ctx context.Context, key string) error { + panic("unexpected Delete call") +} + func TestAntigravityGatewayService_Forward_PromptTooLong(t *testing.T) { gin.SetMode(gin.TestMode) writer := httptest.NewRecorder() @@ -159,6 +189,7 @@ func TestAntigravityGatewayService_Forward_PromptTooLong(t *testing.T) { } svc := &AntigravityGatewayService{ + settingService: NewSettingService(&antigravitySettingRepoStub{}, &config.Config{Gateway: config.GatewayConfig{MaxLineSize: defaultMaxLineSize}}), tokenProvider: &AntigravityTokenProvider{}, httpUpstream: &httpUpstreamStub{resp: resp}, } @@ -417,6 +448,113 @@ func TestAntigravityGatewayService_ForwardGemini_StickySessionForceCacheBilling( require.True(t, failoverErr.ForceCacheBilling, "ForceCacheBilling should be true for sticky session switch") } +// TestAntigravityGatewayService_Forward_BillsWithMappedModel +// 验证:Antigravity Claude 转发返回的计费模型使用映射后的模型 +func TestAntigravityGatewayService_Forward_BillsWithMappedModel(t *testing.T) { + gin.SetMode(gin.TestMode) + writer := httptest.NewRecorder() + c, _ := gin.CreateTestContext(writer) + + body, err := json.Marshal(map[string]any{ + "model": "claude-sonnet-4-5", + "messages": []map[string]any{ + {"role": "user", "content": "hello"}, + }, + "max_tokens": 16, + "stream": true, + }) + require.NoError(t, err) + + req := httptest.NewRequest(http.MethodPost, "/v1/messages", bytes.NewReader(body)) + c.Request = req + + upstreamBody := []byte("data: {\"response\":{\"candidates\":[{\"content\":{\"parts\":[{\"text\":\"ok\"}]},\"finishReason\":\"STOP\"}],\"usageMetadata\":{\"promptTokenCount\":8,\"candidatesTokenCount\":3}}}\n\n") + resp := &http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"X-Request-Id": []string{"req-bill-1"}}, + Body: io.NopCloser(bytes.NewReader(upstreamBody)), + } + + svc := &AntigravityGatewayService{ + settingService: NewSettingService(&antigravitySettingRepoStub{}, 
&config.Config{Gateway: config.GatewayConfig{MaxLineSize: defaultMaxLineSize}}), + tokenProvider: &AntigravityTokenProvider{}, + httpUpstream: &httpUpstreamStub{resp: resp}, + } + + const mappedModel = "gemini-3-pro-high" + account := &Account{ + ID: 5, + Name: "acc-forward-billing", + Platform: PlatformAntigravity, + Type: AccountTypeOAuth, + Status: StatusActive, + Concurrency: 1, + Credentials: map[string]any{ + "access_token": "token", + "model_mapping": map[string]any{ + "claude-sonnet-4-5": mappedModel, + }, + }, + } + + result, err := svc.Forward(context.Background(), c, account, body, false) + require.NoError(t, err) + require.NotNil(t, result) + require.Equal(t, mappedModel, result.Model) +} + +// TestAntigravityGatewayService_ForwardGemini_BillsWithMappedModel +// 验证:Antigravity Gemini 转发返回的计费模型使用映射后的模型 +func TestAntigravityGatewayService_ForwardGemini_BillsWithMappedModel(t *testing.T) { + gin.SetMode(gin.TestMode) + writer := httptest.NewRecorder() + c, _ := gin.CreateTestContext(writer) + + body, err := json.Marshal(map[string]any{ + "contents": []map[string]any{ + {"role": "user", "parts": []map[string]any{{"text": "hello"}}}, + }, + }) + require.NoError(t, err) + + req := httptest.NewRequest(http.MethodPost, "/v1beta/models/gemini-2.5-flash:generateContent", bytes.NewReader(body)) + c.Request = req + + upstreamBody := []byte("data: {\"response\":{\"candidates\":[{\"content\":{\"parts\":[{\"text\":\"ok\"}]},\"finishReason\":\"STOP\"}],\"usageMetadata\":{\"promptTokenCount\":8,\"candidatesTokenCount\":3}}}\n\n") + resp := &http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"X-Request-Id": []string{"req-bill-2"}}, + Body: io.NopCloser(bytes.NewReader(upstreamBody)), + } + + svc := &AntigravityGatewayService{ + settingService: NewSettingService(&antigravitySettingRepoStub{}, &config.Config{Gateway: config.GatewayConfig{MaxLineSize: defaultMaxLineSize}}), + tokenProvider: &AntigravityTokenProvider{}, + httpUpstream: &httpUpstreamStub{resp: 
resp}, + } + + const mappedModel = "gemini-3-pro-high" + account := &Account{ + ID: 6, + Name: "acc-gemini-billing", + Platform: PlatformAntigravity, + Type: AccountTypeOAuth, + Status: StatusActive, + Concurrency: 1, + Credentials: map[string]any{ + "access_token": "token", + "model_mapping": map[string]any{ + "gemini-2.5-flash": mappedModel, + }, + }, + } + + result, err := svc.ForwardGemini(context.Background(), c, account, "gemini-2.5-flash", "generateContent", true, body, false) + require.NoError(t, err) + require.NotNil(t, result) + require.Equal(t, mappedModel, result.Model) +} + // --- 流式 happy path 测试 --- // TestStreamUpstreamResponse_NormalComplete From 1abc688cad517ed697a5dbb113c6a2b6b7c9e8dd Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Thu, 12 Feb 2026 02:33:33 +0800 Subject: [PATCH 060/175] chore: bump version to 0.1.81.4 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index ad55bdc8..954dcb0b 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.81.3 +0.1.81.4 From 24dcba1d728e37f2afd9f09c2155d8665518da58 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Thu, 12 Feb 2026 18:56:31 +0800 Subject: [PATCH 061/175] chore: gofmt antigravity gateway tests --- .../service/antigravity_gateway_service_test.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/backend/internal/service/antigravity_gateway_service_test.go b/backend/internal/service/antigravity_gateway_service_test.go index 95d8f41b..af099ac3 100644 --- a/backend/internal/service/antigravity_gateway_service_test.go +++ b/backend/internal/service/antigravity_gateway_service_test.go @@ -190,8 +190,8 @@ func TestAntigravityGatewayService_Forward_PromptTooLong(t *testing.T) { svc := &AntigravityGatewayService{ settingService: NewSettingService(&antigravitySettingRepoStub{}, &config.Config{Gateway: config.GatewayConfig{MaxLineSize: 
defaultMaxLineSize}}), - tokenProvider: &AntigravityTokenProvider{}, - httpUpstream: &httpUpstreamStub{resp: resp}, + tokenProvider: &AntigravityTokenProvider{}, + httpUpstream: &httpUpstreamStub{resp: resp}, } account := &Account{ @@ -477,8 +477,8 @@ func TestAntigravityGatewayService_Forward_BillsWithMappedModel(t *testing.T) { svc := &AntigravityGatewayService{ settingService: NewSettingService(&antigravitySettingRepoStub{}, &config.Config{Gateway: config.GatewayConfig{MaxLineSize: defaultMaxLineSize}}), - tokenProvider: &AntigravityTokenProvider{}, - httpUpstream: &httpUpstreamStub{resp: resp}, + tokenProvider: &AntigravityTokenProvider{}, + httpUpstream: &httpUpstreamStub{resp: resp}, } const mappedModel = "gemini-3-pro-high" @@ -529,8 +529,8 @@ func TestAntigravityGatewayService_ForwardGemini_BillsWithMappedModel(t *testing svc := &AntigravityGatewayService{ settingService: NewSettingService(&antigravitySettingRepoStub{}, &config.Config{Gateway: config.GatewayConfig{MaxLineSize: defaultMaxLineSize}}), - tokenProvider: &AntigravityTokenProvider{}, - httpUpstream: &httpUpstreamStub{resp: resp}, + tokenProvider: &AntigravityTokenProvider{}, + httpUpstream: &httpUpstreamStub{resp: resp}, } const mappedModel = "gemini-3-pro-high" From 5e518f5fbd51cb69e2702876bd009defd72aa6b4 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Thu, 12 Feb 2026 18:56:55 +0800 Subject: [PATCH 062/175] feat: allow antigravity warmup intercept toggle - Show warmup-intercept toggle for antigravity accounts in admin UI\n- Add unit tests verifying antigravity accounts are intercepted on /v1/messages --- ...eway_handler_warmup_intercept_unit_test.go | 340 ++++++++++++++++++ .../components/account/CreateAccountModal.vue | 8 +- .../components/account/EditAccountModal.vue | 4 +- 3 files changed, 346 insertions(+), 6 deletions(-) create mode 100644 backend/internal/handler/gateway_handler_warmup_intercept_unit_test.go diff --git 
a/backend/internal/handler/gateway_handler_warmup_intercept_unit_test.go b/backend/internal/handler/gateway_handler_warmup_intercept_unit_test.go new file mode 100644 index 00000000..15d85949 --- /dev/null +++ b/backend/internal/handler/gateway_handler_warmup_intercept_unit_test.go @@ -0,0 +1,340 @@ +//go:build unit + +package handler + +import ( + "bytes" + "context" + "encoding/json" + "net/http/httptest" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + middleware "github.com/Wei-Shaw/sub2api/internal/server/middleware" + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/require" +) + +// 目标:严格验证“antigravity 账号通过 /v1/messages 提供 Claude 服务时”, +// 当账号 credentials.intercept_warmup_requests=true 且请求为 Warmup 时, +// 后端会在转发上游前直接拦截并返回 mock 响应(不依赖上游)。 + +type fakeSchedulerCache struct { + accounts []*service.Account +} + +func (f *fakeSchedulerCache) GetSnapshot(_ context.Context, _ service.SchedulerBucket) ([]*service.Account, bool, error) { + return f.accounts, true, nil +} +func (f *fakeSchedulerCache) SetSnapshot(_ context.Context, _ service.SchedulerBucket, _ []service.Account) error { + return nil +} +func (f *fakeSchedulerCache) GetAccount(_ context.Context, _ int64) (*service.Account, error) { + return nil, nil +} +func (f *fakeSchedulerCache) SetAccount(_ context.Context, _ *service.Account) error { return nil } +func (f *fakeSchedulerCache) DeleteAccount(_ context.Context, _ int64) error { return nil } +func (f *fakeSchedulerCache) UpdateLastUsed(_ context.Context, _ map[int64]time.Time) error { + return nil +} +func (f *fakeSchedulerCache) TryLockBucket(_ context.Context, _ service.SchedulerBucket, _ time.Duration) (bool, error) { + return true, nil +} +func (f *fakeSchedulerCache) ListBuckets(_ context.Context) ([]service.SchedulerBucket, error) { + return nil, nil +} 
+func (f *fakeSchedulerCache) GetOutboxWatermark(_ context.Context) (int64, error) { return 0, nil } +func (f *fakeSchedulerCache) SetOutboxWatermark(_ context.Context, _ int64) error { return nil } + +type fakeGroupRepo struct { + group *service.Group +} + +func (f *fakeGroupRepo) Create(context.Context, *service.Group) error { return nil } +func (f *fakeGroupRepo) GetByID(context.Context, int64) (*service.Group, error) { + return f.group, nil +} +func (f *fakeGroupRepo) GetByIDLite(context.Context, int64) (*service.Group, error) { + return f.group, nil +} +func (f *fakeGroupRepo) Update(context.Context, *service.Group) error { return nil } +func (f *fakeGroupRepo) Delete(context.Context, int64) error { return nil } +func (f *fakeGroupRepo) DeleteCascade(context.Context, int64) ([]int64, error) { return nil, nil } +func (f *fakeGroupRepo) List(context.Context, pagination.PaginationParams) ([]service.Group, *pagination.PaginationResult, error) { + return nil, nil, nil +} +func (f *fakeGroupRepo) ListWithFilters(context.Context, pagination.PaginationParams, string, string, string, *bool) ([]service.Group, *pagination.PaginationResult, error) { + return nil, nil, nil +} +func (f *fakeGroupRepo) ListActive(context.Context) ([]service.Group, error) { return nil, nil } +func (f *fakeGroupRepo) ListActiveByPlatform(context.Context, string) ([]service.Group, error) { + return nil, nil +} +func (f *fakeGroupRepo) ExistsByName(context.Context, string) (bool, error) { return false, nil } +func (f *fakeGroupRepo) GetAccountCount(context.Context, int64) (int64, error) { return 0, nil } +func (f *fakeGroupRepo) DeleteAccountGroupsByGroupID(context.Context, int64) (int64, error) { + return 0, nil +} +func (f *fakeGroupRepo) GetAccountIDsByGroupIDs(context.Context, []int64) ([]int64, error) { + return nil, nil +} +func (f *fakeGroupRepo) BindAccountsToGroup(context.Context, int64, []int64) error { return nil } +func (f *fakeGroupRepo) UpdateSortOrders(context.Context, 
[]service.GroupSortOrderUpdate) error { + return nil +} + +type fakeConcurrencyCache struct{} + +func (f *fakeConcurrencyCache) AcquireAccountSlot(context.Context, int64, int, string) (bool, error) { + return true, nil +} +func (f *fakeConcurrencyCache) ReleaseAccountSlot(context.Context, int64, string) error { return nil } +func (f *fakeConcurrencyCache) GetAccountConcurrency(context.Context, int64) (int, error) { + return 0, nil +} +func (f *fakeConcurrencyCache) IncrementAccountWaitCount(context.Context, int64, int) (bool, error) { + return true, nil +} +func (f *fakeConcurrencyCache) DecrementAccountWaitCount(context.Context, int64) error { return nil } +func (f *fakeConcurrencyCache) GetAccountWaitingCount(context.Context, int64) (int, error) { + return 0, nil +} +func (f *fakeConcurrencyCache) AcquireUserSlot(context.Context, int64, int, string) (bool, error) { + return true, nil +} +func (f *fakeConcurrencyCache) ReleaseUserSlot(context.Context, int64, string) error { return nil } +func (f *fakeConcurrencyCache) GetUserConcurrency(context.Context, int64) (int, error) { return 0, nil } +func (f *fakeConcurrencyCache) IncrementWaitCount(context.Context, int64, int) (bool, error) { + return true, nil +} +func (f *fakeConcurrencyCache) DecrementWaitCount(context.Context, int64) error { return nil } +func (f *fakeConcurrencyCache) GetAccountsLoadBatch(context.Context, []service.AccountWithConcurrency) (map[int64]*service.AccountLoadInfo, error) { + return map[int64]*service.AccountLoadInfo{}, nil +} +func (f *fakeConcurrencyCache) GetUsersLoadBatch(context.Context, []service.UserWithConcurrency) (map[int64]*service.UserLoadInfo, error) { + return map[int64]*service.UserLoadInfo{}, nil +} +func (f *fakeConcurrencyCache) CleanupExpiredAccountSlots(context.Context, int64) error { return nil } + +func newTestGatewayHandler(t *testing.T, group *service.Group, accounts []*service.Account) (*GatewayHandler, func()) { + t.Helper() + + schedulerCache := 
&fakeSchedulerCache{accounts: accounts} + schedulerSnapshot := service.NewSchedulerSnapshotService(schedulerCache, nil, nil, nil, nil) + + gwSvc := service.NewGatewayService( + nil, // accountRepo (not used: scheduler snapshot hit) + &fakeGroupRepo{group: group}, + nil, // usageLogRepo + nil, // userRepo + nil, // userSubRepo + nil, // userGroupRateRepo + nil, // cache (disable sticky) + nil, // cfg + schedulerSnapshot, + nil, // concurrencyService (disable load-aware; tryAcquire always acquired) + nil, // billingService + nil, // rateLimitService + nil, // billingCacheService + nil, // identityService + nil, // httpUpstream + nil, // deferredService + nil, // claudeTokenProvider + nil, // sessionLimitCache + nil, // digestStore + ) + + // RunModeSimple:跳过计费检查,避免引入 repo/cache 依赖。 + cfg := &config.Config{RunMode: config.RunModeSimple} + billingCacheSvc := service.NewBillingCacheService(nil, nil, nil, cfg) + + concurrencySvc := service.NewConcurrencyService(&fakeConcurrencyCache{}) + concurrencyHelper := NewConcurrencyHelper(concurrencySvc, SSEPingFormatClaude, 0) + + h := &GatewayHandler{ + gatewayService: gwSvc, + billingCacheService: billingCacheSvc, + concurrencyHelper: concurrencyHelper, + // 这些字段对本测试不敏感,保持较小即可 + maxAccountSwitches: 1, + maxAccountSwitchesGemini: 1, + } + + cleanup := func() { + billingCacheSvc.Stop() + } + return h, cleanup +} + +func TestGatewayHandlerMessages_InterceptWarmup_AntigravityAccount_MixedSchedulingV1(t *testing.T) { + gin.SetMode(gin.TestMode) + + groupID := int64(2001) + accountID := int64(1001) + + group := &service.Group{ + ID: groupID, + Hydrated: true, + Platform: service.PlatformAnthropic, // /v1/messages(Claude兼容)入口 + Status: service.StatusActive, + } + + account := &service.Account{ + ID: accountID, + Name: "ag-1", + Platform: service.PlatformAntigravity, + Type: service.AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "tok_xxx", + "intercept_warmup_requests": true, + }, + Extra: map[string]any{ + 
"mixed_scheduling": true, // 关键:允许被 anthropic 分组混合调度选中 + }, + Concurrency: 1, + Priority: 1, + Status: service.StatusActive, + Schedulable: true, + AccountGroups: []service.AccountGroup{{AccountID: accountID, GroupID: groupID}}, + } + + h, cleanup := newTestGatewayHandler(t, group, []*service.Account{account}) + defer cleanup() + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + + body := []byte(`{ + "model": "claude-sonnet-4-5", + "max_tokens": 256, + "messages": [{"role":"user","content":[{"type":"text","text":"Warmup"}]}] + }`) + req := httptest.NewRequest("POST", "/v1/messages", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + req = req.WithContext(context.WithValue(req.Context(), ctxkey.Group, group)) + c.Request = req + + apiKey := &service.APIKey{ + ID: 3001, + UserID: 4001, + GroupID: &groupID, + Status: service.StatusActive, + User: &service.User{ + ID: 4001, + Concurrency: 10, + Balance: 100, + }, + Group: group, + } + + c.Set(string(middleware.ContextKeyAPIKey), apiKey) + c.Set(string(middleware.ContextKeyUser), middleware.AuthSubject{UserID: apiKey.UserID, Concurrency: 10}) + + h.Messages(c) + + require.Equal(t, 200, rec.Code) + + // 断言:确实选中了 antigravity 账号(不是纯函数测试,而是从 Handler 里验证调度结果) + selected, ok := c.Get(opsAccountIDKey) + require.True(t, ok) + require.Equal(t, accountID, selected) + + var resp map[string]any + require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp)) + require.Equal(t, "msg_mock_warmup", resp["id"]) + require.Equal(t, "claude-sonnet-4-5", resp["model"]) + + content, ok := resp["content"].([]any) + require.True(t, ok) + require.Len(t, content, 1) + first, ok := content[0].(map[string]any) + require.True(t, ok) + require.Equal(t, "New Conversation", first["text"]) +} + +func TestGatewayHandlerMessages_InterceptWarmup_AntigravityAccount_ForcePlatform(t *testing.T) { + gin.SetMode(gin.TestMode) + + groupID := int64(2002) + accountID := int64(1002) + + group := &service.Group{ + ID: 
groupID, + Hydrated: true, + Platform: service.PlatformAntigravity, + Status: service.StatusActive, + } + + account := &service.Account{ + ID: accountID, + Name: "ag-2", + Platform: service.PlatformAntigravity, + Type: service.AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "tok_xxx", + "intercept_warmup_requests": true, + }, + Concurrency: 1, + Priority: 1, + Status: service.StatusActive, + Schedulable: true, + AccountGroups: []service.AccountGroup{{AccountID: accountID, GroupID: groupID}}, + } + + h, cleanup := newTestGatewayHandler(t, group, []*service.Account{account}) + defer cleanup() + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + + body := []byte(`{ + "model": "claude-sonnet-4-5", + "max_tokens": 256, + "messages": [{"role":"user","content":[{"type":"text","text":"Warmup"}]}] + }`) + req := httptest.NewRequest("POST", "/antigravity/v1/messages", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + + // 模拟 routes/gateway.go 里的 ForcePlatform 中间件效果: + // - 写入 request.Context(Service读取) + // - 写入 gin.Context(Handler快速读取) + ctx := context.WithValue(req.Context(), ctxkey.Group, group) + ctx = context.WithValue(ctx, ctxkey.ForcePlatform, service.PlatformAntigravity) + req = req.WithContext(ctx) + c.Request = req + c.Set(string(middleware.ContextKeyForcePlatform), service.PlatformAntigravity) + + apiKey := &service.APIKey{ + ID: 3002, + UserID: 4002, + GroupID: &groupID, + Status: service.StatusActive, + User: &service.User{ + ID: 4002, + Concurrency: 10, + Balance: 100, + }, + Group: group, + } + + c.Set(string(middleware.ContextKeyAPIKey), apiKey) + c.Set(string(middleware.ContextKeyUser), middleware.AuthSubject{UserID: apiKey.UserID, Concurrency: 10}) + + h.Messages(c) + + require.Equal(t, 200, rec.Code) + + selected, ok := c.Get(opsAccountIDKey) + require.True(t, ok) + require.Equal(t, accountID, selected) + + var resp map[string]any + require.NoError(t, json.Unmarshal(rec.Body.Bytes(), 
&resp)) + require.Equal(t, "msg_mock_warmup", resp["id"]) + require.Equal(t, "claude-sonnet-4-5", resp["model"]) +} diff --git a/frontend/src/components/account/CreateAccountModal.vue b/frontend/src/components/account/CreateAccountModal.vue index 2d7d745c..584db0c4 100644 --- a/frontend/src/components/account/CreateAccountModal.vue +++ b/frontend/src/components/account/CreateAccountModal.vue @@ -1285,9 +1285,9 @@ - +
@@ -2303,8 +2303,8 @@ watch( antigravityModelMappings.value = [] antigravityModelRestrictionMode.value = 'mapping' } - // Reset Anthropic-specific settings when switching to other platforms - if (newPlatform !== 'anthropic') { + // Reset Anthropic/Antigravity-specific settings when switching to other platforms + if (newPlatform !== 'anthropic' && newPlatform !== 'antigravity') { interceptWarmupRequests.value = false } // Reset OAuth states diff --git a/frontend/src/components/account/EditAccountModal.vue b/frontend/src/components/account/EditAccountModal.vue index 3395e82e..c5566c5d 100644 --- a/frontend/src/components/account/EditAccountModal.vue +++ b/frontend/src/components/account/EditAccountModal.vue @@ -602,9 +602,9 @@
- +
From 320ca28f90ac4079d729137a3629f8f56073f950 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Thu, 12 Feb 2026 19:30:44 +0800 Subject: [PATCH 063/175] fix: antigravity 429 fallback uses final model key --- .../service/antigravity_gateway_service.go | 11 ++++++++++- .../service/antigravity_rate_limit_test.go | 16 ++++++++++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 522f7fa4..d07f96d8 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -2653,7 +2653,16 @@ func (s *AntigravityGatewayService) handleUpstreamError( defaultDur := s.getDefaultRateLimitDuration() // 尝试解析模型 key 并设置模型级限流 - modelKey := resolveAntigravityModelKey(requestedModel) + // + // 注意:requestedModel 可能是“映射前”的请求模型名(例如 claude-opus-4-6), + // 调度与限流判定使用的是 Antigravity 最终模型名(包含映射与 thinking 后缀)。 + // 因此这里必须写入最终模型 key,确保后续调度能正确避开已限流模型。 + modelKey := resolveFinalAntigravityModelKey(ctx, account, requestedModel) + if strings.TrimSpace(modelKey) == "" { + // 极少数情况下无法映射(理论上不应发生:能转发成功说明映射已通过), + // 保持旧行为作为兜底,避免完全丢失模型级限流记录。 + modelKey = resolveAntigravityModelKey(requestedModel) + } if modelKey != "" { ra := s.resolveResetTime(resetAt, defaultDur) if err := s.accountRepo.SetModelRateLimit(ctx, account.ID, modelKey, ra); err != nil { diff --git a/backend/internal/service/antigravity_rate_limit_test.go b/backend/internal/service/antigravity_rate_limit_test.go index 0befa7d9..31a674d7 100644 --- a/backend/internal/service/antigravity_rate_limit_test.go +++ b/backend/internal/service/antigravity_rate_limit_test.go @@ -191,6 +191,22 @@ func TestHandleUpstreamError_429_NonModelRateLimit(t *testing.T) { require.Equal(t, "claude-sonnet-4-5", repo.modelRateLimitCalls[0].modelKey) } +func TestHandleUpstreamError_429_NonModelRateLimit_UsesMappedModelKey(t *testing.T) { + repo := 
&stubAntigravityAccountRepo{} + svc := &AntigravityGatewayService{accountRepo: repo} + account := &Account{ID: 20, Name: "acc-20", Platform: PlatformAntigravity} + + // 429 + 普通限流响应(无 RATE_LIMIT_EXCEEDED reason)→ 走模型级限流兜底 + // 场景:requestedModel 会被默认映射到 Antigravity 最终模型(例如 claude-opus-4-6 -> claude-opus-4-6-thinking) + body := buildGeminiRateLimitBody("5s") + + result := svc.handleUpstreamError(context.Background(), "[test]", account, http.StatusTooManyRequests, http.Header{}, body, "claude-opus-4-6", 0, "", false) + + require.Nil(t, result) + require.Len(t, repo.modelRateLimitCalls, 1) + require.Equal(t, "claude-opus-4-6-thinking", repo.modelRateLimitCalls[0].modelKey) +} + // TestHandleUpstreamError_503_ModelCapacityExhausted 测试 503 模型容量不足场景 // MODEL_CAPACITY_EXHAUSTED 时应等待重试,不切换账号 func TestHandleUpstreamError_503_ModelCapacityExhausted(t *testing.T) { From 428ee065d365482c1ff1cf93cb589f4322bf0362 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Thu, 12 Feb 2026 20:09:57 +0800 Subject: [PATCH 064/175] chore: bump version to 0.1.81.5 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 954dcb0b..b6f1aa76 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.81.4 +0.1.81.5 From ae770a625b076ee07d6eeaf4848d66f793c0184c Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Thu, 12 Feb 2026 23:49:41 +0800 Subject: [PATCH 065/175] fix(ui): mixed channel confirm for upstream create --- .../components/account/CreateAccountModal.vue | 25 +++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/frontend/src/components/account/CreateAccountModal.vue b/frontend/src/components/account/CreateAccountModal.vue index 584db0c4..6ed606f4 100644 --- a/frontend/src/components/account/CreateAccountModal.vue +++ b/frontend/src/components/account/CreateAccountModal.vue @@ -2795,7 +2795,7 @@ const createAccountAndFinish = async 
( if (!applyTempUnschedConfig(credentials)) { return } - await adminAPI.accounts.create({ + const payload: any = { name: form.name, notes: form.notes, platform, @@ -2809,7 +2809,28 @@ const createAccountAndFinish = async ( group_ids: form.group_ids, expires_at: form.expires_at, auto_pause_on_expired: autoPauseOnExpired.value - }) + } + + try { + await adminAPI.accounts.create(payload) + } catch (error: any) { + // Handle 409 mixed_channel_warning - show confirmation dialog + // Note: upstream Antigravity create path uses createAccountAndFinish directly, so mixed warning + // must be handled here as well (otherwise user only sees a generic "failed" toast). + if (error.response?.status === 409 && error.response?.data?.error === 'mixed_channel_warning') { + const details = error.response.data.details || {} + mixedChannelWarningDetails.value = { + groupName: details.group_name || 'Unknown', + currentPlatform: details.current_platform || 'Unknown', + otherPlatform: details.other_platform || 'Unknown' + } + pendingCreatePayload.value = payload + showMixedChannelWarning.value = true + return + } + throw error + } + appStore.showSuccess(t('admin.accounts.accountCreated')) emit('created') handleClose() From 5715587baf4de0e081bcc62d6ea9490cd8f846b8 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Thu, 12 Feb 2026 23:49:49 +0800 Subject: [PATCH 066/175] chore: bump version to 0.1.81.6 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index b6f1aa76..03154e4c 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.81.5 +0.1.81.6 From 6218eefd61728d5a338778d7f775ceca15c21937 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Fri, 13 Feb 2026 00:22:14 +0800 Subject: [PATCH 067/175] refactor(ui): extract mixed channel warning handler --- .../components/account/CreateAccountModal.vue | 87 ++++---------- .../components/account/EditAccountModal.vue 
| 62 ++++------ .../src/composables/useMixedChannelWarning.ts | 107 ++++++++++++++++++ 3 files changed, 152 insertions(+), 104 deletions(-) create mode 100644 frontend/src/composables/useMixedChannelWarning.ts diff --git a/frontend/src/components/account/CreateAccountModal.vue b/frontend/src/components/account/CreateAccountModal.vue index 6ed606f4..b576a24d 100644 --- a/frontend/src/components/account/CreateAccountModal.vue +++ b/frontend/src/components/account/CreateAccountModal.vue @@ -1964,6 +1964,7 @@ import { import { useOpenAIOAuth } from '@/composables/useOpenAIOAuth' import { useGeminiOAuth } from '@/composables/useGeminiOAuth' import { useAntigravityOAuth } from '@/composables/useAntigravityOAuth' +import { useMixedChannelWarning } from '@/composables/useMixedChannelWarning' import type { Proxy, AdminGroup, AccountPlatform, AccountType } from '@/types' import BaseDialog from '@/components/common/BaseDialog.vue' import ConfirmDialog from '@/components/common/ConfirmDialog.vue' @@ -2102,10 +2103,9 @@ const tempUnschedRules = ref([]) const geminiOAuthType = ref<'code_assist' | 'google_one' | 'ai_studio'>('google_one') const geminiAIStudioOAuthEnabled = ref(false) -// Mixed channel warning dialog state -const showMixedChannelWarning = ref(false) -const mixedChannelWarningDetails = ref<{ groupName: string; currentPlatform: string; otherPlatform: string } | null>(null) -const pendingCreatePayload = ref(null) +const mixedChannelWarning = useMixedChannelWarning() +const showMixedChannelWarning = mixedChannelWarning.show +const mixedChannelWarningDetails = mixedChannelWarning.details const showAdvancedOAuth = ref(false) const showGeminiHelpDialog = ref(false) @@ -2583,6 +2583,7 @@ const resetForm = () => { geminiOAuth.resetState() antigravityOAuth.resetState() oauthFlowRef.value?.reset() + mixedChannelWarning.cancel() } const handleClose = () => { @@ -2593,24 +2594,16 @@ const handleClose = () => { const doCreateAccount = async (payload: any) => { submitting.value 
= true try { - await adminAPI.accounts.create(payload) - appStore.showSuccess(t('admin.accounts.accountCreated')) - emit('created') - handleClose() - } catch (error: any) { - // Handle 409 mixed_channel_warning - show confirmation dialog - if (error.response?.status === 409 && error.response?.data?.error === 'mixed_channel_warning') { - const details = error.response.data.details || {} - mixedChannelWarningDetails.value = { - groupName: details.group_name || 'Unknown', - currentPlatform: details.current_platform || 'Unknown', - otherPlatform: details.other_platform || 'Unknown' + await mixedChannelWarning.tryRequest(payload, (p) => adminAPI.accounts.create(p), { + onSuccess: () => { + appStore.showSuccess(t('admin.accounts.accountCreated')) + emit('created') + handleClose() + }, + onError: (error: any) => { + appStore.showError(error.response?.data?.detail || t('admin.accounts.failedToCreate')) } - pendingCreatePayload.value = payload - showMixedChannelWarning.value = true - } else { - appStore.showError(error.response?.data?.detail || t('admin.accounts.failedToCreate')) - } + }) } finally { submitting.value = false } @@ -2618,28 +2611,16 @@ const doCreateAccount = async (payload: any) => { // Handle mixed channel warning confirmation const handleMixedChannelConfirm = async () => { - showMixedChannelWarning.value = false - if (pendingCreatePayload.value) { - pendingCreatePayload.value.confirm_mixed_channel_risk = true - submitting.value = true - try { - await adminAPI.accounts.create(pendingCreatePayload.value) - appStore.showSuccess(t('admin.accounts.accountCreated')) - emit('created') - handleClose() - } catch (error: any) { - appStore.showError(error.response?.data?.detail || t('admin.accounts.failedToCreate')) - } finally { - submitting.value = false - pendingCreatePayload.value = null - } + submitting.value = true + try { + await mixedChannelWarning.confirm() + } finally { + submitting.value = false } } const handleMixedChannelCancel = () => { - 
showMixedChannelWarning.value = false - pendingCreatePayload.value = null - mixedChannelWarningDetails.value = null + mixedChannelWarning.cancel() } const handleSubmit = async () => { @@ -2795,7 +2776,7 @@ const createAccountAndFinish = async ( if (!applyTempUnschedConfig(credentials)) { return } - const payload: any = { + await doCreateAccount({ name: form.name, notes: form.notes, platform, @@ -2809,31 +2790,7 @@ const createAccountAndFinish = async ( group_ids: form.group_ids, expires_at: form.expires_at, auto_pause_on_expired: autoPauseOnExpired.value - } - - try { - await adminAPI.accounts.create(payload) - } catch (error: any) { - // Handle 409 mixed_channel_warning - show confirmation dialog - // Note: upstream Antigravity create path uses createAccountAndFinish directly, so mixed warning - // must be handled here as well (otherwise user only sees a generic "failed" toast). - if (error.response?.status === 409 && error.response?.data?.error === 'mixed_channel_warning') { - const details = error.response.data.details || {} - mixedChannelWarningDetails.value = { - groupName: details.group_name || 'Unknown', - currentPlatform: details.current_platform || 'Unknown', - otherPlatform: details.other_platform || 'Unknown' - } - pendingCreatePayload.value = payload - showMixedChannelWarning.value = true - return - } - throw error - } - - appStore.showSuccess(t('admin.accounts.accountCreated')) - emit('created') - handleClose() + }) } // OpenAI OAuth 授权码兑换 diff --git a/frontend/src/components/account/EditAccountModal.vue b/frontend/src/components/account/EditAccountModal.vue index c5566c5d..8f761039 100644 --- a/frontend/src/components/account/EditAccountModal.vue +++ b/frontend/src/components/account/EditAccountModal.vue @@ -994,6 +994,7 @@ import ProxySelector from '@/components/common/ProxySelector.vue' import GroupSelector from '@/components/common/GroupSelector.vue' import ModelWhitelistSelector from '@/components/account/ModelWhitelistSelector.vue' import { 
formatDateTimeLocalInput, parseDateTimeLocalInput } from '@/utils/format' +import { useMixedChannelWarning } from '@/composables/useMixedChannelWarning' import { getPresetMappingsByPlatform, commonErrorCodes, @@ -1060,10 +1061,9 @@ const antigravityModelMappings = ref([]) const tempUnschedEnabled = ref(false) const tempUnschedRules = ref([]) -// Mixed channel warning dialog state -const showMixedChannelWarning = ref(false) -const mixedChannelWarningDetails = ref<{ groupName: string; currentPlatform: string; otherPlatform: string } | null>(null) -const pendingUpdatePayload = ref | null>(null) +const mixedChannelWarning = useMixedChannelWarning() +const showMixedChannelWarning = mixedChannelWarning.show +const mixedChannelWarningDetails = mixedChannelWarning.details // Quota control state (Anthropic OAuth/SetupToken only) const windowCostEnabled = ref(false) @@ -1525,11 +1525,13 @@ const parseDateTimeLocal = parseDateTimeLocalInput // Methods const handleClose = () => { + mixedChannelWarning.cancel() emit('close') } const handleSubmit = async () => { if (!props.account) return + const accountID = props.account.id submitting.value = true const updatePayload: Record = { ...form } @@ -1698,24 +1700,18 @@ const handleSubmit = async () => { updatePayload.extra = newExtra } - await adminAPI.accounts.update(props.account.id, updatePayload) - appStore.showSuccess(t('admin.accounts.accountUpdated')) - emit('updated') - handleClose() - } catch (error: any) { - // Handle 409 mixed_channel_warning - show confirmation dialog - if (error.response?.status === 409 && error.response?.data?.error === 'mixed_channel_warning') { - const details = error.response.data.details || {} - mixedChannelWarningDetails.value = { - groupName: details.group_name || 'Unknown', - currentPlatform: details.current_platform || 'Unknown', - otherPlatform: details.other_platform || 'Unknown' + await mixedChannelWarning.tryRequest(updatePayload, (p) => adminAPI.accounts.update(accountID, p), { + onSuccess: 
() => { + appStore.showSuccess(t('admin.accounts.accountUpdated')) + emit('updated') + handleClose() + }, + onError: (error: any) => { + appStore.showError(error.response?.data?.message || error.response?.data?.detail || t('admin.accounts.failedToUpdate')) } - pendingUpdatePayload.value = updatePayload - showMixedChannelWarning.value = true - } else { - appStore.showError(error.response?.data?.message || error.response?.data?.detail || t('admin.accounts.failedToUpdate')) - } + }) + } catch (error: any) { + appStore.showError(error.response?.data?.message || error.response?.data?.detail || t('admin.accounts.failedToUpdate')) } finally { submitting.value = false } @@ -1723,27 +1719,15 @@ const handleSubmit = async () => { // Handle mixed channel warning confirmation const handleMixedChannelConfirm = async () => { - showMixedChannelWarning.value = false - if (pendingUpdatePayload.value && props.account) { - pendingUpdatePayload.value.confirm_mixed_channel_risk = true - submitting.value = true - try { - await adminAPI.accounts.update(props.account.id, pendingUpdatePayload.value) - appStore.showSuccess(t('admin.accounts.accountUpdated')) - emit('updated') - handleClose() - } catch (error: any) { - appStore.showError(error.response?.data?.message || error.response?.data?.detail || t('admin.accounts.failedToUpdate')) - } finally { - submitting.value = false - pendingUpdatePayload.value = null - } + submitting.value = true + try { + await mixedChannelWarning.confirm() + } finally { + submitting.value = false } } const handleMixedChannelCancel = () => { - showMixedChannelWarning.value = false - pendingUpdatePayload.value = null - mixedChannelWarningDetails.value = null + mixedChannelWarning.cancel() } diff --git a/frontend/src/composables/useMixedChannelWarning.ts b/frontend/src/composables/useMixedChannelWarning.ts new file mode 100644 index 00000000..469b369b --- /dev/null +++ b/frontend/src/composables/useMixedChannelWarning.ts @@ -0,0 +1,107 @@ +import { ref } from 
'vue' + +export interface MixedChannelWarningDetails { + groupName: string + currentPlatform: string + otherPlatform: string +} + +function isMixedChannelWarningError(error: any): boolean { + return error?.response?.status === 409 && error?.response?.data?.error === 'mixed_channel_warning' +} + +function extractMixedChannelWarningDetails(error: any): MixedChannelWarningDetails { + const details = error?.response?.data?.details || {} + return { + groupName: details.group_name || 'Unknown', + currentPlatform: details.current_platform || 'Unknown', + otherPlatform: details.other_platform || 'Unknown' + } +} + +export function useMixedChannelWarning() { + const show = ref(false) + const details = ref(null) + + const pendingPayload = ref(null) + const pendingRequest = ref<((payload: any) => Promise) | null>(null) + const pendingOnSuccess = ref<(() => void) | null>(null) + const pendingOnError = ref<((error: any) => void) | null>(null) + + const clearPending = () => { + pendingPayload.value = null + pendingRequest.value = null + pendingOnSuccess.value = null + pendingOnError.value = null + details.value = null + } + + const tryRequest = async ( + payload: any, + request: (payload: any) => Promise, + opts?: { + onSuccess?: () => void + onError?: (error: any) => void + } + ): Promise => { + try { + await request(payload) + opts?.onSuccess?.() + return true + } catch (error: any) { + if (isMixedChannelWarningError(error)) { + details.value = extractMixedChannelWarningDetails(error) + pendingPayload.value = payload + pendingRequest.value = request + pendingOnSuccess.value = opts?.onSuccess || null + pendingOnError.value = opts?.onError || null + show.value = true + return false + } + + if (opts?.onError) { + opts.onError(error) + return false + } + throw error + } + } + + const confirm = async (): Promise => { + show.value = false + if (!pendingPayload.value || !pendingRequest.value) { + clearPending() + return false + } + + pendingPayload.value.confirm_mixed_channel_risk = 
true + + try { + await pendingRequest.value(pendingPayload.value) + pendingOnSuccess.value?.() + return true + } catch (error: any) { + if (pendingOnError.value) { + pendingOnError.value(error) + return false + } + throw error + } finally { + clearPending() + } + } + + const cancel = () => { + show.value = false + clearPending() + } + + return { + show, + details, + tryRequest, + confirm, + cancel + } +} + From 739c80227a74d4eb9f5cf6e25d3e162bb023cf9a Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Fri, 13 Feb 2026 00:23:15 +0800 Subject: [PATCH 068/175] fix(ui): reset mixed channel warning on close --- frontend/src/components/account/CreateAccountModal.vue | 1 + 1 file changed, 1 insertion(+) diff --git a/frontend/src/components/account/CreateAccountModal.vue b/frontend/src/components/account/CreateAccountModal.vue index b576a24d..345266de 100644 --- a/frontend/src/components/account/CreateAccountModal.vue +++ b/frontend/src/components/account/CreateAccountModal.vue @@ -2587,6 +2587,7 @@ const resetForm = () => { } const handleClose = () => { + mixedChannelWarning.cancel() emit('close') } From 496545188a574a2cc2f49317e3c27042daa4e414 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Fri, 13 Feb 2026 00:26:37 +0800 Subject: [PATCH 069/175] chore: bump version to 0.1.81.7 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 03154e4c..a91c933f 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.81.6 +0.1.81.7 From 8cb7356bbc05025ad5a24c2b75a479747814dfdd Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Fri, 13 Feb 2026 03:05:45 +0800 Subject: [PATCH 070/175] feat: add mixed-channel precheck flow for antigravity accounts --- .../internal/handler/admin/account_handler.go | 69 +++++-- .../account_handler_mixed_channel_test.go | 147 ++++++++++++++ .../handler/admin/admin_service_stub_test.go | 47 +++-- 
backend/internal/server/routes/admin.go | 1 + backend/internal/service/admin_service.go | 6 + frontend/src/api/admin/accounts.ts | 15 +- .../components/account/CreateAccountModal.vue | 186 ++++++++++++++---- .../components/account/EditAccountModal.vue | 166 +++++++++++++--- .../src/composables/useMixedChannelWarning.ts | 107 ---------- frontend/src/types/index.ts | 20 ++ 10 files changed, 567 insertions(+), 197 deletions(-) create mode 100644 backend/internal/handler/admin/account_handler_mixed_channel_test.go delete mode 100644 frontend/src/composables/useMixedChannelWarning.ts diff --git a/backend/internal/handler/admin/account_handler.go b/backend/internal/handler/admin/account_handler.go index 85400c6f..efc0ec2f 100644 --- a/backend/internal/handler/admin/account_handler.go +++ b/backend/internal/handler/admin/account_handler.go @@ -133,6 +133,13 @@ type BulkUpdateAccountsRequest struct { ConfirmMixedChannelRisk *bool `json:"confirm_mixed_channel_risk"` // 用户确认混合渠道风险 } +// CheckMixedChannelRequest represents check mixed channel risk request +type CheckMixedChannelRequest struct { + Platform string `json:"platform" binding:"required"` + GroupIDs []int64 `json:"group_ids"` + AccountID *int64 `json:"account_id"` +} + // AccountWithConcurrency extends Account with real-time concurrency info type AccountWithConcurrency struct { *dto.Account @@ -278,6 +285,50 @@ func (h *AccountHandler) GetByID(c *gin.Context) { response.Success(c, dto.AccountFromService(account)) } +// CheckMixedChannel handles checking mixed channel risk for account-group binding. 
+// POST /api/v1/admin/accounts/check-mixed-channel +func (h *AccountHandler) CheckMixedChannel(c *gin.Context) { + var req CheckMixedChannelRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + if len(req.GroupIDs) == 0 { + response.Success(c, gin.H{"has_risk": false}) + return + } + + accountID := int64(0) + if req.AccountID != nil { + accountID = *req.AccountID + } + + err := h.adminService.CheckMixedChannelRisk(c.Request.Context(), accountID, req.Platform, req.GroupIDs) + if err != nil { + var mixedErr *service.MixedChannelError + if errors.As(err, &mixedErr) { + response.Success(c, gin.H{ + "has_risk": true, + "error": "mixed_channel_warning", + "message": mixedErr.Error(), + "details": gin.H{ + "group_id": mixedErr.GroupID, + "group_name": mixedErr.GroupName, + "current_platform": mixedErr.CurrentPlatform, + "other_platform": mixedErr.OtherPlatform, + }, + }) + return + } + + response.ErrorFrom(c, err) + return + } + + response.Success(c, gin.H{"has_risk": false}) +} + // Create handles creating a new account // POST /api/v1/admin/accounts func (h *AccountHandler) Create(c *gin.Context) { @@ -314,17 +365,10 @@ func (h *AccountHandler) Create(c *gin.Context) { // 检查是否为混合渠道错误 var mixedErr *service.MixedChannelError if errors.As(err, &mixedErr) { - // 返回特殊错误码要求确认 + // 创建接口仅返回最小必要字段,详细信息由专门检查接口提供 c.JSON(409, gin.H{ "error": "mixed_channel_warning", "message": mixedErr.Error(), - "details": gin.H{ - "group_id": mixedErr.GroupID, - "group_name": mixedErr.GroupName, - "current_platform": mixedErr.CurrentPlatform, - "other_platform": mixedErr.OtherPlatform, - }, - "require_confirmation": true, }) return } @@ -378,17 +422,10 @@ func (h *AccountHandler) Update(c *gin.Context) { // 检查是否为混合渠道错误 var mixedErr *service.MixedChannelError if errors.As(err, &mixedErr) { - // 返回特殊错误码要求确认 + // 更新接口仅返回最小必要字段,详细信息由专门检查接口提供 c.JSON(409, gin.H{ "error": "mixed_channel_warning", "message": mixedErr.Error(), 
- "details": gin.H{ - "group_id": mixedErr.GroupID, - "group_name": mixedErr.GroupName, - "current_platform": mixedErr.CurrentPlatform, - "other_platform": mixedErr.OtherPlatform, - }, - "require_confirmation": true, }) return } diff --git a/backend/internal/handler/admin/account_handler_mixed_channel_test.go b/backend/internal/handler/admin/account_handler_mixed_channel_test.go new file mode 100644 index 00000000..ad004844 --- /dev/null +++ b/backend/internal/handler/admin/account_handler_mixed_channel_test.go @@ -0,0 +1,147 @@ +package admin + +import ( + "bytes" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/require" +) + +func setupAccountMixedChannelRouter(adminSvc *stubAdminService) *gin.Engine { + gin.SetMode(gin.TestMode) + router := gin.New() + accountHandler := NewAccountHandler(adminSvc, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) + router.POST("/api/v1/admin/accounts/check-mixed-channel", accountHandler.CheckMixedChannel) + router.POST("/api/v1/admin/accounts", accountHandler.Create) + router.PUT("/api/v1/admin/accounts/:id", accountHandler.Update) + return router +} + +func TestAccountHandlerCheckMixedChannelNoRisk(t *testing.T) { + adminSvc := newStubAdminService() + router := setupAccountMixedChannelRouter(adminSvc) + + body, _ := json.Marshal(map[string]any{ + "platform": "antigravity", + "group_ids": []int64{27}, + }) + rec := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/accounts/check-mixed-channel", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusOK, rec.Code) + var resp map[string]any + require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp)) + require.Equal(t, float64(0), resp["code"]) + data, ok := resp["data"].(map[string]any) + require.True(t, ok) + require.Equal(t, false, 
data["has_risk"]) + require.Equal(t, int64(0), adminSvc.lastMixedCheck.accountID) + require.Equal(t, "antigravity", adminSvc.lastMixedCheck.platform) + require.Equal(t, []int64{27}, adminSvc.lastMixedCheck.groupIDs) +} + +func TestAccountHandlerCheckMixedChannelWithRisk(t *testing.T) { + adminSvc := newStubAdminService() + adminSvc.checkMixedErr = &service.MixedChannelError{ + GroupID: 27, + GroupName: "claude-max", + CurrentPlatform: "Antigravity", + OtherPlatform: "Anthropic", + } + router := setupAccountMixedChannelRouter(adminSvc) + + body, _ := json.Marshal(map[string]any{ + "platform": "antigravity", + "group_ids": []int64{27}, + "account_id": 99, + }) + rec := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/accounts/check-mixed-channel", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusOK, rec.Code) + var resp map[string]any + require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp)) + require.Equal(t, float64(0), resp["code"]) + data, ok := resp["data"].(map[string]any) + require.True(t, ok) + require.Equal(t, true, data["has_risk"]) + require.Equal(t, "mixed_channel_warning", data["error"]) + details, ok := data["details"].(map[string]any) + require.True(t, ok) + require.Equal(t, float64(27), details["group_id"]) + require.Equal(t, "claude-max", details["group_name"]) + require.Equal(t, "Antigravity", details["current_platform"]) + require.Equal(t, "Anthropic", details["other_platform"]) + require.Equal(t, int64(99), adminSvc.lastMixedCheck.accountID) +} + +func TestAccountHandlerCreateMixedChannelConflictSimplifiedResponse(t *testing.T) { + adminSvc := newStubAdminService() + adminSvc.createAccountErr = &service.MixedChannelError{ + GroupID: 27, + GroupName: "claude-max", + CurrentPlatform: "Antigravity", + OtherPlatform: "Anthropic", + } + router := setupAccountMixedChannelRouter(adminSvc) + + body, _ := 
json.Marshal(map[string]any{ + "name": "ag-oauth-1", + "platform": "antigravity", + "type": "oauth", + "credentials": map[string]any{"refresh_token": "rt"}, + "group_ids": []int64{27}, + }) + rec := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/accounts", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusConflict, rec.Code) + var resp map[string]any + require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp)) + require.Equal(t, "mixed_channel_warning", resp["error"]) + require.Contains(t, resp["message"], "mixed_channel_warning") + _, hasDetails := resp["details"] + _, hasRequireConfirmation := resp["require_confirmation"] + require.False(t, hasDetails) + require.False(t, hasRequireConfirmation) +} + +func TestAccountHandlerUpdateMixedChannelConflictSimplifiedResponse(t *testing.T) { + adminSvc := newStubAdminService() + adminSvc.updateAccountErr = &service.MixedChannelError{ + GroupID: 27, + GroupName: "claude-max", + CurrentPlatform: "Antigravity", + OtherPlatform: "Anthropic", + } + router := setupAccountMixedChannelRouter(adminSvc) + + body, _ := json.Marshal(map[string]any{ + "group_ids": []int64{27}, + }) + rec := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPut, "/api/v1/admin/accounts/3", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusConflict, rec.Code) + var resp map[string]any + require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp)) + require.Equal(t, "mixed_channel_warning", resp["error"]) + require.Contains(t, resp["message"], "mixed_channel_warning") + _, hasDetails := resp["details"] + _, hasRequireConfirmation := resp["require_confirmation"] + require.False(t, hasDetails) + require.False(t, hasRequireConfirmation) +} diff --git a/backend/internal/handler/admin/admin_service_stub_test.go 
b/backend/internal/handler/admin/admin_service_stub_test.go index cbbfe942..9c4c253a 100644 --- a/backend/internal/handler/admin/admin_service_stub_test.go +++ b/backend/internal/handler/admin/admin_service_stub_test.go @@ -10,19 +10,27 @@ import ( ) type stubAdminService struct { - users []service.User - apiKeys []service.APIKey - groups []service.Group - accounts []service.Account - proxies []service.Proxy - proxyCounts []service.ProxyWithAccountCount - redeems []service.RedeemCode - createdAccounts []*service.CreateAccountInput - createdProxies []*service.CreateProxyInput - updatedProxyIDs []int64 - updatedProxies []*service.UpdateProxyInput - testedProxyIDs []int64 - mu sync.Mutex + users []service.User + apiKeys []service.APIKey + groups []service.Group + accounts []service.Account + proxies []service.Proxy + proxyCounts []service.ProxyWithAccountCount + redeems []service.RedeemCode + createdAccounts []*service.CreateAccountInput + createdProxies []*service.CreateProxyInput + updatedProxyIDs []int64 + updatedProxies []*service.UpdateProxyInput + testedProxyIDs []int64 + createAccountErr error + updateAccountErr error + checkMixedErr error + lastMixedCheck struct { + accountID int64 + platform string + groupIDs []int64 + } + mu sync.Mutex } func newStubAdminService() *stubAdminService { @@ -188,11 +196,17 @@ func (s *stubAdminService) CreateAccount(ctx context.Context, input *service.Cre s.mu.Lock() s.createdAccounts = append(s.createdAccounts, input) s.mu.Unlock() + if s.createAccountErr != nil { + return nil, s.createAccountErr + } account := service.Account{ID: 300, Name: input.Name, Status: service.StatusActive} return &account, nil } func (s *stubAdminService) UpdateAccount(ctx context.Context, id int64, input *service.UpdateAccountInput) (*service.Account, error) { + if s.updateAccountErr != nil { + return nil, s.updateAccountErr + } account := service.Account{ID: id, Name: input.Name, Status: service.StatusActive} return &account, nil } @@ -224,6 +238,13 
@@ func (s *stubAdminService) BulkUpdateAccounts(ctx context.Context, input *servic return &service.BulkUpdateAccountsResult{Success: 1, Failed: 0, SuccessIDs: []int64{1}}, nil } +func (s *stubAdminService) CheckMixedChannelRisk(ctx context.Context, currentAccountID int64, currentAccountPlatform string, groupIDs []int64) error { + s.lastMixedCheck.accountID = currentAccountID + s.lastMixedCheck.platform = currentAccountPlatform + s.lastMixedCheck.groupIDs = append([]int64(nil), groupIDs...) + return s.checkMixedErr +} + func (s *stubAdminService) ListProxies(ctx context.Context, page, pageSize int, protocol, status, search string) ([]service.Proxy, int64, error) { search = strings.TrimSpace(strings.ToLower(search)) filtered := make([]service.Proxy, 0, len(s.proxies)) diff --git a/backend/internal/server/routes/admin.go b/backend/internal/server/routes/admin.go index 4509b4bc..693d997a 100644 --- a/backend/internal/server/routes/admin.go +++ b/backend/internal/server/routes/admin.go @@ -208,6 +208,7 @@ func registerAccountRoutes(admin *gin.RouterGroup, h *handler.Handlers) { accounts.GET("", h.Admin.Account.List) accounts.GET("/:id", h.Admin.Account.GetByID) accounts.POST("", h.Admin.Account.Create) + accounts.POST("/check-mixed-channel", h.Admin.Account.CheckMixedChannel) accounts.POST("/sync/crs", h.Admin.Account.SyncFromCRS) accounts.POST("/sync/crs/preview", h.Admin.Account.PreviewFromCRS) accounts.PUT("/:id", h.Admin.Account.Update) diff --git a/backend/internal/service/admin_service.go b/backend/internal/service/admin_service.go index 06354e1e..788e7f67 100644 --- a/backend/internal/service/admin_service.go +++ b/backend/internal/service/admin_service.go @@ -50,6 +50,7 @@ type AdminService interface { SetAccountError(ctx context.Context, id int64, errorMsg string) error SetAccountSchedulable(ctx context.Context, id int64, schedulable bool) (*Account, error) BulkUpdateAccounts(ctx context.Context, input *BulkUpdateAccountsInput) (*BulkUpdateAccountsResult, 
error) + CheckMixedChannelRisk(ctx context.Context, currentAccountID int64, currentAccountPlatform string, groupIDs []int64) error // Proxy management ListProxies(ctx context.Context, page, pageSize int, protocol, status, search string) ([]Proxy, int64, error) @@ -1706,6 +1707,11 @@ func (s *adminServiceImpl) checkMixedChannelRisk(ctx context.Context, currentAcc return nil } +// CheckMixedChannelRisk checks whether target groups contain mixed channels for the current account platform. +func (s *adminServiceImpl) CheckMixedChannelRisk(ctx context.Context, currentAccountID int64, currentAccountPlatform string, groupIDs []int64) error { + return s.checkMixedChannelRisk(ctx, currentAccountID, currentAccountPlatform, groupIDs) +} + func (s *adminServiceImpl) attachProxyLatency(ctx context.Context, proxies []ProxyWithAccountCount) { if s.proxyLatencyCache == nil || len(proxies) == 0 { return diff --git a/frontend/src/api/admin/accounts.ts b/frontend/src/api/admin/accounts.ts index 4cb1a6f2..f71ba4ac 100644 --- a/frontend/src/api/admin/accounts.ts +++ b/frontend/src/api/admin/accounts.ts @@ -15,7 +15,9 @@ import type { AccountUsageStatsResponse, TempUnschedulableStatus, AdminDataPayload, - AdminDataImportResult + AdminDataImportResult, + CheckMixedChannelRequest, + CheckMixedChannelResponse } from '@/types' /** @@ -80,6 +82,16 @@ export async function update(id: number, updates: UpdateAccountRequest): Promise return data } +/** + * Check mixed-channel risk for account-group binding. 
+ */ +export async function checkMixedChannelRisk( + payload: CheckMixedChannelRequest +): Promise { + const { data } = await apiClient.post('/admin/accounts/check-mixed-channel', payload) + return data +} + /** * Delete account * @param id - Account ID @@ -458,6 +470,7 @@ export const accountsAPI = { getById, create, update, + checkMixedChannelRisk, delete: deleteAccount, toggleStatus, testAccount, diff --git a/frontend/src/components/account/CreateAccountModal.vue b/frontend/src/components/account/CreateAccountModal.vue index 345266de..3c14d75d 100644 --- a/frontend/src/components/account/CreateAccountModal.vue +++ b/frontend/src/components/account/CreateAccountModal.vue @@ -1932,7 +1932,7 @@ ([]) const geminiOAuthType = ref<'code_assist' | 'google_one' | 'ai_studio'>('google_one') const geminiAIStudioOAuthEnabled = ref(false) -const mixedChannelWarning = useMixedChannelWarning() -const showMixedChannelWarning = mixedChannelWarning.show -const mixedChannelWarningDetails = mixedChannelWarning.details +const showMixedChannelWarning = ref(false) +const mixedChannelWarningDetails = ref<{ groupName: string; currentPlatform: string; otherPlatform: string } | null>( + null +) +const mixedChannelWarningRawMessage = ref('') +const mixedChannelWarningAction = ref<(() => Promise) | null>(null) +const antigravityMixedChannelConfirmed = ref(false) const showAdvancedOAuth = ref(false) const showGeminiHelpDialog = ref(false) @@ -2137,6 +2147,13 @@ const geminiSelectedTier = computed(() => { } }) +const mixedChannelWarningMessageText = computed(() => { + if (mixedChannelWarningDetails.value) { + return t('admin.accounts.mixedChannelWarning', mixedChannelWarningDetails.value) + } + return mixedChannelWarningRawMessage.value +}) + const geminiQuotaDocs = { codeAssist: 'https://developers.google.com/gemini-code-assist/resources/quotas', aiStudio: 'https://ai.google.dev/pricing', @@ -2528,6 +2545,105 @@ const splitTempUnschedKeywords = (value: string) => { .filter((item) => 
item.length > 0) } +const isAntigravityAccount = (platform: AccountPlatform) => platform === 'antigravity' + +const buildMixedChannelDetails = (resp?: CheckMixedChannelResponse) => { + const details = resp?.details + if (!details) { + return null + } + return { + groupName: details.group_name || 'Unknown', + currentPlatform: details.current_platform || 'Unknown', + otherPlatform: details.other_platform || 'Unknown' + } +} + +const clearMixedChannelDialog = () => { + showMixedChannelWarning.value = false + mixedChannelWarningDetails.value = null + mixedChannelWarningRawMessage.value = '' + mixedChannelWarningAction.value = null +} + +const openMixedChannelDialog = (opts: { + response?: CheckMixedChannelResponse + message?: string + onConfirm: () => Promise +}) => { + mixedChannelWarningDetails.value = buildMixedChannelDetails(opts.response) + mixedChannelWarningRawMessage.value = + opts.message || opts.response?.message || t('admin.accounts.failedToCreate') + mixedChannelWarningAction.value = opts.onConfirm + showMixedChannelWarning.value = true +} + +const withAntigravityConfirmFlag = (payload: CreateAccountRequest): CreateAccountRequest => { + if (isAntigravityAccount(payload.platform) && antigravityMixedChannelConfirmed.value) { + return { + ...payload, + confirm_mixed_channel_risk: true + } + } + const cloned = { ...payload } + delete cloned.confirm_mixed_channel_risk + return cloned +} + +const ensureAntigravityMixedChannelConfirmed = async (onConfirm: () => Promise): Promise => { + if (!isAntigravityAccount(form.platform)) { + return true + } + if (antigravityMixedChannelConfirmed.value) { + return true + } + + try { + const result = await adminAPI.accounts.checkMixedChannelRisk({ + platform: form.platform, + group_ids: form.group_ids + }) + if (!result.has_risk) { + return true + } + openMixedChannelDialog({ + response: result, + onConfirm: async () => { + antigravityMixedChannelConfirmed.value = true + await onConfirm() + } + }) + return false + } catch 
(error: any) { + appStore.showError(error.response?.data?.message || error.response?.data?.detail || t('admin.accounts.failedToCreate')) + return false + } +} + +const submitCreateAccount = async (payload: CreateAccountRequest) => { + submitting.value = true + try { + await adminAPI.accounts.create(withAntigravityConfirmFlag(payload)) + appStore.showSuccess(t('admin.accounts.accountCreated')) + emit('created') + handleClose() + } catch (error: any) { + if (error.response?.status === 409 && error.response?.data?.error === 'mixed_channel_warning' && isAntigravityAccount(form.platform)) { + openMixedChannelDialog({ + message: error.response?.data?.message, + onConfirm: async () => { + antigravityMixedChannelConfirmed.value = true + await submitCreateAccount(payload) + } + }) + return + } + appStore.showError(error.response?.data?.message || error.response?.data?.detail || t('admin.accounts.failedToCreate')) + } finally { + submitting.value = false + } +} + // Methods const resetForm = () => { step.value = 1 @@ -2583,45 +2699,45 @@ const resetForm = () => { geminiOAuth.resetState() antigravityOAuth.resetState() oauthFlowRef.value?.reset() - mixedChannelWarning.cancel() + antigravityMixedChannelConfirmed.value = false + clearMixedChannelDialog() } const handleClose = () => { - mixedChannelWarning.cancel() + antigravityMixedChannelConfirmed.value = false + clearMixedChannelDialog() emit('close') } // Helper function to create account with mixed channel warning handling -const doCreateAccount = async (payload: any) => { - submitting.value = true - try { - await mixedChannelWarning.tryRequest(payload, (p) => adminAPI.accounts.create(p), { - onSuccess: () => { - appStore.showSuccess(t('admin.accounts.accountCreated')) - emit('created') - handleClose() - }, - onError: (error: any) => { - appStore.showError(error.response?.data?.detail || t('admin.accounts.failedToCreate')) - } - }) - } finally { - submitting.value = false +const doCreateAccount = async (payload: 
CreateAccountRequest) => { + const canContinue = await ensureAntigravityMixedChannelConfirmed(async () => { + await submitCreateAccount(payload) + }) + if (!canContinue) { + return } + await submitCreateAccount(payload) } // Handle mixed channel warning confirmation const handleMixedChannelConfirm = async () => { + const action = mixedChannelWarningAction.value + if (!action) { + clearMixedChannelDialog() + return + } + clearMixedChannelDialog() submitting.value = true try { - await mixedChannelWarning.confirm() + await action() } finally { submitting.value = false } } const handleMixedChannelCancel = () => { - mixedChannelWarning.cancel() + clearMixedChannelDialog() } const handleSubmit = async () => { @@ -2631,6 +2747,12 @@ const handleSubmit = async () => { appStore.showError(t('admin.accounts.pleaseEnterAccountName')) return } + const canContinue = await ensureAntigravityMixedChannelConfirmed(async () => { + step.value = 2 + }) + if (!canContinue) { + return + } step.value = 2 return } @@ -2666,15 +2788,8 @@ const handleSubmit = async () => { credentials.model_mapping = antigravityModelMapping } - submitting.value = true - try { - const extra = mixedScheduling.value ? { mixed_scheduling: true } : undefined - await createAccountAndFinish(form.platform, 'apikey', credentials, extra) - } catch (error: any) { - appStore.showError(error.response?.data?.detail || t('admin.accounts.failedToCreate')) - } finally { - submitting.value = false - } + const extra = mixedScheduling.value ? { mixed_scheduling: true } : undefined + await createAccountAndFinish(form.platform, 'apikey', credentials, extra) return } @@ -2951,7 +3066,7 @@ const handleAntigravityValidateRT = async (refreshTokenInput: string) => { const accountName = refreshTokens.length > 1 ? 
`${form.name} #${i + 1}` : form.name // Note: Antigravity doesn't have buildExtraInfo, so we pass empty extra or rely on credentials - await adminAPI.accounts.create({ + const createPayload = withAntigravityConfirmFlag({ name: accountName, notes: form.notes, platform: 'antigravity', @@ -2966,6 +3081,7 @@ const handleAntigravityValidateRT = async (refreshTokenInput: string) => { expires_at: form.expires_at, auto_pause_on_expired: autoPauseOnExpired.value }) + await adminAPI.accounts.create(createPayload) successCount++ } catch (error: any) { failedCount++ diff --git a/frontend/src/components/account/EditAccountModal.vue b/frontend/src/components/account/EditAccountModal.vue index 8f761039..5c7faaed 100644 --- a/frontend/src/components/account/EditAccountModal.vue +++ b/frontend/src/components/account/EditAccountModal.vue @@ -970,7 +970,7 @@ ([]) const tempUnschedEnabled = ref(false) const tempUnschedRules = ref([]) -const mixedChannelWarning = useMixedChannelWarning() -const showMixedChannelWarning = mixedChannelWarning.show -const mixedChannelWarningDetails = mixedChannelWarning.details +const showMixedChannelWarning = ref(false) +const mixedChannelWarningDetails = ref<{ groupName: string; currentPlatform: string; otherPlatform: string } | null>( + null +) +const mixedChannelWarningRawMessage = ref('') +const mixedChannelWarningAction = ref<(() => Promise) | null>(null) +const antigravityMixedChannelConfirmed = ref(false) // Quota control state (Anthropic OAuth/SetupToken only) const windowCostEnabled = ref(false) @@ -1114,6 +1117,13 @@ const defaultBaseUrl = computed(() => { return 'https://api.anthropic.com' }) +const mixedChannelWarningMessageText = computed(() => { + if (mixedChannelWarningDetails.value) { + return t('admin.accounts.mixedChannelWarning', mixedChannelWarningDetails.value) + } + return mixedChannelWarningRawMessage.value +}) + const form = reactive({ name: '', notes: '', @@ -1143,6 +1153,11 @@ watch( () => props.account, (newAccount) => { if 
(newAccount) { + antigravityMixedChannelConfirmed.value = false + showMixedChannelWarning.value = false + mixedChannelWarningDetails.value = null + mixedChannelWarningRawMessage.value = '' + mixedChannelWarningAction.value = null form.name = newAccount.name form.notes = newAccount.notes || '' form.proxy_id = newAccount.proxy_id @@ -1520,20 +1535,123 @@ function toPositiveNumber(value: unknown) { return Math.trunc(num) } +const isAntigravityAccount = () => props.account?.platform === 'antigravity' + +const buildMixedChannelDetails = (resp?: CheckMixedChannelResponse) => { + const details = resp?.details + if (!details) { + return null + } + return { + groupName: details.group_name || 'Unknown', + currentPlatform: details.current_platform || 'Unknown', + otherPlatform: details.other_platform || 'Unknown' + } +} + +const clearMixedChannelDialog = () => { + showMixedChannelWarning.value = false + mixedChannelWarningDetails.value = null + mixedChannelWarningRawMessage.value = '' + mixedChannelWarningAction.value = null +} + +const openMixedChannelDialog = (opts: { + response?: CheckMixedChannelResponse + message?: string + onConfirm: () => Promise +}) => { + mixedChannelWarningDetails.value = buildMixedChannelDetails(opts.response) + mixedChannelWarningRawMessage.value = + opts.message || opts.response?.message || t('admin.accounts.failedToUpdate') + mixedChannelWarningAction.value = opts.onConfirm + showMixedChannelWarning.value = true +} + +const withAntigravityConfirmFlag = (payload: Record) => { + if (isAntigravityAccount() && antigravityMixedChannelConfirmed.value) { + return { + ...payload, + confirm_mixed_channel_risk: true + } + } + const cloned = { ...payload } + delete cloned.confirm_mixed_channel_risk + return cloned +} + +const ensureAntigravityMixedChannelConfirmed = async (onConfirm: () => Promise): Promise => { + if (!isAntigravityAccount()) { + return true + } + if (antigravityMixedChannelConfirmed.value) { + return true + } + if (!props.account) { + 
return false + } + + try { + const result = await adminAPI.accounts.checkMixedChannelRisk({ + platform: props.account.platform, + group_ids: form.group_ids, + account_id: props.account.id + }) + if (!result.has_risk) { + return true + } + openMixedChannelDialog({ + response: result, + onConfirm: async () => { + antigravityMixedChannelConfirmed.value = true + await onConfirm() + } + }) + return false + } catch (error: any) { + appStore.showError(error.response?.data?.message || error.response?.data?.detail || t('admin.accounts.failedToUpdate')) + return false + } +} + const formatDateTimeLocal = formatDateTimeLocalInput const parseDateTimeLocal = parseDateTimeLocalInput // Methods const handleClose = () => { - mixedChannelWarning.cancel() + antigravityMixedChannelConfirmed.value = false + clearMixedChannelDialog() emit('close') } +const submitUpdateAccount = async (accountID: number, updatePayload: Record) => { + submitting.value = true + try { + await adminAPI.accounts.update(accountID, withAntigravityConfirmFlag(updatePayload)) + appStore.showSuccess(t('admin.accounts.accountUpdated')) + emit('updated') + handleClose() + } catch (error: any) { + if (error.response?.status === 409 && error.response?.data?.error === 'mixed_channel_warning' && isAntigravityAccount()) { + openMixedChannelDialog({ + message: error.response?.data?.message, + onConfirm: async () => { + antigravityMixedChannelConfirmed.value = true + await submitUpdateAccount(accountID, updatePayload) + } + }) + return + } + appStore.showError(error.response?.data?.message || error.response?.data?.detail || t('admin.accounts.failedToUpdate')) + } finally { + submitting.value = false + } +} + const handleSubmit = async () => { if (!props.account) return const accountID = props.account.id - submitting.value = true const updatePayload: Record = { ...form } try { // 后端期望 proxy_id: 0 表示清除代理,而不是 null @@ -1565,7 +1683,6 @@ const handleSubmit = async () => { newCredentials.api_key = currentCredentials.api_key } 
else { appStore.showError(t('admin.accounts.apiKeyIsRequired')) - submitting.value = false return } @@ -1585,7 +1702,6 @@ const handleSubmit = async () => { newCredentials.intercept_warmup_requests = true } if (!applyTempUnschedConfig(newCredentials)) { - submitting.value = false return } @@ -1601,7 +1717,6 @@ const handleSubmit = async () => { } if (!applyTempUnschedConfig(newCredentials)) { - submitting.value = false return } @@ -1617,7 +1732,6 @@ const handleSubmit = async () => { delete newCredentials.intercept_warmup_requests } if (!applyTempUnschedConfig(newCredentials)) { - submitting.value = false return } @@ -1700,34 +1814,36 @@ const handleSubmit = async () => { updatePayload.extra = newExtra } - await mixedChannelWarning.tryRequest(updatePayload, (p) => adminAPI.accounts.update(accountID, p), { - onSuccess: () => { - appStore.showSuccess(t('admin.accounts.accountUpdated')) - emit('updated') - handleClose() - }, - onError: (error: any) => { - appStore.showError(error.response?.data?.message || error.response?.data?.detail || t('admin.accounts.failedToUpdate')) - } + const canContinue = await ensureAntigravityMixedChannelConfirmed(async () => { + await submitUpdateAccount(accountID, updatePayload) }) + if (!canContinue) { + return + } + + await submitUpdateAccount(accountID, updatePayload) } catch (error: any) { appStore.showError(error.response?.data?.message || error.response?.data?.detail || t('admin.accounts.failedToUpdate')) - } finally { - submitting.value = false } } // Handle mixed channel warning confirmation const handleMixedChannelConfirm = async () => { + const action = mixedChannelWarningAction.value + if (!action) { + clearMixedChannelDialog() + return + } + clearMixedChannelDialog() submitting.value = true try { - await mixedChannelWarning.confirm() + await action() } finally { submitting.value = false } } const handleMixedChannelCancel = () => { - mixedChannelWarning.cancel() + clearMixedChannelDialog() } diff --git 
a/frontend/src/composables/useMixedChannelWarning.ts b/frontend/src/composables/useMixedChannelWarning.ts deleted file mode 100644 index 469b369b..00000000 --- a/frontend/src/composables/useMixedChannelWarning.ts +++ /dev/null @@ -1,107 +0,0 @@ -import { ref } from 'vue' - -export interface MixedChannelWarningDetails { - groupName: string - currentPlatform: string - otherPlatform: string -} - -function isMixedChannelWarningError(error: any): boolean { - return error?.response?.status === 409 && error?.response?.data?.error === 'mixed_channel_warning' -} - -function extractMixedChannelWarningDetails(error: any): MixedChannelWarningDetails { - const details = error?.response?.data?.details || {} - return { - groupName: details.group_name || 'Unknown', - currentPlatform: details.current_platform || 'Unknown', - otherPlatform: details.other_platform || 'Unknown' - } -} - -export function useMixedChannelWarning() { - const show = ref(false) - const details = ref(null) - - const pendingPayload = ref(null) - const pendingRequest = ref<((payload: any) => Promise) | null>(null) - const pendingOnSuccess = ref<(() => void) | null>(null) - const pendingOnError = ref<((error: any) => void) | null>(null) - - const clearPending = () => { - pendingPayload.value = null - pendingRequest.value = null - pendingOnSuccess.value = null - pendingOnError.value = null - details.value = null - } - - const tryRequest = async ( - payload: any, - request: (payload: any) => Promise, - opts?: { - onSuccess?: () => void - onError?: (error: any) => void - } - ): Promise => { - try { - await request(payload) - opts?.onSuccess?.() - return true - } catch (error: any) { - if (isMixedChannelWarningError(error)) { - details.value = extractMixedChannelWarningDetails(error) - pendingPayload.value = payload - pendingRequest.value = request - pendingOnSuccess.value = opts?.onSuccess || null - pendingOnError.value = opts?.onError || null - show.value = true - return false - } - - if (opts?.onError) { - 
opts.onError(error) - return false - } - throw error - } - } - - const confirm = async (): Promise => { - show.value = false - if (!pendingPayload.value || !pendingRequest.value) { - clearPending() - return false - } - - pendingPayload.value.confirm_mixed_channel_risk = true - - try { - await pendingRequest.value(pendingPayload.value) - pendingOnSuccess.value?.() - return true - } catch (error: any) { - if (pendingOnError.value) { - pendingOnError.value(error) - return false - } - throw error - } finally { - clearPending() - } - } - - const cancel = () => { - show.value = false - clearPending() - } - - return { - show, - details, - tryRequest, - confirm, - cancel - } -} - diff --git a/frontend/src/types/index.ts b/frontend/src/types/index.ts index a250820b..100b1617 100644 --- a/frontend/src/types/index.ts +++ b/frontend/src/types/index.ts @@ -716,6 +716,26 @@ export interface UpdateAccountRequest { confirm_mixed_channel_risk?: boolean } +export interface CheckMixedChannelRequest { + platform: AccountPlatform + group_ids: number[] + account_id?: number +} + +export interface MixedChannelWarningDetails { + group_id: number + group_name: string + current_platform: string + other_platform: string +} + +export interface CheckMixedChannelResponse { + has_risk: boolean + error?: string + message?: string + details?: MixedChannelWarningDetails +} + export interface CreateProxyRequest { name: string protocol: ProxyProtocol From ef959bc3c663192027932575088f440c29cbb4f2 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Fri, 13 Feb 2026 03:12:31 +0800 Subject: [PATCH 071/175] chore: bump version to 0.1.81.8 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index a91c933f..b69cc393 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.81.7 +0.1.81.8 From ebb85cf84354c764211a65a8b107a31fde142095 Mon Sep 17 00:00:00 2001 From: liuxiongfeng 
Date: Fri, 13 Feb 2026 04:15:12 +0800 Subject: [PATCH 072/175] chore: bump version to 0.1.81.9 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index b69cc393..8bfe3bae 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.81.8 +0.1.81.9 From be6e8ff77b92d3f276c1d1ccb6c0c836521c43b7 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Sat, 14 Feb 2026 21:29:36 +0800 Subject: [PATCH 073/175] fix(ui): extend mixed channel check to both Antigravity and Anthropic accounts Problem: - When creating Anthropic Console accounts to groups with Antigravity accounts, the mixed channel warning was not triggered - Only Antigravity accounts triggered the confirmation dialog Solution: - Renamed isAntigravityAccount to needsMixedChannelCheck - Extended check to include both 'antigravity' and 'anthropic' platforms - Updated all 8 call sites in CreateAccountModal and EditAccountModal Changes: - frontend/src/components/account/CreateAccountModal.vue: 4 updates - frontend/src/components/account/EditAccountModal.vue: 4 updates Testing: - Frontend compilation: passed - Backend unit tests: passed - OAuth flow: confirmation parameter correctly passed in Step 2 - Backend compatibility: verified with checkMixedChannelRisk logic Impact: - Both Antigravity and Anthropic accounts now show mixed channel warning - OAuth accounts correctly pass confirm_mixed_channel_risk parameter - No breaking changes to existing functionality --- backend/cmd/server/VERSION | 2 +- frontend/src/components/account/CreateAccountModal.vue | 8 ++++---- frontend/src/components/account/EditAccountModal.vue | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 8bfe3bae..5d0156a7 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.81.9 +0.1.81.10 diff --git 
a/frontend/src/components/account/CreateAccountModal.vue b/frontend/src/components/account/CreateAccountModal.vue index 3c14d75d..88f45cca 100644 --- a/frontend/src/components/account/CreateAccountModal.vue +++ b/frontend/src/components/account/CreateAccountModal.vue @@ -2545,7 +2545,7 @@ const splitTempUnschedKeywords = (value: string) => { .filter((item) => item.length > 0) } -const isAntigravityAccount = (platform: AccountPlatform) => platform === 'antigravity' +const needsMixedChannelCheck = (platform: AccountPlatform) => platform === 'antigravity' || platform === 'anthropic' const buildMixedChannelDetails = (resp?: CheckMixedChannelResponse) => { const details = resp?.details @@ -2579,7 +2579,7 @@ const openMixedChannelDialog = (opts: { } const withAntigravityConfirmFlag = (payload: CreateAccountRequest): CreateAccountRequest => { - if (isAntigravityAccount(payload.platform) && antigravityMixedChannelConfirmed.value) { + if (needsMixedChannelCheck(payload.platform) && antigravityMixedChannelConfirmed.value) { return { ...payload, confirm_mixed_channel_risk: true @@ -2591,7 +2591,7 @@ const withAntigravityConfirmFlag = (payload: CreateAccountRequest): CreateAccoun } const ensureAntigravityMixedChannelConfirmed = async (onConfirm: () => Promise): Promise => { - if (!isAntigravityAccount(form.platform)) { + if (!needsMixedChannelCheck(form.platform)) { return true } if (antigravityMixedChannelConfirmed.value) { @@ -2628,7 +2628,7 @@ const submitCreateAccount = async (payload: CreateAccountRequest) => { emit('created') handleClose() } catch (error: any) { - if (error.response?.status === 409 && error.response?.data?.error === 'mixed_channel_warning' && isAntigravityAccount(form.platform)) { + if (error.response?.status === 409 && error.response?.data?.error === 'mixed_channel_warning' && needsMixedChannelCheck(form.platform)) { openMixedChannelDialog({ message: error.response?.data?.message, onConfirm: async () => { diff --git 
a/frontend/src/components/account/EditAccountModal.vue b/frontend/src/components/account/EditAccountModal.vue index 5c7faaed..9266a2de 100644 --- a/frontend/src/components/account/EditAccountModal.vue +++ b/frontend/src/components/account/EditAccountModal.vue @@ -1535,7 +1535,7 @@ function toPositiveNumber(value: unknown) { return Math.trunc(num) } -const isAntigravityAccount = () => props.account?.platform === 'antigravity' +const needsMixedChannelCheck = () => props.account?.platform === 'antigravity' || props.account?.platform === 'anthropic' const buildMixedChannelDetails = (resp?: CheckMixedChannelResponse) => { const details = resp?.details @@ -1569,7 +1569,7 @@ const openMixedChannelDialog = (opts: { } const withAntigravityConfirmFlag = (payload: Record) => { - if (isAntigravityAccount() && antigravityMixedChannelConfirmed.value) { + if (needsMixedChannelCheck() && antigravityMixedChannelConfirmed.value) { return { ...payload, confirm_mixed_channel_risk: true @@ -1581,7 +1581,7 @@ const withAntigravityConfirmFlag = (payload: Record) => { } const ensureAntigravityMixedChannelConfirmed = async (onConfirm: () => Promise): Promise => { - if (!isAntigravityAccount()) { + if (!needsMixedChannelCheck()) { return true } if (antigravityMixedChannelConfirmed.value) { @@ -1632,7 +1632,7 @@ const submitUpdateAccount = async (accountID: number, updatePayload: Record { From 0913cfc08280540b4a5ca4cf697b4034b2a33a72 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Sat, 14 Feb 2026 22:03:26 +0800 Subject: [PATCH 074/175] chore: bump version to 0.1.83.1 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 5d0156a7..d778f67c 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.81.10 +0.1.83.1 From c0cfa6acdea94e16971ccccc60a4b0ef3d57f991 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Wed, 18 Feb 2026 14:14:29 +0800 Subject: [PATCH 
075/175] chore: bump antigravity user-agent to 1.16.5, version to 0.1.83.2 --- backend/cmd/server/VERSION | 2 +- backend/internal/pkg/antigravity/oauth.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index d778f67c..f788a87d 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.83.1 +0.1.83.2 diff --git a/backend/internal/pkg/antigravity/oauth.go b/backend/internal/pkg/antigravity/oauth.go index d1712c98..313ffb11 100644 --- a/backend/internal/pkg/antigravity/oauth.go +++ b/backend/internal/pkg/antigravity/oauth.go @@ -33,7 +33,7 @@ const ( "https://www.googleapis.com/auth/experimentsandconfigs" // User-Agent(与 Antigravity-Manager 保持一致) - UserAgent = "antigravity/1.15.8 windows/amd64" + UserAgent = "antigravity/1.16.5 windows/amd64" // Session 过期时间 SessionTTL = 30 * time.Minute From 8b021c099d165519f5dbfcd7a9b9130bc1715be2 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Thu, 19 Feb 2026 22:47:32 +0800 Subject: [PATCH 076/175] chore: bump version to 0.1.84.3 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 474bcd6e..af8c1aa2 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.84.1 +0.1.84.3 From facae2a6dbe7eacd7bdf0ae29af09599fe9cd88f Mon Sep 17 00:00:00 2001 From: erio Date: Fri, 20 Feb 2026 00:16:46 +0800 Subject: [PATCH 077/175] =?UTF-8?q?feat:=20=E6=96=B0=E5=A2=9E=20claude-son?= =?UTF-8?q?net-4-6=20=E6=A8=A1=E5=9E=8B=E6=94=AF=E6=8C=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 后端默认映射、身份注入、迁移文件 - 前端模型列表和快捷映射按钮 --- backend/internal/domain/constants.go | 1 + .../pkg/antigravity/request_transformer.go | 1 + .../service/antigravity_model_mapping_test.go | 6 +++ .../056_add_sonnet46_to_model_mapping.sql | 42 +++++++++++++++++++ 
frontend/src/composables/useModelWhitelist.ts | 2 + 5 files changed, 52 insertions(+) create mode 100644 backend/migrations/056_add_sonnet46_to_model_mapping.sql diff --git a/backend/internal/domain/constants.go b/backend/internal/domain/constants.go index 05b5adc1..5963994e 100644 --- a/backend/internal/domain/constants.go +++ b/backend/internal/domain/constants.go @@ -73,6 +73,7 @@ var DefaultAntigravityModelMapping = map[string]string{ "claude-opus-4-6-thinking": "claude-opus-4-6-thinking", // 官方模型 "claude-opus-4-6": "claude-opus-4-6-thinking", // 简称映射 "claude-opus-4-5-thinking": "claude-opus-4-6-thinking", // 迁移旧模型 + "claude-sonnet-4-6": "claude-sonnet-4-6", "claude-sonnet-4-5": "claude-sonnet-4-5", "claude-sonnet-4-5-thinking": "claude-sonnet-4-5-thinking", // Claude 详细版本 ID 映射 diff --git a/backend/internal/pkg/antigravity/request_transformer.go b/backend/internal/pkg/antigravity/request_transformer.go index 3ba04b95..55cdd786 100644 --- a/backend/internal/pkg/antigravity/request_transformer.go +++ b/backend/internal/pkg/antigravity/request_transformer.go @@ -206,6 +206,7 @@ type modelInfo struct { var modelInfoMap = map[string]modelInfo{ "claude-opus-4-5": {DisplayName: "Claude Opus 4.5", CanonicalID: "claude-opus-4-5-20250929"}, "claude-opus-4-6": {DisplayName: "Claude Opus 4.6", CanonicalID: "claude-opus-4-6"}, + "claude-sonnet-4-6": {DisplayName: "Claude Sonnet 4.6", CanonicalID: "claude-sonnet-4-6"}, "claude-sonnet-4-5": {DisplayName: "Claude Sonnet 4.5", CanonicalID: "claude-sonnet-4-5-20250929"}, "claude-haiku-4-5": {DisplayName: "Claude Haiku 4.5", CanonicalID: "claude-haiku-4-5-20251001"}, } diff --git a/backend/internal/service/antigravity_model_mapping_test.go b/backend/internal/service/antigravity_model_mapping_test.go index f3621555..71939d26 100644 --- a/backend/internal/service/antigravity_model_mapping_test.go +++ b/backend/internal/service/antigravity_model_mapping_test.go @@ -76,6 +76,12 @@ func TestAntigravityGatewayService_GetMappedModel(t 
*testing.T) { }, // 3. 默认映射中的透传(映射到自己) + { + name: "默认映射透传 - claude-sonnet-4-6", + requestedModel: "claude-sonnet-4-6", + accountMapping: nil, + expected: "claude-sonnet-4-6", + }, { name: "默认映射透传 - claude-sonnet-4-5", requestedModel: "claude-sonnet-4-5", diff --git a/backend/migrations/056_add_sonnet46_to_model_mapping.sql b/backend/migrations/056_add_sonnet46_to_model_mapping.sql new file mode 100644 index 00000000..aa7657d7 --- /dev/null +++ b/backend/migrations/056_add_sonnet46_to_model_mapping.sql @@ -0,0 +1,42 @@ +-- Add claude-sonnet-4-6 to model_mapping for all Antigravity accounts +-- +-- Background: +-- Antigravity now supports claude-sonnet-4-6 +-- +-- Strategy: +-- Directly overwrite the entire model_mapping with updated mappings +-- This ensures consistency with DefaultAntigravityModelMapping in constants.go + +UPDATE accounts +SET credentials = jsonb_set( + credentials, + '{model_mapping}', + '{ + "claude-opus-4-6-thinking": "claude-opus-4-6-thinking", + "claude-opus-4-6": "claude-opus-4-6-thinking", + "claude-opus-4-5-thinking": "claude-opus-4-6-thinking", + "claude-opus-4-5-20251101": "claude-opus-4-6-thinking", + "claude-sonnet-4-6": "claude-sonnet-4-6", + "claude-sonnet-4-5": "claude-sonnet-4-5", + "claude-sonnet-4-5-thinking": "claude-sonnet-4-5-thinking", + "claude-sonnet-4-5-20250929": "claude-sonnet-4-5", + "claude-haiku-4-5": "claude-sonnet-4-5", + "claude-haiku-4-5-20251001": "claude-sonnet-4-5", + "gemini-2.5-flash": "gemini-2.5-flash", + "gemini-2.5-flash-lite": "gemini-2.5-flash-lite", + "gemini-2.5-flash-thinking": "gemini-2.5-flash-thinking", + "gemini-2.5-pro": "gemini-2.5-pro", + "gemini-3-flash": "gemini-3-flash", + "gemini-3-pro-high": "gemini-3-pro-high", + "gemini-3-pro-low": "gemini-3-pro-low", + "gemini-3-pro-image": "gemini-3-pro-image", + "gemini-3-flash-preview": "gemini-3-flash", + "gemini-3-pro-preview": "gemini-3-pro-high", + "gemini-3-pro-image-preview": "gemini-3-pro-image", + "gpt-oss-120b-medium": 
"gpt-oss-120b-medium", + "tab_flash_lite_preview": "tab_flash_lite_preview" + }'::jsonb +) +WHERE platform = 'antigravity' + AND deleted_at IS NULL + AND credentials->'model_mapping' IS NOT NULL; diff --git a/frontend/src/composables/useModelWhitelist.ts b/frontend/src/composables/useModelWhitelist.ts index 98c668f0..b57e6af1 100644 --- a/frontend/src/composables/useModelWhitelist.ts +++ b/frontend/src/composables/useModelWhitelist.ts @@ -60,6 +60,7 @@ const antigravityModels = [ // Claude 4.5+ 系列 'claude-opus-4-6', 'claude-opus-4-5-thinking', + 'claude-sonnet-4-6', 'claude-sonnet-4-5', 'claude-sonnet-4-5-thinking', // Gemini 2.5 系列 @@ -271,6 +272,7 @@ const antigravityPresetMappings = [ { label: 'Gemini 3→Flash', from: 'gemini-3*', to: 'gemini-3-flash', color: 'bg-amber-100 text-amber-700 hover:bg-amber-200 dark:bg-amber-900/30 dark:text-amber-400' }, { label: 'Gemini 2.5→Flash', from: 'gemini-2.5*', to: 'gemini-2.5-flash', color: 'bg-orange-100 text-orange-700 hover:bg-orange-200 dark:bg-orange-900/30 dark:text-orange-400' }, // 精确映射 + { label: 'Sonnet 4.6', from: 'claude-sonnet-4-6', to: 'claude-sonnet-4-6', color: 'bg-cyan-100 text-cyan-700 hover:bg-cyan-200 dark:bg-cyan-900/30 dark:text-cyan-400' }, { label: 'Sonnet 4.5', from: 'claude-sonnet-4-5', to: 'claude-sonnet-4-5', color: 'bg-cyan-100 text-cyan-700 hover:bg-cyan-200 dark:bg-cyan-900/30 dark:text-cyan-400' }, { label: 'Opus 4.6-thinking', from: 'claude-opus-4-6-thinking', to: 'claude-opus-4-6-thinking', color: 'bg-pink-100 text-pink-700 hover:bg-pink-200 dark:bg-pink-900/30 dark:text-pink-400' } ] From f819cef6d5584c11dc545c67a99346c3c24ab11d Mon Sep 17 00:00:00 2001 From: erio Date: Fri, 20 Feb 2026 00:20:07 +0800 Subject: [PATCH 078/175] chore: bump version to 0.1.84.4 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index af8c1aa2..c67f4b57 100644 --- a/backend/cmd/server/VERSION +++ 
b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.84.3 +0.1.84.4 From 737d1ecf5b498d25f170edb235f405eb27623e4c Mon Sep 17 00:00:00 2001 From: erio Date: Fri, 20 Feb 2026 12:22:02 +0800 Subject: [PATCH 079/175] =?UTF-8?q?feat:=20=E6=96=B0=E5=A2=9E=20gemini-3.1?= =?UTF-8?q?-pro-high/low=20=E6=A8=A1=E5=9E=8B=E6=94=AF=E6=8C=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 后端默认映射新增 gemini-3.1-pro-high、gemini-3.1-pro-low、gemini-3.1-pro-preview - 前端模型列表新增 gemini-3.1 系列 - 数据库迁移全量覆写 model_mapping --- backend/internal/domain/constants.go | 5 +++ .../057_add_gemini31_pro_to_model_mapping.sql | 45 +++++++++++++++++++ frontend/src/composables/useModelWhitelist.ts | 3 ++ 3 files changed, 53 insertions(+) create mode 100644 backend/migrations/057_add_gemini31_pro_to_model_mapping.sql diff --git a/backend/internal/domain/constants.go b/backend/internal/domain/constants.go index 5963994e..439502f2 100644 --- a/backend/internal/domain/constants.go +++ b/backend/internal/domain/constants.go @@ -96,6 +96,11 @@ var DefaultAntigravityModelMapping = map[string]string{ "gemini-3-flash-preview": "gemini-3-flash", "gemini-3-pro-preview": "gemini-3-pro-high", "gemini-3-pro-image-preview": "gemini-3-pro-image", + // Gemini 3.1 白名单 + "gemini-3.1-pro-high": "gemini-3.1-pro-high", + "gemini-3.1-pro-low": "gemini-3.1-pro-low", + // Gemini 3.1 preview 映射 + "gemini-3.1-pro-preview": "gemini-3.1-pro-high", // 其他官方模型 "gpt-oss-120b-medium": "gpt-oss-120b-medium", "tab_flash_lite_preview": "tab_flash_lite_preview", diff --git a/backend/migrations/057_add_gemini31_pro_to_model_mapping.sql b/backend/migrations/057_add_gemini31_pro_to_model_mapping.sql new file mode 100644 index 00000000..6305e717 --- /dev/null +++ b/backend/migrations/057_add_gemini31_pro_to_model_mapping.sql @@ -0,0 +1,45 @@ +-- Add gemini-3.1-pro-high, gemini-3.1-pro-low, gemini-3.1-pro-preview to model_mapping +-- +-- Background: +-- Antigravity now supports gemini-3.1-pro-high and 
gemini-3.1-pro-low +-- +-- Strategy: +-- Directly overwrite the entire model_mapping with updated mappings +-- This ensures consistency with DefaultAntigravityModelMapping in constants.go + +UPDATE accounts +SET credentials = jsonb_set( + credentials, + '{model_mapping}', + '{ + "claude-opus-4-6-thinking": "claude-opus-4-6-thinking", + "claude-opus-4-6": "claude-opus-4-6-thinking", + "claude-opus-4-5-thinking": "claude-opus-4-6-thinking", + "claude-opus-4-5-20251101": "claude-opus-4-6-thinking", + "claude-sonnet-4-6": "claude-sonnet-4-6", + "claude-sonnet-4-5": "claude-sonnet-4-5", + "claude-sonnet-4-5-thinking": "claude-sonnet-4-5-thinking", + "claude-sonnet-4-5-20250929": "claude-sonnet-4-5", + "claude-haiku-4-5": "claude-sonnet-4-5", + "claude-haiku-4-5-20251001": "claude-sonnet-4-5", + "gemini-2.5-flash": "gemini-2.5-flash", + "gemini-2.5-flash-lite": "gemini-2.5-flash-lite", + "gemini-2.5-flash-thinking": "gemini-2.5-flash-thinking", + "gemini-2.5-pro": "gemini-2.5-pro", + "gemini-3-flash": "gemini-3-flash", + "gemini-3-pro-high": "gemini-3-pro-high", + "gemini-3-pro-low": "gemini-3-pro-low", + "gemini-3-pro-image": "gemini-3-pro-image", + "gemini-3-flash-preview": "gemini-3-flash", + "gemini-3-pro-preview": "gemini-3-pro-high", + "gemini-3-pro-image-preview": "gemini-3-pro-image", + "gemini-3.1-pro-high": "gemini-3.1-pro-high", + "gemini-3.1-pro-low": "gemini-3.1-pro-low", + "gemini-3.1-pro-preview": "gemini-3.1-pro-high", + "gpt-oss-120b-medium": "gpt-oss-120b-medium", + "tab_flash_lite_preview": "tab_flash_lite_preview" + }'::jsonb +) +WHERE platform = 'antigravity' + AND deleted_at IS NULL + AND credentials->'model_mapping' IS NOT NULL; diff --git a/frontend/src/composables/useModelWhitelist.ts b/frontend/src/composables/useModelWhitelist.ts index b57e6af1..fd9024df 100644 --- a/frontend/src/composables/useModelWhitelist.ts +++ b/frontend/src/composables/useModelWhitelist.ts @@ -73,6 +73,9 @@ const antigravityModels = [ 'gemini-3-pro-high', 
'gemini-3-pro-low', 'gemini-3-pro-image', + // Gemini 3.1 系列 + 'gemini-3.1-pro-high', + 'gemini-3.1-pro-low', // 其他 'gpt-oss-120b-medium', 'tab_flash_lite_preview' From 38dca4f7871523d069de278cc13caf84486ff8fc Mon Sep 17 00:00:00 2001 From: erio Date: Fri, 20 Feb 2026 12:22:13 +0800 Subject: [PATCH 080/175] chore: bump version to 0.1.84.5 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index c67f4b57..82675c62 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.84.4 +0.1.84.5 From 5b06542193cc5d5a15ca706eda00dd6aaf491d70 Mon Sep 17 00:00:00 2001 From: erio Date: Sun, 22 Feb 2026 19:30:46 +0800 Subject: [PATCH 081/175] =?UTF-8?q?feat:=20=E6=9B=B4=E6=96=B0=20Antigravit?= =?UTF-8?q?y=20UserAgent=20=E7=89=88=E6=9C=AC=E5=88=B0=201.18.4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/pkg/antigravity/oauth.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/internal/pkg/antigravity/oauth.go b/backend/internal/pkg/antigravity/oauth.go index 313ffb11..b376ff95 100644 --- a/backend/internal/pkg/antigravity/oauth.go +++ b/backend/internal/pkg/antigravity/oauth.go @@ -33,7 +33,7 @@ const ( "https://www.googleapis.com/auth/experimentsandconfigs" // User-Agent(与 Antigravity-Manager 保持一致) - UserAgent = "antigravity/1.16.5 windows/amd64" + UserAgent = "antigravity/1.18.4 windows/amd64" // Session 过期时间 SessionTTL = 30 * time.Minute From 7eb3b23ddf98b3514ee80e079dc1fed187c4b0d7 Mon Sep 17 00:00:00 2001 From: erio Date: Sun, 22 Feb 2026 19:30:50 +0800 Subject: [PATCH 082/175] chore: bump version to 0.1.84.6 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 82675c62..fec94d51 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION 
@@ -1 +1 @@ -0.1.84.5 +0.1.84.6 From 0ad20c94899463d56ab09ab35bf313d9faf525f3 Mon Sep 17 00:00:00 2001 From: erio Date: Sun, 22 Feb 2026 23:28:11 +0800 Subject: [PATCH 083/175] =?UTF-8?q?fix:=20=E4=BF=AE=E5=A4=8D=20Antigravity?= =?UTF-8?q?=20=E8=B4=A6=E5=8F=B7=20intercept=5Fwarmup=5Frequests=20?= =?UTF-8?q?=E9=85=8D=E7=BD=AE=E6=97=A0=E6=B3=95=E4=BF=9D=E5=AD=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 提取 applyInterceptWarmup 纯函数统一所有调用点: - 修复 upstream 创建时遗漏写入 intercept_warmup_requests - 修复 apikey 编辑时缺少 else 清除逻辑 - 添加前后端单元测试 - 修复 vitest.config.ts mergeConfig 兼容性问题 --- .../service/account_intercept_warmup_test.go | 66 ++++++++++++++++++ .../components/account/CreateAccountModal.vue | 22 +++--- .../components/account/EditAccountModal.vue | 14 ++-- .../__tests__/credentialsBuilder.spec.ts | 46 +++++++++++++ .../components/account/credentialsBuilder.ts | 11 +++ frontend/vitest.config.ts | 68 ++++++++++--------- 6 files changed, 173 insertions(+), 54 deletions(-) create mode 100644 backend/internal/service/account_intercept_warmup_test.go create mode 100644 frontend/src/components/account/__tests__/credentialsBuilder.spec.ts create mode 100644 frontend/src/components/account/credentialsBuilder.ts diff --git a/backend/internal/service/account_intercept_warmup_test.go b/backend/internal/service/account_intercept_warmup_test.go new file mode 100644 index 00000000..f117fd8d --- /dev/null +++ b/backend/internal/service/account_intercept_warmup_test.go @@ -0,0 +1,66 @@ +//go:build unit + +package service + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAccount_IsInterceptWarmupEnabled(t *testing.T) { + tests := []struct { + name string + credentials map[string]any + expected bool + }{ + { + name: "nil credentials", + credentials: nil, + expected: false, + }, + { + name: "empty map", + credentials: map[string]any{}, + expected: false, + }, + { + name: "field not present", + credentials: 
map[string]any{"access_token": "tok"}, + expected: false, + }, + { + name: "field is true", + credentials: map[string]any{"intercept_warmup_requests": true}, + expected: true, + }, + { + name: "field is false", + credentials: map[string]any{"intercept_warmup_requests": false}, + expected: false, + }, + { + name: "field is string true", + credentials: map[string]any{"intercept_warmup_requests": "true"}, + expected: false, + }, + { + name: "field is int 1", + credentials: map[string]any{"intercept_warmup_requests": 1}, + expected: false, + }, + { + name: "field is nil", + credentials: map[string]any{"intercept_warmup_requests": nil}, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + a := &Account{Credentials: tt.credentials} + result := a.IsInterceptWarmupEnabled() + require.Equal(t, tt.expected, result) + }) + } +} diff --git a/frontend/src/components/account/CreateAccountModal.vue b/frontend/src/components/account/CreateAccountModal.vue index 7ebcb36d..76aa3fea 100644 --- a/frontend/src/components/account/CreateAccountModal.vue +++ b/frontend/src/components/account/CreateAccountModal.vue @@ -2018,6 +2018,7 @@ import Icon from '@/components/icons/Icon.vue' import ProxySelector from '@/components/common/ProxySelector.vue' import GroupSelector from '@/components/common/GroupSelector.vue' import ModelWhitelistSelector from '@/components/account/ModelWhitelistSelector.vue' +import { applyInterceptWarmup } from '@/components/account/credentialsBuilder' import { formatDateTimeLocalInput, parseDateTimeLocalInput } from '@/utils/format' import OAuthAuthorizationFlow from './OAuthAuthorizationFlow.vue' @@ -2832,6 +2833,8 @@ const handleSubmit = async () => { credentials.model_mapping = antigravityModelMapping } + applyInterceptWarmup(credentials, interceptWarmupRequests.value, 'create') + const extra = mixedScheduling.value ? 
{ mixed_scheduling: true } : undefined await createAccountAndFinish(form.platform, 'apikey', credentials, extra) return @@ -2872,10 +2875,7 @@ const handleSubmit = async () => { credentials.custom_error_codes = [...selectedErrorCodes.value] } - // Add intercept warmup requests setting - if (interceptWarmupRequests.value) { - credentials.intercept_warmup_requests = true - } + applyInterceptWarmup(credentials, interceptWarmupRequests.value, 'create') if (!applyTempUnschedConfig(credentials)) { return } @@ -3220,6 +3220,7 @@ const handleAntigravityExchange = async (authCode: string) => { if (!tokenInfo) return const credentials = antigravityOAuth.buildCredentials(tokenInfo) + applyInterceptWarmup(credentials, interceptWarmupRequests.value, 'create') // Antigravity 只使用映射模式 const antigravityModelMapping = buildModelMappingObject( 'mapping', @@ -3291,10 +3292,8 @@ const handleAnthropicExchange = async (authCode: string) => { extra.cache_ttl_override_target = cacheTTLOverrideTarget.value } - const credentials = { - ...tokenInfo, - ...(interceptWarmupRequests.value ? { intercept_warmup_requests: true } : {}) - } + const credentials: Record = { ...tokenInfo } + applyInterceptWarmup(credentials, interceptWarmupRequests.value, 'create') await createAccountAndFinish(form.platform, addMethod.value as AccountType, credentials, extra) } catch (error: any) { oauth.error.value = error.response?.data?.detail || t('admin.accounts.oauth.authFailed') @@ -3392,11 +3391,8 @@ const handleCookieAuth = async (sessionKey: string) => { const accountName = keys.length > 1 ? `${form.name} #${i + 1}` : form.name - // Merge interceptWarmupRequests into credentials - const credentials: Record = { - ...tokenInfo, - ...(interceptWarmupRequests.value ? 
{ intercept_warmup_requests: true } : {}) - } + const credentials: Record = { ...tokenInfo } + applyInterceptWarmup(credentials, interceptWarmupRequests.value, 'create') if (tempUnschedEnabled.value) { credentials.temp_unschedulable_enabled = true credentials.temp_unschedulable_rules = tempUnschedPayload diff --git a/frontend/src/components/account/EditAccountModal.vue b/frontend/src/components/account/EditAccountModal.vue index f3a1b1c9..4510a3c8 100644 --- a/frontend/src/components/account/EditAccountModal.vue +++ b/frontend/src/components/account/EditAccountModal.vue @@ -1033,6 +1033,7 @@ import Icon from '@/components/icons/Icon.vue' import ProxySelector from '@/components/common/ProxySelector.vue' import GroupSelector from '@/components/common/GroupSelector.vue' import ModelWhitelistSelector from '@/components/account/ModelWhitelistSelector.vue' +import { applyInterceptWarmup } from '@/components/account/credentialsBuilder' import { formatDateTimeLocalInput, parseDateTimeLocalInput } from '@/utils/format' import { getPresetMappingsByPlatform, @@ -1748,9 +1749,7 @@ const handleSubmit = async () => { } // Add intercept warmup requests setting - if (interceptWarmupRequests.value) { - newCredentials.intercept_warmup_requests = true - } + applyInterceptWarmup(newCredentials, interceptWarmupRequests.value, 'edit') if (!applyTempUnschedConfig(newCredentials)) { return } @@ -1766,6 +1765,9 @@ const handleSubmit = async () => { newCredentials.api_key = editApiKey.value.trim() } + // Add intercept warmup requests setting + applyInterceptWarmup(newCredentials, interceptWarmupRequests.value, 'edit') + if (!applyTempUnschedConfig(newCredentials)) { return } @@ -1776,11 +1778,7 @@ const handleSubmit = async () => { const currentCredentials = (props.account.credentials as Record) || {} const newCredentials: Record = { ...currentCredentials } - if (interceptWarmupRequests.value) { - newCredentials.intercept_warmup_requests = true - } else { - delete 
newCredentials.intercept_warmup_requests - } + applyInterceptWarmup(newCredentials, interceptWarmupRequests.value, 'edit') if (!applyTempUnschedConfig(newCredentials)) { return } diff --git a/frontend/src/components/account/__tests__/credentialsBuilder.spec.ts b/frontend/src/components/account/__tests__/credentialsBuilder.spec.ts new file mode 100644 index 00000000..be2a8d52 --- /dev/null +++ b/frontend/src/components/account/__tests__/credentialsBuilder.spec.ts @@ -0,0 +1,46 @@ +import { describe, it, expect } from 'vitest' +import { applyInterceptWarmup } from '../credentialsBuilder' + +describe('applyInterceptWarmup', () => { + it('create + enabled=true: should set intercept_warmup_requests to true', () => { + const creds: Record = { access_token: 'tok' } + applyInterceptWarmup(creds, true, 'create') + expect(creds.intercept_warmup_requests).toBe(true) + }) + + it('create + enabled=false: should not add the field', () => { + const creds: Record = { access_token: 'tok' } + applyInterceptWarmup(creds, false, 'create') + expect('intercept_warmup_requests' in creds).toBe(false) + }) + + it('edit + enabled=true: should set intercept_warmup_requests to true', () => { + const creds: Record = { api_key: 'sk' } + applyInterceptWarmup(creds, true, 'edit') + expect(creds.intercept_warmup_requests).toBe(true) + }) + + it('edit + enabled=false + field exists: should delete the field', () => { + const creds: Record = { api_key: 'sk', intercept_warmup_requests: true } + applyInterceptWarmup(creds, false, 'edit') + expect('intercept_warmup_requests' in creds).toBe(false) + }) + + it('edit + enabled=false + field absent: should not throw', () => { + const creds: Record = { api_key: 'sk' } + applyInterceptWarmup(creds, false, 'edit') + expect('intercept_warmup_requests' in creds).toBe(false) + }) + + it('should not affect other fields', () => { + const creds: Record = { + api_key: 'sk', + base_url: 'url', + intercept_warmup_requests: true + } + applyInterceptWarmup(creds, false, 
'edit') + expect(creds.api_key).toBe('sk') + expect(creds.base_url).toBe('url') + expect('intercept_warmup_requests' in creds).toBe(false) + }) +}) diff --git a/frontend/src/components/account/credentialsBuilder.ts b/frontend/src/components/account/credentialsBuilder.ts new file mode 100644 index 00000000..b8008e8b --- /dev/null +++ b/frontend/src/components/account/credentialsBuilder.ts @@ -0,0 +1,11 @@ +export function applyInterceptWarmup( + credentials: Record, + enabled: boolean, + mode: 'create' | 'edit' +): void { + if (enabled) { + credentials.intercept_warmup_requests = true + } else if (mode === 'edit') { + delete credentials.intercept_warmup_requests + } +} diff --git a/frontend/vitest.config.ts b/frontend/vitest.config.ts index 0b20cb60..2ff23c77 100644 --- a/frontend/vitest.config.ts +++ b/frontend/vitest.config.ts @@ -1,35 +1,37 @@ -import { defineConfig, mergeConfig } from 'vitest/config' -import viteConfig from './vite.config' +import { defineConfig } from 'vitest/config' +import { resolve } from 'path' -export default mergeConfig( - viteConfig, - defineConfig({ - test: { - globals: true, - environment: 'jsdom', - include: ['src/**/*.{test,spec}.{js,ts,jsx,tsx}'], - exclude: ['node_modules', 'dist'], - coverage: { - provider: 'v8', - reporter: ['text', 'json', 'html'], - include: ['src/**/*.{js,ts,vue}'], - exclude: [ - 'node_modules', - 'src/**/*.d.ts', - 'src/**/*.spec.ts', - 'src/**/*.test.ts', - 'src/main.ts' - ], - thresholds: { - global: { - statements: 80, - branches: 80, - functions: 80, - lines: 80 - } - } - }, - setupFiles: ['./src/__tests__/setup.ts'] +export default defineConfig({ + resolve: { + alias: { + '@': resolve(__dirname, 'src'), + 'vue-i18n': 'vue-i18n/dist/vue-i18n.runtime.esm-bundler.js' } - }) -) + }, + test: { + globals: true, + environment: 'jsdom', + include: ['src/**/*.{test,spec}.{js,ts,jsx,tsx}'], + exclude: ['node_modules', 'dist'], + coverage: { + provider: 'v8', + reporter: ['text', 'json', 'html'], + include: 
['src/**/*.{js,ts,vue}'], + exclude: [ + 'node_modules', + 'src/**/*.d.ts', + 'src/**/*.spec.ts', + 'src/**/*.test.ts', + 'src/main.ts' + ], + thresholds: { + global: { + statements: 80, + branches: 80, + functions: 80, + lines: 80 + } + } + } + } +}) From 39d7300a8e958f3fabaefdf2cb635113b447cc97 Mon Sep 17 00:00:00 2001 From: erio Date: Sun, 22 Feb 2026 23:28:19 +0800 Subject: [PATCH 084/175] chore: bump version to 0.1.84.7 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index fec94d51..410baf3e 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.84.6 +0.1.84.7 From 292fa7a6d2a093788c0122d15bffbcc27e7cf562 Mon Sep 17 00:00:00 2001 From: erio Date: Tue, 24 Feb 2026 14:45:55 +0800 Subject: [PATCH 085/175] =?UTF-8?q?fix:=20=E4=BF=AE=E5=A4=8D=20Antigravity?= =?UTF-8?q?=20Claude=204.6=20=E6=A8=A1=E5=9E=8B=E7=94=A8=E9=87=8F=E7=AA=97?= =?UTF-8?q?=E5=8F=A3=E4=B8=8D=E6=98=BE=E7=A4=BA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 模型名列表只包含 claude-sonnet-4-5 和 claude-opus-4-5-thinking, 升级到 4.6 后后端返回 claude-sonnet-4-6/claude-opus-4-6-thinking, 前端匹配不到导致 Claude 用量条不显示。 --- .../components/account/AccountUsageCell.vue | 19 +++++++++++-------- frontend/src/i18n/locales/en.ts | 2 +- frontend/src/i18n/locales/zh.ts | 2 +- 3 files changed, 13 insertions(+), 10 deletions(-) diff --git a/frontend/src/components/account/AccountUsageCell.vue b/frontend/src/components/account/AccountUsageCell.vue index c0212c5a..54c397da 100644 --- a/frontend/src/components/account/AccountUsageCell.vue +++ b/frontend/src/components/account/AccountUsageCell.vue @@ -172,12 +172,12 @@ color="purple" /> - +
@@ -534,9 +534,12 @@ const antigravity3FlashUsageFromAPI = computed(() => getAntigravityUsageFromAPI( // Gemini 3 Image from API const antigravity3ImageUsageFromAPI = computed(() => getAntigravityUsageFromAPI(['gemini-3-pro-image'])) -// Claude 4.5 from API -const antigravityClaude45UsageFromAPI = computed(() => - getAntigravityUsageFromAPI(['claude-sonnet-4-5', 'claude-opus-4-5-thinking']) +// Claude from API (all Claude model variants) +const antigravityClaudeUsageFromAPI = computed(() => + getAntigravityUsageFromAPI([ + 'claude-sonnet-4-5', 'claude-opus-4-5-thinking', + 'claude-sonnet-4-6', 'claude-opus-4-6-thinking', + ]) ) // Antigravity 账户类型(从 load_code_assist 响应中提取) diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts index 90515478..888f2ce4 100644 --- a/frontend/src/i18n/locales/en.ts +++ b/frontend/src/i18n/locales/en.ts @@ -2006,7 +2006,7 @@ export default { gemini3Pro: 'G3P', gemini3Flash: 'G3F', gemini3Image: 'G3I', - claude45: 'C4.5' + claude: 'Claude' }, tier: { free: 'Free', diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index 78bdfaa2..a21699b9 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -1570,7 +1570,7 @@ export default { gemini3Pro: 'G3P', gemini3Flash: 'G3F', gemini3Image: 'G3I', - claude45: 'C4.5' + claude: 'Claude' }, tier: { free: 'Free', From 8661bf883773cda3decee696e1f3b892e3a2f48e Mon Sep 17 00:00:00 2001 From: erio Date: Tue, 24 Feb 2026 14:46:06 +0800 Subject: [PATCH 086/175] chore: bump version to 0.1.84.8 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 410baf3e..6fa91917 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.84.7 +0.1.84.8 From e2b39694922a9e123d03ddf668116efdc2eb63eb Mon Sep 17 00:00:00 2001 From: erio Date: Wed, 25 Feb 2026 18:50:26 +0800 Subject: [PATCH 087/175] 
=?UTF-8?q?ui:=20=E5=87=8F=E5=B0=8F=E6=A8=A1?= =?UTF-8?q?=E5=9E=8B=E9=99=90=E6=B5=81=E5=BE=BD=E7=AB=A0=E5=8C=BA=E5=9F=9F?= =?UTF-8?q?=E9=97=B4=E8=B7=9D=EF=BC=8C=E4=B8=8E=E7=94=A8=E9=87=8F=E7=AA=97?= =?UTF-8?q?=E5=8F=A3=E5=88=97=E9=AB=98=E5=BA=A6=E5=AF=B9=E9=BD=90?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- frontend/src/components/account/AccountStatusIndicator.vue | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/frontend/src/components/account/AccountStatusIndicator.vue b/frontend/src/components/account/AccountStatusIndicator.vue index af32ea0c..7841ab68 100644 --- a/frontend/src/components/account/AccountStatusIndicator.vue +++ b/frontend/src/components/account/AccountStatusIndicator.vue @@ -77,10 +77,10 @@
-
+
{{ formatScopeName(item.model) }} From d552ad767336aa4978cfe57e73db96a4d589e22e Mon Sep 17 00:00:00 2001 From: erio Date: Wed, 25 Feb 2026 19:24:56 +0800 Subject: [PATCH 088/175] =?UTF-8?q?fix:=20=E4=BF=AE=E5=A4=8D=E5=90=88?= =?UTF-8?q?=E5=B9=B6=E5=90=8E=E7=9A=84=E9=87=8D=E5=A4=8D=E4=BB=A3=E7=A0=81?= =?UTF-8?q?=E5=92=8C=E9=97=AD=E5=8C=85=E7=AD=BE=E5=90=8D=E4=B8=8D=E5=8C=B9?= =?UTF-8?q?=E9=85=8D?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 删除 account_handler.go 中重复的 CheckMixedChannel 函数 - 修复 gateway_handler.go 闭包调用传参不匹配(上游已改为闭包捕获) - 恢复 docker-compose.yml 中缺失的 postgres_data volume 定义 --- .../internal/handler/admin/account_handler.go | 44 ------------------- backend/internal/handler/gateway_handler.go | 4 +- deploy/docker-compose.yml | 2 + 3 files changed, 4 insertions(+), 46 deletions(-) diff --git a/backend/internal/handler/admin/account_handler.go b/backend/internal/handler/admin/account_handler.go index 5cb70be4..df82476c 100644 --- a/backend/internal/handler/admin/account_handler.go +++ b/backend/internal/handler/admin/account_handler.go @@ -440,50 +440,6 @@ func (h *AccountHandler) CheckMixedChannel(c *gin.Context) { response.Success(c, gin.H{"has_risk": false}) } -// CheckMixedChannel handles checking mixed channel risk for account-group binding. 
-// POST /api/v1/admin/accounts/check-mixed-channel -func (h *AccountHandler) CheckMixedChannel(c *gin.Context) { - var req CheckMixedChannelRequest - if err := c.ShouldBindJSON(&req); err != nil { - response.BadRequest(c, "Invalid request: "+err.Error()) - return - } - - if len(req.GroupIDs) == 0 { - response.Success(c, gin.H{"has_risk": false}) - return - } - - accountID := int64(0) - if req.AccountID != nil { - accountID = *req.AccountID - } - - err := h.adminService.CheckMixedChannelRisk(c.Request.Context(), accountID, req.Platform, req.GroupIDs) - if err != nil { - var mixedErr *service.MixedChannelError - if errors.As(err, &mixedErr) { - response.Success(c, gin.H{ - "has_risk": true, - "error": "mixed_channel_warning", - "message": mixedErr.Error(), - "details": gin.H{ - "group_id": mixedErr.GroupID, - "group_name": mixedErr.GroupName, - "current_platform": mixedErr.CurrentPlatform, - "other_platform": mixedErr.OtherPlatform, - }, - }) - return - } - - response.ErrorFrom(c, err) - return - } - - response.Success(c, gin.H{"has_risk": false}) -} - // Create handles creating a new account // POST /api/v1/admin/accounts func (h *AccountHandler) Create(c *gin.Context) { diff --git a/backend/internal/handler/gateway_handler.go b/backend/internal/handler/gateway_handler.go index a4c0ba90..fe40e9d2 100644 --- a/backend/internal/handler/gateway_handler.go +++ b/backend/internal/handler/gateway_handler.go @@ -423,7 +423,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { zap.Int64("account_id", account.ID), ).Error("gateway.record_usage_failed", zap.Error(err)) } - }(result, account, userAgent, clientIP, fs.ForceCacheBilling) + }) return } } @@ -649,7 +649,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { zap.Int64("account_id", account.ID), ).Error("gateway.record_usage_failed", zap.Error(err)) } - }(result, account, userAgent, clientIP, fs.ForceCacheBilling) + }) return } if !retryWithFallback { diff --git a/deploy/docker-compose.yml 
b/deploy/docker-compose.yml index b57e444f..1d92458f 100644 --- a/deploy/docker-compose.yml +++ b/deploy/docker-compose.yml @@ -225,6 +225,8 @@ services: volumes: sub2api_data: driver: local + postgres_data: + driver: local redis_data: driver: local From 5c07e11473d0f3020a107566604749ff732fc284 Mon Sep 17 00:00:00 2001 From: erio Date: Wed, 25 Feb 2026 20:08:44 +0800 Subject: [PATCH 089/175] refactor: remove unused UserAgent constant The UserAgent constant was never referenced; all HTTP requests use GetUserAgent() which reads from defaultUserAgentVersion (configurable via ANTIGRAVITY_USER_AGENT_VERSION env var). --- backend/internal/pkg/antigravity/oauth.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/backend/internal/pkg/antigravity/oauth.go b/backend/internal/pkg/antigravity/oauth.go index f6ece2da..47c75142 100644 --- a/backend/internal/pkg/antigravity/oauth.go +++ b/backend/internal/pkg/antigravity/oauth.go @@ -38,9 +38,6 @@ const ( "https://www.googleapis.com/auth/cclog " + "https://www.googleapis.com/auth/experimentsandconfigs" - // User-Agent(与 Antigravity-Manager 保持一致) - UserAgent = "antigravity/1.18.4 windows/amd64" - // Session 过期时间 SessionTTL = 30 * time.Minute From efe8810dff4f114a9adb4208955f8dd8c34e596c Mon Sep 17 00:00:00 2001 From: erio Date: Wed, 25 Feb 2026 22:10:57 +0800 Subject: [PATCH 090/175] fix: remove duplicate model_mapping field in GeminiCredentials The field was defined twice (line 563 and 585) due to merge conflict with upstream v0.1.86, causing TypeScript compilation error TS2300. 
--- frontend/src/types/index.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/frontend/src/types/index.ts b/frontend/src/types/index.ts index 9a4184c5..a54cfcef 100644 --- a/frontend/src/types/index.ts +++ b/frontend/src/types/index.ts @@ -560,7 +560,6 @@ export interface ProxyQualityCheckResult { export interface GeminiCredentials { // API Key authentication api_key?: string - model_mapping?: Record // OAuth authentication access_token?: string From 78ac6a7a29f17308577828b7a8e5fe831b2962dc Mon Sep 17 00:00:00 2001 From: erio Date: Fri, 27 Feb 2026 00:59:11 +0800 Subject: [PATCH 091/175] =?UTF-8?q?docs:=20=E6=B7=BB=E5=8A=A0=20Star=20?= =?UTF-8?q?=E7=8E=AF=E5=A2=83=E9=83=A8=E7=BD=B2=E4=BF=A1=E6=81=AF=E5=B9=B6?= =?UTF-8?q?=E5=88=9B=E5=BB=BA=20AGENTS.md?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - CLAUDE.md 中添加 Star 环境记录(端口 8086、数据库 star、Redis DB 4) - 复制 CLAUDE.md 为 AGENTS.md --- AGENTS.md | 73 ++++++++++++++++++++++++++++++++++++++++++++----------- CLAUDE.md | 8 ++++-- 2 files changed, 65 insertions(+), 16 deletions(-) diff --git a/AGENTS.md b/AGENTS.md index 85592334..5cf453ae 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -99,7 +99,7 @@ git push origin main - 本地已配置 SSH 别名 `clicodeplus` 连接到生产服务器(运行服务) - 本地已配置 SSH 别名 `us-asaki-root` 连接到构建服务器(拉取代码、构建镜像) -- 生产服务器部署目录:`/root/sub2api`(正式)、`/root/sub2api-beta`(测试) +- 生产服务器部署目录:`/root/sub2api`(正式)、`/root/sub2api-beta`(测试)、`/root/sub2api-star`(Star) - 生产服务器使用 Docker Compose 部署 - **镜像统一在构建服务器上构建**,避免生产服务器因编译占用 CPU/内存影响线上服务 @@ -109,20 +109,45 @@ git push origin main |--------|----------|------| | 构建服务器 | `us-asaki-root` | 拉取代码、`docker build` 构建镜像 | | 生产服务器 | `clicodeplus` | 加载镜像、运行服务、部署验证 | +| 数据库服务器 | `db-clicodeplus` | PostgreSQL 16 + Redis 7,所有环境共用 | + +> 数据库服务器运维手册:`db-clicodeplus:/root/README.md` ### 部署环境说明 -| 环境 | 目录(生产服务器) | 端口 | 数据库 | 容器名 | -|------|------|------|--------|--------| -| 正式 | `/root/sub2api` | 8080 | `sub2api` | `sub2api` | -| Beta | `/root/sub2api-beta` | 
8084 | `beta` | `sub2api-beta` | +| 环境 | 目录(生产服务器) | 端口 | 数据库 | Redis DB | 容器名 | +|------|------|------|--------|----------|--------| +| 正式 | `/root/sub2api` | 8080 | `sub2api` | 0 | `sub2api` | +| Beta | `/root/sub2api-beta` | 8084 | `beta` | 2 | `sub2api-beta` | +| OpenAI | `/root/sub2api-openai` | 8083 | `openai` | 3 | `sub2api-openai` | +| Star | `/root/sub2api-star` | 8086 | `star` | 4 | `sub2api-star` | -### 外部数据库 +### 外部数据库与 Redis -正式和 Beta 环境**共用外部 PostgreSQL 数据库**(非容器内数据库),配置在 `.env` 文件中: -- `DATABASE_HOST`:外部数据库地址 -- `DATABASE_SSLMODE`:SSL 模式(通常为 `require`) -- `POSTGRES_USER` / `POSTGRES_DB`:用户名和数据库名 +所有环境(正式、Beta、OpenAI、Star)共用 `db.clicodeplus.com` 上的 **PostgreSQL 16** 和 **Redis 7**,不使用容器内数据库或 Redis。 + +**PostgreSQL**(端口 5432,TLS 加密,scram-sha-256 认证): + +| 环境 | 用户名 | 数据库 | +|------|--------|--------| +| 正式 | `sub2api` | `sub2api` | +| Beta | `beta` | `beta` | +| OpenAI | `openai` | `openai` | +| Star | `star` | `star` | + +**Redis**(端口 6379,密码认证): + +| 环境 | DB | +|------|-----| +| 正式 | 0 | +| Beta | 2 | +| OpenAI | 3 | +| Star | 4 | + +**配置方式**: +- 数据库通过 `.env` 中的 `DATABASE_HOST`、`DATABASE_SSLMODE`、`POSTGRES_USER`、`POSTGRES_PASSWORD`、`POSTGRES_DB` 配置 +- Redis 通过 `docker-compose.override.yml` 覆盖 `REDIS_HOST`(因主 compose 文件硬编码为 `redis`),密码通过 `.env` 中的 `REDIS_PASSWORD` 配置 +- 各环境的 `docker-compose.override.yml` 已通过 `depends_on: !reset {}` 和 `redis: profiles: [disabled]` 去掉了对容器 Redis 的依赖 #### 数据库操作命令 @@ -311,14 +336,20 @@ perl -pi -e 's/^SERVER_PORT=.*/SERVER_PORT=8084/' ./.env perl -pi -e 's/^POSTGRES_USER=.*/POSTGRES_USER=beta/' ./.env perl -pi -e 's/^POSTGRES_DB=.*/POSTGRES_DB=beta/' ./.env -# 5) 写 compose override(避免与现网容器名冲突,镜像使用构建服务器传输的 sub2api:beta) +# 5) 写 compose override(避免与现网容器名冲突,镜像使用构建服务器传输的 sub2api:beta,Redis 使用外部服务) cat > docker-compose.override.yml <<'YAML' services: sub2api: image: sub2api:beta container_name: sub2api-beta + environment: + - DATABASE_HOST=${DATABASE_HOST:-postgres} + - DATABASE_SSLMODE=${DATABASE_SSLMODE:-disable} + - 
REDIS_HOST=db.clicodeplus.com + depends_on: !reset {} redis: - container_name: sub2api-beta-redis + profiles: + - disabled YAML # 6) 启动 beta(独立 project,确保不影响现网) @@ -332,10 +363,11 @@ docker logs sub2api-beta --tail 50 ### 数据库配置约定(beta) -- 数据库地址/SSL/密码:与现网一致(从现网 `.env` 复制即可)。 +- 数据库地址/SSL/密码:与现网一致(从现网 `.env` 复制即可),均指向 `db.clicodeplus.com`。 - 仅修改: - `POSTGRES_USER=beta` - `POSTGRES_DB=beta` + - `REDIS_DB=2` 注意:需要数据库侧已存在 `beta` 用户与 `beta` 数据库,并授予权限;否则容器会启动失败并不断重启。 @@ -415,7 +447,19 @@ git checkout -B release/custom-0.1.69 fork/release/custom-0.1.69 # 配置环境变量 cd deploy cp .env.example .env -vim .env # 配置 DATABASE_URL, REDIS_URL, JWT_SECRET 等 +vim .env # 配置 DATABASE_HOST=db.clicodeplus.com, POSTGRES_PASSWORD, REDIS_PASSWORD, JWT_SECRET 等 + +# 创建 override 文件(Redis 指向外部服务,去掉容器 Redis 依赖) +cat > docker-compose.override.yml <<'YAML' +services: + sub2api: + environment: + - REDIS_HOST=db.clicodeplus.com + depends_on: !reset {} + redis: + profiles: + - disabled +YAML ``` ### 5. 生产服务器:更新镜像标签并启动服务 @@ -540,6 +584,7 @@ x-api-key: admin-xxx | 正式 | `https://clicodeplus.com` | 生产环境 | | Beta | `http://<服务器IP>:8084` | 仅内网访问 | | OpenAI | `http://<服务器IP>:8083` | 仅内网访问 | +| Star | `https://hyntoken.com` | 独立环境 | > 以下接口文档中,`${BASE}` 代表环境基础地址,`${KEY}` 代表 `.env` 中的 `ADMIN_API_KEY`。操作前执行 `source .env` 或 `export KEY=$ADMIN_API_KEY` 加载。 diff --git a/CLAUDE.md b/CLAUDE.md index e8e9c93f..5cf453ae 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -99,7 +99,7 @@ git push origin main - 本地已配置 SSH 别名 `clicodeplus` 连接到生产服务器(运行服务) - 本地已配置 SSH 别名 `us-asaki-root` 连接到构建服务器(拉取代码、构建镜像) -- 生产服务器部署目录:`/root/sub2api`(正式)、`/root/sub2api-beta`(测试) +- 生产服务器部署目录:`/root/sub2api`(正式)、`/root/sub2api-beta`(测试)、`/root/sub2api-star`(Star) - 生产服务器使用 Docker Compose 部署 - **镜像统一在构建服务器上构建**,避免生产服务器因编译占用 CPU/内存影响线上服务 @@ -120,10 +120,11 @@ git push origin main | 正式 | `/root/sub2api` | 8080 | `sub2api` | 0 | `sub2api` | | Beta | `/root/sub2api-beta` | 8084 | `beta` | 2 | `sub2api-beta` | | OpenAI | `/root/sub2api-openai` | 8083 | 
`openai` | 3 | `sub2api-openai` | +| Star | `/root/sub2api-star` | 8086 | `star` | 4 | `sub2api-star` | ### 外部数据库与 Redis -所有环境(正式、Beta、OpenAI)共用 `db.clicodeplus.com` 上的 **PostgreSQL 16** 和 **Redis 7**,不使用容器内数据库或 Redis。 +所有环境(正式、Beta、OpenAI、Star)共用 `db.clicodeplus.com` 上的 **PostgreSQL 16** 和 **Redis 7**,不使用容器内数据库或 Redis。 **PostgreSQL**(端口 5432,TLS 加密,scram-sha-256 认证): @@ -132,6 +133,7 @@ git push origin main | 正式 | `sub2api` | `sub2api` | | Beta | `beta` | `beta` | | OpenAI | `openai` | `openai` | +| Star | `star` | `star` | **Redis**(端口 6379,密码认证): @@ -140,6 +142,7 @@ git push origin main | 正式 | 0 | | Beta | 2 | | OpenAI | 3 | +| Star | 4 | **配置方式**: - 数据库通过 `.env` 中的 `DATABASE_HOST`、`DATABASE_SSLMODE`、`POSTGRES_USER`、`POSTGRES_PASSWORD`、`POSTGRES_DB` 配置 @@ -581,6 +584,7 @@ x-api-key: admin-xxx | 正式 | `https://clicodeplus.com` | 生产环境 | | Beta | `http://<服务器IP>:8084` | 仅内网访问 | | OpenAI | `http://<服务器IP>:8083` | 仅内网访问 | +| Star | `https://hyntoken.com` | 独立环境 | > 以下接口文档中,`${BASE}` 代表环境基础地址,`${KEY}` 代表 `.env` 中的 `ADMIN_API_KEY`。操作前执行 `source .env` 或 `export KEY=$ADMIN_API_KEY` 加载。 From 1fb6e9e830f540b43d4a89e95f7c2767ea417e95 Mon Sep 17 00:00:00 2001 From: erio Date: Fri, 27 Feb 2026 01:54:54 +0800 Subject: [PATCH 092/175] feat: add claude max usage simulation with group switch --- backend/ent/client.go | 171 ++- backend/ent/ent.go | 2 + backend/ent/group.go | 29 +- backend/ent/group/group.go | 10 + backend/ent/group/where.go | 15 + backend/ent/group_create.go | 65 + backend/ent/group_update.go | 34 + backend/ent/hook/hook.go | 12 + backend/ent/idempotencyrecord.go | 228 ++++ .../idempotencyrecord/idempotencyrecord.go | 148 +++ backend/ent/idempotencyrecord/where.go | 755 +++++++++++ backend/ent/idempotencyrecord_create.go | 1132 +++++++++++++++++ backend/ent/idempotencyrecord_delete.go | 88 ++ backend/ent/idempotencyrecord_query.go | 564 ++++++++ backend/ent/idempotencyrecord_update.go | 676 ++++++++++ backend/ent/intercept/intercept.go | 30 + 
backend/ent/migrate/schema.go | 43 + backend/ent/mutation.go | 1040 ++++++++++++++- backend/ent/predicate/predicate.go | 3 + backend/ent/runtime/runtime.go | 40 + backend/ent/schema/group.go | 34 +- backend/ent/tx.go | 3 + backend/go.sum | 10 + .../internal/handler/admin/group_handler.go | 16 +- backend/internal/handler/dto/mappers.go | 15 +- backend/internal/handler/dto/types.go | 2 + backend/internal/handler/gateway_handler.go | 2 + backend/internal/repository/api_key_repo.go | 2 + backend/internal/repository/group_repo.go | 6 +- backend/internal/service/admin_service.go | 31 +- .../service/admin_service_group_test.go | 54 + .../internal/service/api_key_auth_cache.go | 7 +- .../service/api_key_auth_cache_impl.go | 2 + .../service/claude_max_simulation_test.go | 92 ++ .../gateway_record_usage_claude_max_test.go | 140 ++ backend/internal/service/gateway_service.go | 246 +++- backend/internal/service/group.go | 3 + .../060_add_group_simulate_claude_max.sql | 3 + frontend/src/i18n/locales/en.ts | 8 + frontend/src/i18n/locales/zh.ts | 8 + frontend/src/types/index.ts | 4 + frontend/src/views/admin/GroupsView.vue | 130 ++ 42 files changed, 5831 insertions(+), 72 deletions(-) create mode 100644 backend/ent/idempotencyrecord.go create mode 100644 backend/ent/idempotencyrecord/idempotencyrecord.go create mode 100644 backend/ent/idempotencyrecord/where.go create mode 100644 backend/ent/idempotencyrecord_create.go create mode 100644 backend/ent/idempotencyrecord_delete.go create mode 100644 backend/ent/idempotencyrecord_query.go create mode 100644 backend/ent/idempotencyrecord_update.go create mode 100644 backend/internal/service/claude_max_simulation_test.go create mode 100644 backend/internal/service/gateway_record_usage_claude_max_test.go create mode 100644 backend/migrations/060_add_group_simulate_claude_max.sql diff --git a/backend/ent/client.go b/backend/ent/client.go index 504c1755..7ebbaa32 100644 --- a/backend/ent/client.go +++ b/backend/ent/client.go @@ -22,6 +22,7 
@@ import ( "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule" "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/idempotencyrecord" "github.com/Wei-Shaw/sub2api/ent/promocode" "github.com/Wei-Shaw/sub2api/ent/promocodeusage" "github.com/Wei-Shaw/sub2api/ent/proxy" @@ -58,6 +59,8 @@ type Client struct { ErrorPassthroughRule *ErrorPassthroughRuleClient // Group is the client for interacting with the Group builders. Group *GroupClient + // IdempotencyRecord is the client for interacting with the IdempotencyRecord builders. + IdempotencyRecord *IdempotencyRecordClient // PromoCode is the client for interacting with the PromoCode builders. PromoCode *PromoCodeClient // PromoCodeUsage is the client for interacting with the PromoCodeUsage builders. @@ -102,6 +105,7 @@ func (c *Client) init() { c.AnnouncementRead = NewAnnouncementReadClient(c.config) c.ErrorPassthroughRule = NewErrorPassthroughRuleClient(c.config) c.Group = NewGroupClient(c.config) + c.IdempotencyRecord = NewIdempotencyRecordClient(c.config) c.PromoCode = NewPromoCodeClient(c.config) c.PromoCodeUsage = NewPromoCodeUsageClient(c.config) c.Proxy = NewProxyClient(c.config) @@ -214,6 +218,7 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) { AnnouncementRead: NewAnnouncementReadClient(cfg), ErrorPassthroughRule: NewErrorPassthroughRuleClient(cfg), Group: NewGroupClient(cfg), + IdempotencyRecord: NewIdempotencyRecordClient(cfg), PromoCode: NewPromoCodeClient(cfg), PromoCodeUsage: NewPromoCodeUsageClient(cfg), Proxy: NewProxyClient(cfg), @@ -253,6 +258,7 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) AnnouncementRead: NewAnnouncementReadClient(cfg), ErrorPassthroughRule: NewErrorPassthroughRuleClient(cfg), Group: NewGroupClient(cfg), + IdempotencyRecord: NewIdempotencyRecordClient(cfg), PromoCode: NewPromoCodeClient(cfg), PromoCodeUsage: NewPromoCodeUsageClient(cfg), Proxy: NewProxyClient(cfg), @@ 
-296,10 +302,10 @@ func (c *Client) Close() error { func (c *Client) Use(hooks ...Hook) { for _, n := range []interface{ Use(...Hook) }{ c.APIKey, c.Account, c.AccountGroup, c.Announcement, c.AnnouncementRead, - c.ErrorPassthroughRule, c.Group, c.PromoCode, c.PromoCodeUsage, c.Proxy, - c.RedeemCode, c.SecuritySecret, c.Setting, c.UsageCleanupTask, c.UsageLog, - c.User, c.UserAllowedGroup, c.UserAttributeDefinition, c.UserAttributeValue, - c.UserSubscription, + c.ErrorPassthroughRule, c.Group, c.IdempotencyRecord, c.PromoCode, + c.PromoCodeUsage, c.Proxy, c.RedeemCode, c.SecuritySecret, c.Setting, + c.UsageCleanupTask, c.UsageLog, c.User, c.UserAllowedGroup, + c.UserAttributeDefinition, c.UserAttributeValue, c.UserSubscription, } { n.Use(hooks...) } @@ -310,10 +316,10 @@ func (c *Client) Use(hooks ...Hook) { func (c *Client) Intercept(interceptors ...Interceptor) { for _, n := range []interface{ Intercept(...Interceptor) }{ c.APIKey, c.Account, c.AccountGroup, c.Announcement, c.AnnouncementRead, - c.ErrorPassthroughRule, c.Group, c.PromoCode, c.PromoCodeUsage, c.Proxy, - c.RedeemCode, c.SecuritySecret, c.Setting, c.UsageCleanupTask, c.UsageLog, - c.User, c.UserAllowedGroup, c.UserAttributeDefinition, c.UserAttributeValue, - c.UserSubscription, + c.ErrorPassthroughRule, c.Group, c.IdempotencyRecord, c.PromoCode, + c.PromoCodeUsage, c.Proxy, c.RedeemCode, c.SecuritySecret, c.Setting, + c.UsageCleanupTask, c.UsageLog, c.User, c.UserAllowedGroup, + c.UserAttributeDefinition, c.UserAttributeValue, c.UserSubscription, } { n.Intercept(interceptors...) 
} @@ -336,6 +342,8 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { return c.ErrorPassthroughRule.mutate(ctx, m) case *GroupMutation: return c.Group.mutate(ctx, m) + case *IdempotencyRecordMutation: + return c.IdempotencyRecord.mutate(ctx, m) case *PromoCodeMutation: return c.PromoCode.mutate(ctx, m) case *PromoCodeUsageMutation: @@ -1575,6 +1583,139 @@ func (c *GroupClient) mutate(ctx context.Context, m *GroupMutation) (Value, erro } } +// IdempotencyRecordClient is a client for the IdempotencyRecord schema. +type IdempotencyRecordClient struct { + config +} + +// NewIdempotencyRecordClient returns a client for the IdempotencyRecord from the given config. +func NewIdempotencyRecordClient(c config) *IdempotencyRecordClient { + return &IdempotencyRecordClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `idempotencyrecord.Hooks(f(g(h())))`. +func (c *IdempotencyRecordClient) Use(hooks ...Hook) { + c.hooks.IdempotencyRecord = append(c.hooks.IdempotencyRecord, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `idempotencyrecord.Intercept(f(g(h())))`. +func (c *IdempotencyRecordClient) Intercept(interceptors ...Interceptor) { + c.inters.IdempotencyRecord = append(c.inters.IdempotencyRecord, interceptors...) +} + +// Create returns a builder for creating a IdempotencyRecord entity. +func (c *IdempotencyRecordClient) Create() *IdempotencyRecordCreate { + mutation := newIdempotencyRecordMutation(c.config, OpCreate) + return &IdempotencyRecordCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of IdempotencyRecord entities. 
+func (c *IdempotencyRecordClient) CreateBulk(builders ...*IdempotencyRecordCreate) *IdempotencyRecordCreateBulk { + return &IdempotencyRecordCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *IdempotencyRecordClient) MapCreateBulk(slice any, setFunc func(*IdempotencyRecordCreate, int)) *IdempotencyRecordCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &IdempotencyRecordCreateBulk{err: fmt.Errorf("calling to IdempotencyRecordClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*IdempotencyRecordCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &IdempotencyRecordCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for IdempotencyRecord. +func (c *IdempotencyRecordClient) Update() *IdempotencyRecordUpdate { + mutation := newIdempotencyRecordMutation(c.config, OpUpdate) + return &IdempotencyRecordUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *IdempotencyRecordClient) UpdateOne(_m *IdempotencyRecord) *IdempotencyRecordUpdateOne { + mutation := newIdempotencyRecordMutation(c.config, OpUpdateOne, withIdempotencyRecord(_m)) + return &IdempotencyRecordUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *IdempotencyRecordClient) UpdateOneID(id int64) *IdempotencyRecordUpdateOne { + mutation := newIdempotencyRecordMutation(c.config, OpUpdateOne, withIdempotencyRecordID(id)) + return &IdempotencyRecordUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for IdempotencyRecord. 
+func (c *IdempotencyRecordClient) Delete() *IdempotencyRecordDelete { + mutation := newIdempotencyRecordMutation(c.config, OpDelete) + return &IdempotencyRecordDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *IdempotencyRecordClient) DeleteOne(_m *IdempotencyRecord) *IdempotencyRecordDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *IdempotencyRecordClient) DeleteOneID(id int64) *IdempotencyRecordDeleteOne { + builder := c.Delete().Where(idempotencyrecord.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &IdempotencyRecordDeleteOne{builder} +} + +// Query returns a query builder for IdempotencyRecord. +func (c *IdempotencyRecordClient) Query() *IdempotencyRecordQuery { + return &IdempotencyRecordQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeIdempotencyRecord}, + inters: c.Interceptors(), + } +} + +// Get returns a IdempotencyRecord entity by its id. +func (c *IdempotencyRecordClient) Get(ctx context.Context, id int64) (*IdempotencyRecord, error) { + return c.Query().Where(idempotencyrecord.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *IdempotencyRecordClient) GetX(ctx context.Context, id int64) *IdempotencyRecord { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *IdempotencyRecordClient) Hooks() []Hook { + return c.hooks.IdempotencyRecord +} + +// Interceptors returns the client interceptors. 
+func (c *IdempotencyRecordClient) Interceptors() []Interceptor { + return c.inters.IdempotencyRecord +} + +func (c *IdempotencyRecordClient) mutate(ctx context.Context, m *IdempotencyRecordMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&IdempotencyRecordCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&IdempotencyRecordUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&IdempotencyRecordUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&IdempotencyRecordDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown IdempotencyRecord mutation op: %q", m.Op()) + } +} + // PromoCodeClient is a client for the PromoCode schema. type PromoCodeClient struct { config @@ -3747,15 +3888,17 @@ func (c *UserSubscriptionClient) mutate(ctx context.Context, m *UserSubscription type ( hooks struct { APIKey, Account, AccountGroup, Announcement, AnnouncementRead, - ErrorPassthroughRule, Group, PromoCode, PromoCodeUsage, Proxy, RedeemCode, - SecuritySecret, Setting, UsageCleanupTask, UsageLog, User, UserAllowedGroup, - UserAttributeDefinition, UserAttributeValue, UserSubscription []ent.Hook + ErrorPassthroughRule, Group, IdempotencyRecord, PromoCode, PromoCodeUsage, + Proxy, RedeemCode, SecuritySecret, Setting, UsageCleanupTask, UsageLog, User, + UserAllowedGroup, UserAttributeDefinition, UserAttributeValue, + UserSubscription []ent.Hook } inters struct { APIKey, Account, AccountGroup, Announcement, AnnouncementRead, - ErrorPassthroughRule, Group, PromoCode, PromoCodeUsage, Proxy, RedeemCode, - SecuritySecret, Setting, UsageCleanupTask, UsageLog, User, UserAllowedGroup, - UserAttributeDefinition, UserAttributeValue, UserSubscription []ent.Interceptor + ErrorPassthroughRule, Group, IdempotencyRecord, PromoCode, PromoCodeUsage, + Proxy, RedeemCode, 
SecuritySecret, Setting, UsageCleanupTask, UsageLog, User, + UserAllowedGroup, UserAttributeDefinition, UserAttributeValue, + UserSubscription []ent.Interceptor } ) diff --git a/backend/ent/ent.go b/backend/ent/ent.go index c4ec3387..5197e4d8 100644 --- a/backend/ent/ent.go +++ b/backend/ent/ent.go @@ -19,6 +19,7 @@ import ( "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule" "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/idempotencyrecord" "github.com/Wei-Shaw/sub2api/ent/promocode" "github.com/Wei-Shaw/sub2api/ent/promocodeusage" "github.com/Wei-Shaw/sub2api/ent/proxy" @@ -99,6 +100,7 @@ func checkColumn(t, c string) error { announcementread.Table: announcementread.ValidColumn, errorpassthroughrule.Table: errorpassthroughrule.ValidColumn, group.Table: group.ValidColumn, + idempotencyrecord.Table: idempotencyrecord.ValidColumn, promocode.Table: promocode.ValidColumn, promocodeusage.Table: promocodeusage.ValidColumn, proxy.Table: proxy.ValidColumn, diff --git a/backend/ent/group.go b/backend/ent/group.go index 79ec5bf5..db4641a8 100644 --- a/backend/ent/group.go +++ b/backend/ent/group.go @@ -60,22 +60,24 @@ type Group struct { SoraVideoPricePerRequest *float64 `json:"sora_video_price_per_request,omitempty"` // SoraVideoPricePerRequestHd holds the value of the "sora_video_price_per_request_hd" field. 
SoraVideoPricePerRequestHd *float64 `json:"sora_video_price_per_request_hd,omitempty"` - // 是否仅允许 Claude Code 客户端 + // allow Claude Code client only ClaudeCodeOnly bool `json:"claude_code_only,omitempty"` - // 非 Claude Code 请求降级使用的分组 ID + // fallback group for non-Claude-Code requests FallbackGroupID *int64 `json:"fallback_group_id,omitempty"` - // 无效请求兜底使用的分组 ID + // fallback group for invalid request FallbackGroupIDOnInvalidRequest *int64 `json:"fallback_group_id_on_invalid_request,omitempty"` - // 模型路由配置:模型模式 -> 优先账号ID列表 + // model routing config: pattern -> account ids ModelRouting map[string][]int64 `json:"model_routing,omitempty"` - // 是否启用模型路由配置 + // whether model routing is enabled ModelRoutingEnabled bool `json:"model_routing_enabled,omitempty"` - // 是否注入 MCP XML 调用协议提示词(仅 antigravity 平台) + // whether MCP XML prompt injection is enabled McpXMLInject bool `json:"mcp_xml_inject,omitempty"` - // 支持的模型系列:claude, gemini_text, gemini_image + // supported model scopes: claude, gemini_text, gemini_image SupportedModelScopes []string `json:"supported_model_scopes,omitempty"` - // 分组显示排序,数值越小越靠前 + // group display order, lower comes first SortOrder int `json:"sort_order,omitempty"` + // simulate claude usage as claude-max style (1h cache write) + SimulateClaudeMaxEnabled bool `json:"simulate_claude_max_enabled,omitempty"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the GroupQuery when eager-loading is set. 
Edges GroupEdges `json:"edges"` @@ -184,7 +186,7 @@ func (*Group) scanValues(columns []string) ([]any, error) { switch columns[i] { case group.FieldModelRouting, group.FieldSupportedModelScopes: values[i] = new([]byte) - case group.FieldIsExclusive, group.FieldClaudeCodeOnly, group.FieldModelRoutingEnabled, group.FieldMcpXMLInject: + case group.FieldIsExclusive, group.FieldClaudeCodeOnly, group.FieldModelRoutingEnabled, group.FieldMcpXMLInject, group.FieldSimulateClaudeMaxEnabled: values[i] = new(sql.NullBool) case group.FieldRateMultiplier, group.FieldDailyLimitUsd, group.FieldWeeklyLimitUsd, group.FieldMonthlyLimitUsd, group.FieldImagePrice1k, group.FieldImagePrice2k, group.FieldImagePrice4k, group.FieldSoraImagePrice360, group.FieldSoraImagePrice540, group.FieldSoraVideoPricePerRequest, group.FieldSoraVideoPricePerRequestHd: values[i] = new(sql.NullFloat64) @@ -407,6 +409,12 @@ func (_m *Group) assignValues(columns []string, values []any) error { } else if value.Valid { _m.SortOrder = int(value.Int64) } + case group.FieldSimulateClaudeMaxEnabled: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field simulate_claude_max_enabled", values[i]) + } else if value.Valid { + _m.SimulateClaudeMaxEnabled = value.Bool + } default: _m.selectValues.Set(columns[i], values[i]) } @@ -597,6 +605,9 @@ func (_m *Group) String() string { builder.WriteString(", ") builder.WriteString("sort_order=") builder.WriteString(fmt.Sprintf("%v", _m.SortOrder)) + builder.WriteString(", ") + builder.WriteString("simulate_claude_max_enabled=") + builder.WriteString(fmt.Sprintf("%v", _m.SimulateClaudeMaxEnabled)) builder.WriteByte(')') return builder.String() } diff --git a/backend/ent/group/group.go b/backend/ent/group/group.go index 133123a1..ab889171 100644 --- a/backend/ent/group/group.go +++ b/backend/ent/group/group.go @@ -73,6 +73,8 @@ const ( FieldSupportedModelScopes = "supported_model_scopes" // FieldSortOrder holds the string denoting the 
sort_order field in the database. FieldSortOrder = "sort_order" + // FieldSimulateClaudeMaxEnabled holds the string denoting the simulate_claude_max_enabled field in the database. + FieldSimulateClaudeMaxEnabled = "simulate_claude_max_enabled" // EdgeAPIKeys holds the string denoting the api_keys edge name in mutations. EdgeAPIKeys = "api_keys" // EdgeRedeemCodes holds the string denoting the redeem_codes edge name in mutations. @@ -177,6 +179,7 @@ var Columns = []string{ FieldMcpXMLInject, FieldSupportedModelScopes, FieldSortOrder, + FieldSimulateClaudeMaxEnabled, } var ( @@ -242,6 +245,8 @@ var ( DefaultSupportedModelScopes []string // DefaultSortOrder holds the default value on creation for the "sort_order" field. DefaultSortOrder int + // DefaultSimulateClaudeMaxEnabled holds the default value on creation for the "simulate_claude_max_enabled" field. + DefaultSimulateClaudeMaxEnabled bool ) // OrderOption defines the ordering options for the Group queries. @@ -387,6 +392,11 @@ func BySortOrder(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldSortOrder, opts...).ToFunc() } +// BySimulateClaudeMaxEnabled orders the results by the simulate_claude_max_enabled field. +func BySimulateClaudeMaxEnabled(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSimulateClaudeMaxEnabled, opts...).ToFunc() +} + // ByAPIKeysCount orders the results by api_keys count. func ByAPIKeysCount(opts ...sql.OrderTermOption) OrderOption { return func(s *sql.Selector) { diff --git a/backend/ent/group/where.go b/backend/ent/group/where.go index 127d4ae9..e7d88991 100644 --- a/backend/ent/group/where.go +++ b/backend/ent/group/where.go @@ -190,6 +190,11 @@ func SortOrder(v int) predicate.Group { return predicate.Group(sql.FieldEQ(FieldSortOrder, v)) } +// SimulateClaudeMaxEnabled applies equality check predicate on the "simulate_claude_max_enabled" field. It's identical to SimulateClaudeMaxEnabledEQ. 
+func SimulateClaudeMaxEnabled(v bool) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldSimulateClaudeMaxEnabled, v)) +} + // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.Group { return predicate.Group(sql.FieldEQ(FieldCreatedAt, v)) @@ -1425,6 +1430,16 @@ func SortOrderLTE(v int) predicate.Group { return predicate.Group(sql.FieldLTE(FieldSortOrder, v)) } +// SimulateClaudeMaxEnabledEQ applies the EQ predicate on the "simulate_claude_max_enabled" field. +func SimulateClaudeMaxEnabledEQ(v bool) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldSimulateClaudeMaxEnabled, v)) +} + +// SimulateClaudeMaxEnabledNEQ applies the NEQ predicate on the "simulate_claude_max_enabled" field. +func SimulateClaudeMaxEnabledNEQ(v bool) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldSimulateClaudeMaxEnabled, v)) +} + // HasAPIKeys applies the HasEdge predicate on the "api_keys" edge. func HasAPIKeys() predicate.Group { return predicate.Group(func(s *sql.Selector) { diff --git a/backend/ent/group_create.go b/backend/ent/group_create.go index 4416516b..9cd3a766 100644 --- a/backend/ent/group_create.go +++ b/backend/ent/group_create.go @@ -410,6 +410,20 @@ func (_c *GroupCreate) SetNillableSortOrder(v *int) *GroupCreate { return _c } +// SetSimulateClaudeMaxEnabled sets the "simulate_claude_max_enabled" field. +func (_c *GroupCreate) SetSimulateClaudeMaxEnabled(v bool) *GroupCreate { + _c.mutation.SetSimulateClaudeMaxEnabled(v) + return _c +} + +// SetNillableSimulateClaudeMaxEnabled sets the "simulate_claude_max_enabled" field if the given value is not nil. +func (_c *GroupCreate) SetNillableSimulateClaudeMaxEnabled(v *bool) *GroupCreate { + if v != nil { + _c.SetSimulateClaudeMaxEnabled(*v) + } + return _c +} + // AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs. func (_c *GroupCreate) AddAPIKeyIDs(ids ...int64) *GroupCreate { _c.mutation.AddAPIKeyIDs(ids...) 
@@ -595,6 +609,10 @@ func (_c *GroupCreate) defaults() error { v := group.DefaultSortOrder _c.mutation.SetSortOrder(v) } + if _, ok := _c.mutation.SimulateClaudeMaxEnabled(); !ok { + v := group.DefaultSimulateClaudeMaxEnabled + _c.mutation.SetSimulateClaudeMaxEnabled(v) + } return nil } @@ -662,6 +680,9 @@ func (_c *GroupCreate) check() error { if _, ok := _c.mutation.SortOrder(); !ok { return &ValidationError{Name: "sort_order", err: errors.New(`ent: missing required field "Group.sort_order"`)} } + if _, ok := _c.mutation.SimulateClaudeMaxEnabled(); !ok { + return &ValidationError{Name: "simulate_claude_max_enabled", err: errors.New(`ent: missing required field "Group.simulate_claude_max_enabled"`)} + } return nil } @@ -805,6 +826,10 @@ func (_c *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) { _spec.SetField(group.FieldSortOrder, field.TypeInt, value) _node.SortOrder = value } + if value, ok := _c.mutation.SimulateClaudeMaxEnabled(); ok { + _spec.SetField(group.FieldSimulateClaudeMaxEnabled, field.TypeBool, value) + _node.SimulateClaudeMaxEnabled = value + } if nodes := _c.mutation.APIKeysIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, @@ -1477,6 +1502,18 @@ func (u *GroupUpsert) AddSortOrder(v int) *GroupUpsert { return u } +// SetSimulateClaudeMaxEnabled sets the "simulate_claude_max_enabled" field. +func (u *GroupUpsert) SetSimulateClaudeMaxEnabled(v bool) *GroupUpsert { + u.Set(group.FieldSimulateClaudeMaxEnabled, v) + return u +} + +// UpdateSimulateClaudeMaxEnabled sets the "simulate_claude_max_enabled" field to the value that was provided on create. +func (u *GroupUpsert) UpdateSimulateClaudeMaxEnabled() *GroupUpsert { + u.SetExcluded(group.FieldSimulateClaudeMaxEnabled) + return u +} + // UpdateNewValues updates the mutable fields using the new values that were set on create. 
// Using this option is equivalent to using: // @@ -2124,6 +2161,20 @@ func (u *GroupUpsertOne) UpdateSortOrder() *GroupUpsertOne { }) } +// SetSimulateClaudeMaxEnabled sets the "simulate_claude_max_enabled" field. +func (u *GroupUpsertOne) SetSimulateClaudeMaxEnabled(v bool) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetSimulateClaudeMaxEnabled(v) + }) +} + +// UpdateSimulateClaudeMaxEnabled sets the "simulate_claude_max_enabled" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdateSimulateClaudeMaxEnabled() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateSimulateClaudeMaxEnabled() + }) +} + // Exec executes the query. func (u *GroupUpsertOne) Exec(ctx context.Context) error { if len(u.create.conflict) == 0 { @@ -2937,6 +2988,20 @@ func (u *GroupUpsertBulk) UpdateSortOrder() *GroupUpsertBulk { }) } +// SetSimulateClaudeMaxEnabled sets the "simulate_claude_max_enabled" field. +func (u *GroupUpsertBulk) SetSimulateClaudeMaxEnabled(v bool) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetSimulateClaudeMaxEnabled(v) + }) +} + +// UpdateSimulateClaudeMaxEnabled sets the "simulate_claude_max_enabled" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdateSimulateClaudeMaxEnabled() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateSimulateClaudeMaxEnabled() + }) +} + // Exec executes the query. func (u *GroupUpsertBulk) Exec(ctx context.Context) error { if u.create.err != nil { diff --git a/backend/ent/group_update.go b/backend/ent/group_update.go index db510e05..044d24a9 100644 --- a/backend/ent/group_update.go +++ b/backend/ent/group_update.go @@ -604,6 +604,20 @@ func (_u *GroupUpdate) AddSortOrder(v int) *GroupUpdate { return _u } +// SetSimulateClaudeMaxEnabled sets the "simulate_claude_max_enabled" field. 
+func (_u *GroupUpdate) SetSimulateClaudeMaxEnabled(v bool) *GroupUpdate { + _u.mutation.SetSimulateClaudeMaxEnabled(v) + return _u +} + +// SetNillableSimulateClaudeMaxEnabled sets the "simulate_claude_max_enabled" field if the given value is not nil. +func (_u *GroupUpdate) SetNillableSimulateClaudeMaxEnabled(v *bool) *GroupUpdate { + if v != nil { + _u.SetSimulateClaudeMaxEnabled(*v) + } + return _u +} + // AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs. func (_u *GroupUpdate) AddAPIKeyIDs(ids ...int64) *GroupUpdate { _u.mutation.AddAPIKeyIDs(ids...) @@ -1083,6 +1097,9 @@ func (_u *GroupUpdate) sqlSave(ctx context.Context) (_node int, err error) { if value, ok := _u.mutation.AddedSortOrder(); ok { _spec.AddField(group.FieldSortOrder, field.TypeInt, value) } + if value, ok := _u.mutation.SimulateClaudeMaxEnabled(); ok { + _spec.SetField(group.FieldSimulateClaudeMaxEnabled, field.TypeBool, value) + } if _u.mutation.APIKeysCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, @@ -1966,6 +1983,20 @@ func (_u *GroupUpdateOne) AddSortOrder(v int) *GroupUpdateOne { return _u } +// SetSimulateClaudeMaxEnabled sets the "simulate_claude_max_enabled" field. +func (_u *GroupUpdateOne) SetSimulateClaudeMaxEnabled(v bool) *GroupUpdateOne { + _u.mutation.SetSimulateClaudeMaxEnabled(v) + return _u +} + +// SetNillableSimulateClaudeMaxEnabled sets the "simulate_claude_max_enabled" field if the given value is not nil. +func (_u *GroupUpdateOne) SetNillableSimulateClaudeMaxEnabled(v *bool) *GroupUpdateOne { + if v != nil { + _u.SetSimulateClaudeMaxEnabled(*v) + } + return _u +} + // AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs. func (_u *GroupUpdateOne) AddAPIKeyIDs(ids ...int64) *GroupUpdateOne { _u.mutation.AddAPIKeyIDs(ids...) 
@@ -2475,6 +2506,9 @@ func (_u *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error) if value, ok := _u.mutation.AddedSortOrder(); ok { _spec.AddField(group.FieldSortOrder, field.TypeInt, value) } + if value, ok := _u.mutation.SimulateClaudeMaxEnabled(); ok { + _spec.SetField(group.FieldSimulateClaudeMaxEnabled, field.TypeBool, value) + } if _u.mutation.APIKeysCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, diff --git a/backend/ent/hook/hook.go b/backend/ent/hook/hook.go index aff9caa0..49d7f3c5 100644 --- a/backend/ent/hook/hook.go +++ b/backend/ent/hook/hook.go @@ -93,6 +93,18 @@ func (f GroupFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.GroupMutation", m) } +// The IdempotencyRecordFunc type is an adapter to allow the use of ordinary +// function as IdempotencyRecord mutator. +type IdempotencyRecordFunc func(context.Context, *ent.IdempotencyRecordMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f IdempotencyRecordFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.IdempotencyRecordMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.IdempotencyRecordMutation", m) +} + // The PromoCodeFunc type is an adapter to allow the use of ordinary // function as PromoCode mutator. type PromoCodeFunc func(context.Context, *ent.PromoCodeMutation) (ent.Value, error) diff --git a/backend/ent/idempotencyrecord.go b/backend/ent/idempotencyrecord.go new file mode 100644 index 00000000..ab120f8f --- /dev/null +++ b/backend/ent/idempotencyrecord.go @@ -0,0 +1,228 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/idempotencyrecord" +) + +// IdempotencyRecord is the model entity for the IdempotencyRecord schema. 
+type IdempotencyRecord struct { + config `json:"-"` + // ID of the ent. + ID int64 `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // Scope holds the value of the "scope" field. + Scope string `json:"scope,omitempty"` + // IdempotencyKeyHash holds the value of the "idempotency_key_hash" field. + IdempotencyKeyHash string `json:"idempotency_key_hash,omitempty"` + // RequestFingerprint holds the value of the "request_fingerprint" field. + RequestFingerprint string `json:"request_fingerprint,omitempty"` + // Status holds the value of the "status" field. + Status string `json:"status,omitempty"` + // ResponseStatus holds the value of the "response_status" field. + ResponseStatus *int `json:"response_status,omitempty"` + // ResponseBody holds the value of the "response_body" field. + ResponseBody *string `json:"response_body,omitempty"` + // ErrorReason holds the value of the "error_reason" field. + ErrorReason *string `json:"error_reason,omitempty"` + // LockedUntil holds the value of the "locked_until" field. + LockedUntil *time.Time `json:"locked_until,omitempty"` + // ExpiresAt holds the value of the "expires_at" field. + ExpiresAt time.Time `json:"expires_at,omitempty"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*IdempotencyRecord) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case idempotencyrecord.FieldID, idempotencyrecord.FieldResponseStatus: + values[i] = new(sql.NullInt64) + case idempotencyrecord.FieldScope, idempotencyrecord.FieldIdempotencyKeyHash, idempotencyrecord.FieldRequestFingerprint, idempotencyrecord.FieldStatus, idempotencyrecord.FieldResponseBody, idempotencyrecord.FieldErrorReason: + values[i] = new(sql.NullString) + case idempotencyrecord.FieldCreatedAt, idempotencyrecord.FieldUpdatedAt, idempotencyrecord.FieldLockedUntil, idempotencyrecord.FieldExpiresAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the IdempotencyRecord fields. +func (_m *IdempotencyRecord) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case idempotencyrecord.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case idempotencyrecord.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + case idempotencyrecord.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + _m.UpdatedAt = value.Time + } + case idempotencyrecord.FieldScope: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field scope", values[i]) + } else if value.Valid { + _m.Scope = 
value.String + } + case idempotencyrecord.FieldIdempotencyKeyHash: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field idempotency_key_hash", values[i]) + } else if value.Valid { + _m.IdempotencyKeyHash = value.String + } + case idempotencyrecord.FieldRequestFingerprint: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field request_fingerprint", values[i]) + } else if value.Valid { + _m.RequestFingerprint = value.String + } + case idempotencyrecord.FieldStatus: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field status", values[i]) + } else if value.Valid { + _m.Status = value.String + } + case idempotencyrecord.FieldResponseStatus: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field response_status", values[i]) + } else if value.Valid { + _m.ResponseStatus = new(int) + *_m.ResponseStatus = int(value.Int64) + } + case idempotencyrecord.FieldResponseBody: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field response_body", values[i]) + } else if value.Valid { + _m.ResponseBody = new(string) + *_m.ResponseBody = value.String + } + case idempotencyrecord.FieldErrorReason: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field error_reason", values[i]) + } else if value.Valid { + _m.ErrorReason = new(string) + *_m.ErrorReason = value.String + } + case idempotencyrecord.FieldLockedUntil: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field locked_until", values[i]) + } else if value.Valid { + _m.LockedUntil = new(time.Time) + *_m.LockedUntil = value.Time + } + case idempotencyrecord.FieldExpiresAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field expires_at", values[i]) + } else 
if value.Valid { + _m.ExpiresAt = value.Time + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the IdempotencyRecord. +// This includes values selected through modifiers, order, etc. +func (_m *IdempotencyRecord) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// Update returns a builder for updating this IdempotencyRecord. +// Note that you need to call IdempotencyRecord.Unwrap() before calling this method if this IdempotencyRecord +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *IdempotencyRecord) Update() *IdempotencyRecordUpdateOne { + return NewIdempotencyRecordClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the IdempotencyRecord entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *IdempotencyRecord) Unwrap() *IdempotencyRecord { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: IdempotencyRecord is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. 
+func (_m *IdempotencyRecord) String() string { + var builder strings.Builder + builder.WriteString("IdempotencyRecord(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("scope=") + builder.WriteString(_m.Scope) + builder.WriteString(", ") + builder.WriteString("idempotency_key_hash=") + builder.WriteString(_m.IdempotencyKeyHash) + builder.WriteString(", ") + builder.WriteString("request_fingerprint=") + builder.WriteString(_m.RequestFingerprint) + builder.WriteString(", ") + builder.WriteString("status=") + builder.WriteString(_m.Status) + builder.WriteString(", ") + if v := _m.ResponseStatus; v != nil { + builder.WriteString("response_status=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + if v := _m.ResponseBody; v != nil { + builder.WriteString("response_body=") + builder.WriteString(*v) + } + builder.WriteString(", ") + if v := _m.ErrorReason; v != nil { + builder.WriteString("error_reason=") + builder.WriteString(*v) + } + builder.WriteString(", ") + if v := _m.LockedUntil; v != nil { + builder.WriteString("locked_until=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + builder.WriteString("expires_at=") + builder.WriteString(_m.ExpiresAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// IdempotencyRecords is a parsable slice of IdempotencyRecord. +type IdempotencyRecords []*IdempotencyRecord diff --git a/backend/ent/idempotencyrecord/idempotencyrecord.go b/backend/ent/idempotencyrecord/idempotencyrecord.go new file mode 100644 index 00000000..d9686f60 --- /dev/null +++ b/backend/ent/idempotencyrecord/idempotencyrecord.go @@ -0,0 +1,148 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package idempotencyrecord + +import ( + "time" + + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the idempotencyrecord type in the database. + Label = "idempotency_record" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldScope holds the string denoting the scope field in the database. + FieldScope = "scope" + // FieldIdempotencyKeyHash holds the string denoting the idempotency_key_hash field in the database. + FieldIdempotencyKeyHash = "idempotency_key_hash" + // FieldRequestFingerprint holds the string denoting the request_fingerprint field in the database. + FieldRequestFingerprint = "request_fingerprint" + // FieldStatus holds the string denoting the status field in the database. + FieldStatus = "status" + // FieldResponseStatus holds the string denoting the response_status field in the database. + FieldResponseStatus = "response_status" + // FieldResponseBody holds the string denoting the response_body field in the database. + FieldResponseBody = "response_body" + // FieldErrorReason holds the string denoting the error_reason field in the database. + FieldErrorReason = "error_reason" + // FieldLockedUntil holds the string denoting the locked_until field in the database. + FieldLockedUntil = "locked_until" + // FieldExpiresAt holds the string denoting the expires_at field in the database. + FieldExpiresAt = "expires_at" + // Table holds the table name of the idempotencyrecord in the database. + Table = "idempotency_records" +) + +// Columns holds all SQL columns for idempotencyrecord fields. 
+var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldScope, + FieldIdempotencyKeyHash, + FieldRequestFingerprint, + FieldStatus, + FieldResponseStatus, + FieldResponseBody, + FieldErrorReason, + FieldLockedUntil, + FieldExpiresAt, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // ScopeValidator is a validator for the "scope" field. It is called by the builders before save. + ScopeValidator func(string) error + // IdempotencyKeyHashValidator is a validator for the "idempotency_key_hash" field. It is called by the builders before save. + IdempotencyKeyHashValidator func(string) error + // RequestFingerprintValidator is a validator for the "request_fingerprint" field. It is called by the builders before save. + RequestFingerprintValidator func(string) error + // StatusValidator is a validator for the "status" field. It is called by the builders before save. + StatusValidator func(string) error + // ErrorReasonValidator is a validator for the "error_reason" field. It is called by the builders before save. + ErrorReasonValidator func(string) error +) + +// OrderOption defines the ordering options for the IdempotencyRecord queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. 
+func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByScope orders the results by the scope field. +func ByScope(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldScope, opts...).ToFunc() +} + +// ByIdempotencyKeyHash orders the results by the idempotency_key_hash field. +func ByIdempotencyKeyHash(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIdempotencyKeyHash, opts...).ToFunc() +} + +// ByRequestFingerprint orders the results by the request_fingerprint field. +func ByRequestFingerprint(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRequestFingerprint, opts...).ToFunc() +} + +// ByStatus orders the results by the status field. +func ByStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatus, opts...).ToFunc() +} + +// ByResponseStatus orders the results by the response_status field. +func ByResponseStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldResponseStatus, opts...).ToFunc() +} + +// ByResponseBody orders the results by the response_body field. +func ByResponseBody(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldResponseBody, opts...).ToFunc() +} + +// ByErrorReason orders the results by the error_reason field. +func ByErrorReason(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldErrorReason, opts...).ToFunc() +} + +// ByLockedUntil orders the results by the locked_until field. +func ByLockedUntil(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLockedUntil, opts...).ToFunc() +} + +// ByExpiresAt orders the results by the expires_at field. 
+func ByExpiresAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldExpiresAt, opts...).ToFunc() +} diff --git a/backend/ent/idempotencyrecord/where.go b/backend/ent/idempotencyrecord/where.go new file mode 100644 index 00000000..c3d8d9d5 --- /dev/null +++ b/backend/ent/idempotencyrecord/where.go @@ -0,0 +1,755 @@ +// Code generated by ent, DO NOT EDIT. + +package idempotencyrecord + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int64) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. 
+func IDLTE(id int64) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLTE(FieldID, id)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// Scope applies equality check predicate on the "scope" field. It's identical to ScopeEQ. +func Scope(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldScope, v)) +} + +// IdempotencyKeyHash applies equality check predicate on the "idempotency_key_hash" field. It's identical to IdempotencyKeyHashEQ. +func IdempotencyKeyHash(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldIdempotencyKeyHash, v)) +} + +// RequestFingerprint applies equality check predicate on the "request_fingerprint" field. It's identical to RequestFingerprintEQ. +func RequestFingerprint(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldRequestFingerprint, v)) +} + +// Status applies equality check predicate on the "status" field. It's identical to StatusEQ. +func Status(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldStatus, v)) +} + +// ResponseStatus applies equality check predicate on the "response_status" field. It's identical to ResponseStatusEQ. +func ResponseStatus(v int) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldResponseStatus, v)) +} + +// ResponseBody applies equality check predicate on the "response_body" field. It's identical to ResponseBodyEQ. 
+func ResponseBody(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldResponseBody, v)) +} + +// ErrorReason applies equality check predicate on the "error_reason" field. It's identical to ErrorReasonEQ. +func ErrorReason(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldErrorReason, v)) +} + +// LockedUntil applies equality check predicate on the "locked_until" field. It's identical to LockedUntilEQ. +func LockedUntil(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldLockedUntil, v)) +} + +// ExpiresAt applies equality check predicate on the "expires_at" field. It's identical to ExpiresAtEQ. +func ExpiresAt(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldExpiresAt, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. 
+func CreatedAtGTE(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. 
+func UpdatedAtLT(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// ScopeEQ applies the EQ predicate on the "scope" field. +func ScopeEQ(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldScope, v)) +} + +// ScopeNEQ applies the NEQ predicate on the "scope" field. +func ScopeNEQ(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNEQ(FieldScope, v)) +} + +// ScopeIn applies the In predicate on the "scope" field. +func ScopeIn(vs ...string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIn(FieldScope, vs...)) +} + +// ScopeNotIn applies the NotIn predicate on the "scope" field. +func ScopeNotIn(vs ...string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotIn(FieldScope, vs...)) +} + +// ScopeGT applies the GT predicate on the "scope" field. +func ScopeGT(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGT(FieldScope, v)) +} + +// ScopeGTE applies the GTE predicate on the "scope" field. +func ScopeGTE(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGTE(FieldScope, v)) +} + +// ScopeLT applies the LT predicate on the "scope" field. +func ScopeLT(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLT(FieldScope, v)) +} + +// ScopeLTE applies the LTE predicate on the "scope" field. +func ScopeLTE(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLTE(FieldScope, v)) +} + +// ScopeContains applies the Contains predicate on the "scope" field. 
+func ScopeContains(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldContains(FieldScope, v)) +} + +// ScopeHasPrefix applies the HasPrefix predicate on the "scope" field. +func ScopeHasPrefix(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldHasPrefix(FieldScope, v)) +} + +// ScopeHasSuffix applies the HasSuffix predicate on the "scope" field. +func ScopeHasSuffix(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldHasSuffix(FieldScope, v)) +} + +// ScopeEqualFold applies the EqualFold predicate on the "scope" field. +func ScopeEqualFold(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEqualFold(FieldScope, v)) +} + +// ScopeContainsFold applies the ContainsFold predicate on the "scope" field. +func ScopeContainsFold(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldContainsFold(FieldScope, v)) +} + +// IdempotencyKeyHashEQ applies the EQ predicate on the "idempotency_key_hash" field. +func IdempotencyKeyHashEQ(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldIdempotencyKeyHash, v)) +} + +// IdempotencyKeyHashNEQ applies the NEQ predicate on the "idempotency_key_hash" field. +func IdempotencyKeyHashNEQ(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNEQ(FieldIdempotencyKeyHash, v)) +} + +// IdempotencyKeyHashIn applies the In predicate on the "idempotency_key_hash" field. +func IdempotencyKeyHashIn(vs ...string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIn(FieldIdempotencyKeyHash, vs...)) +} + +// IdempotencyKeyHashNotIn applies the NotIn predicate on the "idempotency_key_hash" field. 
+func IdempotencyKeyHashNotIn(vs ...string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotIn(FieldIdempotencyKeyHash, vs...)) +} + +// IdempotencyKeyHashGT applies the GT predicate on the "idempotency_key_hash" field. +func IdempotencyKeyHashGT(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGT(FieldIdempotencyKeyHash, v)) +} + +// IdempotencyKeyHashGTE applies the GTE predicate on the "idempotency_key_hash" field. +func IdempotencyKeyHashGTE(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGTE(FieldIdempotencyKeyHash, v)) +} + +// IdempotencyKeyHashLT applies the LT predicate on the "idempotency_key_hash" field. +func IdempotencyKeyHashLT(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLT(FieldIdempotencyKeyHash, v)) +} + +// IdempotencyKeyHashLTE applies the LTE predicate on the "idempotency_key_hash" field. +func IdempotencyKeyHashLTE(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLTE(FieldIdempotencyKeyHash, v)) +} + +// IdempotencyKeyHashContains applies the Contains predicate on the "idempotency_key_hash" field. +func IdempotencyKeyHashContains(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldContains(FieldIdempotencyKeyHash, v)) +} + +// IdempotencyKeyHashHasPrefix applies the HasPrefix predicate on the "idempotency_key_hash" field. +func IdempotencyKeyHashHasPrefix(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldHasPrefix(FieldIdempotencyKeyHash, v)) +} + +// IdempotencyKeyHashHasSuffix applies the HasSuffix predicate on the "idempotency_key_hash" field. 
+func IdempotencyKeyHashHasSuffix(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldHasSuffix(FieldIdempotencyKeyHash, v)) +} + +// IdempotencyKeyHashEqualFold applies the EqualFold predicate on the "idempotency_key_hash" field. +func IdempotencyKeyHashEqualFold(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEqualFold(FieldIdempotencyKeyHash, v)) +} + +// IdempotencyKeyHashContainsFold applies the ContainsFold predicate on the "idempotency_key_hash" field. +func IdempotencyKeyHashContainsFold(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldContainsFold(FieldIdempotencyKeyHash, v)) +} + +// RequestFingerprintEQ applies the EQ predicate on the "request_fingerprint" field. +func RequestFingerprintEQ(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldRequestFingerprint, v)) +} + +// RequestFingerprintNEQ applies the NEQ predicate on the "request_fingerprint" field. +func RequestFingerprintNEQ(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNEQ(FieldRequestFingerprint, v)) +} + +// RequestFingerprintIn applies the In predicate on the "request_fingerprint" field. +func RequestFingerprintIn(vs ...string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIn(FieldRequestFingerprint, vs...)) +} + +// RequestFingerprintNotIn applies the NotIn predicate on the "request_fingerprint" field. +func RequestFingerprintNotIn(vs ...string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotIn(FieldRequestFingerprint, vs...)) +} + +// RequestFingerprintGT applies the GT predicate on the "request_fingerprint" field. 
+func RequestFingerprintGT(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGT(FieldRequestFingerprint, v)) +} + +// RequestFingerprintGTE applies the GTE predicate on the "request_fingerprint" field. +func RequestFingerprintGTE(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGTE(FieldRequestFingerprint, v)) +} + +// RequestFingerprintLT applies the LT predicate on the "request_fingerprint" field. +func RequestFingerprintLT(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLT(FieldRequestFingerprint, v)) +} + +// RequestFingerprintLTE applies the LTE predicate on the "request_fingerprint" field. +func RequestFingerprintLTE(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLTE(FieldRequestFingerprint, v)) +} + +// RequestFingerprintContains applies the Contains predicate on the "request_fingerprint" field. +func RequestFingerprintContains(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldContains(FieldRequestFingerprint, v)) +} + +// RequestFingerprintHasPrefix applies the HasPrefix predicate on the "request_fingerprint" field. +func RequestFingerprintHasPrefix(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldHasPrefix(FieldRequestFingerprint, v)) +} + +// RequestFingerprintHasSuffix applies the HasSuffix predicate on the "request_fingerprint" field. +func RequestFingerprintHasSuffix(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldHasSuffix(FieldRequestFingerprint, v)) +} + +// RequestFingerprintEqualFold applies the EqualFold predicate on the "request_fingerprint" field. 
+func RequestFingerprintEqualFold(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEqualFold(FieldRequestFingerprint, v)) +} + +// RequestFingerprintContainsFold applies the ContainsFold predicate on the "request_fingerprint" field. +func RequestFingerprintContainsFold(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldContainsFold(FieldRequestFingerprint, v)) +} + +// StatusEQ applies the EQ predicate on the "status" field. +func StatusEQ(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldStatus, v)) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. +func StatusNEQ(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNEQ(FieldStatus, v)) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIn(FieldStatus, vs...)) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotIn(FieldStatus, vs...)) +} + +// StatusGT applies the GT predicate on the "status" field. +func StatusGT(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGT(FieldStatus, v)) +} + +// StatusGTE applies the GTE predicate on the "status" field. +func StatusGTE(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGTE(FieldStatus, v)) +} + +// StatusLT applies the LT predicate on the "status" field. +func StatusLT(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLT(FieldStatus, v)) +} + +// StatusLTE applies the LTE predicate on the "status" field. 
+func StatusLTE(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLTE(FieldStatus, v)) +} + +// StatusContains applies the Contains predicate on the "status" field. +func StatusContains(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldContains(FieldStatus, v)) +} + +// StatusHasPrefix applies the HasPrefix predicate on the "status" field. +func StatusHasPrefix(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldHasPrefix(FieldStatus, v)) +} + +// StatusHasSuffix applies the HasSuffix predicate on the "status" field. +func StatusHasSuffix(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldHasSuffix(FieldStatus, v)) +} + +// StatusEqualFold applies the EqualFold predicate on the "status" field. +func StatusEqualFold(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEqualFold(FieldStatus, v)) +} + +// StatusContainsFold applies the ContainsFold predicate on the "status" field. +func StatusContainsFold(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldContainsFold(FieldStatus, v)) +} + +// ResponseStatusEQ applies the EQ predicate on the "response_status" field. +func ResponseStatusEQ(v int) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldResponseStatus, v)) +} + +// ResponseStatusNEQ applies the NEQ predicate on the "response_status" field. +func ResponseStatusNEQ(v int) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNEQ(FieldResponseStatus, v)) +} + +// ResponseStatusIn applies the In predicate on the "response_status" field. +func ResponseStatusIn(vs ...int) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIn(FieldResponseStatus, vs...)) +} + +// ResponseStatusNotIn applies the NotIn predicate on the "response_status" field. 
+func ResponseStatusNotIn(vs ...int) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotIn(FieldResponseStatus, vs...)) +} + +// ResponseStatusGT applies the GT predicate on the "response_status" field. +func ResponseStatusGT(v int) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGT(FieldResponseStatus, v)) +} + +// ResponseStatusGTE applies the GTE predicate on the "response_status" field. +func ResponseStatusGTE(v int) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGTE(FieldResponseStatus, v)) +} + +// ResponseStatusLT applies the LT predicate on the "response_status" field. +func ResponseStatusLT(v int) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLT(FieldResponseStatus, v)) +} + +// ResponseStatusLTE applies the LTE predicate on the "response_status" field. +func ResponseStatusLTE(v int) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLTE(FieldResponseStatus, v)) +} + +// ResponseStatusIsNil applies the IsNil predicate on the "response_status" field. +func ResponseStatusIsNil() predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIsNull(FieldResponseStatus)) +} + +// ResponseStatusNotNil applies the NotNil predicate on the "response_status" field. +func ResponseStatusNotNil() predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotNull(FieldResponseStatus)) +} + +// ResponseBodyEQ applies the EQ predicate on the "response_body" field. +func ResponseBodyEQ(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldResponseBody, v)) +} + +// ResponseBodyNEQ applies the NEQ predicate on the "response_body" field. +func ResponseBodyNEQ(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNEQ(FieldResponseBody, v)) +} + +// ResponseBodyIn applies the In predicate on the "response_body" field. 
+func ResponseBodyIn(vs ...string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIn(FieldResponseBody, vs...)) +} + +// ResponseBodyNotIn applies the NotIn predicate on the "response_body" field. +func ResponseBodyNotIn(vs ...string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotIn(FieldResponseBody, vs...)) +} + +// ResponseBodyGT applies the GT predicate on the "response_body" field. +func ResponseBodyGT(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGT(FieldResponseBody, v)) +} + +// ResponseBodyGTE applies the GTE predicate on the "response_body" field. +func ResponseBodyGTE(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGTE(FieldResponseBody, v)) +} + +// ResponseBodyLT applies the LT predicate on the "response_body" field. +func ResponseBodyLT(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLT(FieldResponseBody, v)) +} + +// ResponseBodyLTE applies the LTE predicate on the "response_body" field. +func ResponseBodyLTE(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLTE(FieldResponseBody, v)) +} + +// ResponseBodyContains applies the Contains predicate on the "response_body" field. +func ResponseBodyContains(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldContains(FieldResponseBody, v)) +} + +// ResponseBodyHasPrefix applies the HasPrefix predicate on the "response_body" field. +func ResponseBodyHasPrefix(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldHasPrefix(FieldResponseBody, v)) +} + +// ResponseBodyHasSuffix applies the HasSuffix predicate on the "response_body" field. 
+func ResponseBodyHasSuffix(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldHasSuffix(FieldResponseBody, v)) +} + +// ResponseBodyIsNil applies the IsNil predicate on the "response_body" field. +func ResponseBodyIsNil() predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIsNull(FieldResponseBody)) +} + +// ResponseBodyNotNil applies the NotNil predicate on the "response_body" field. +func ResponseBodyNotNil() predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotNull(FieldResponseBody)) +} + +// ResponseBodyEqualFold applies the EqualFold predicate on the "response_body" field. +func ResponseBodyEqualFold(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEqualFold(FieldResponseBody, v)) +} + +// ResponseBodyContainsFold applies the ContainsFold predicate on the "response_body" field. +func ResponseBodyContainsFold(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldContainsFold(FieldResponseBody, v)) +} + +// ErrorReasonEQ applies the EQ predicate on the "error_reason" field. +func ErrorReasonEQ(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldErrorReason, v)) +} + +// ErrorReasonNEQ applies the NEQ predicate on the "error_reason" field. +func ErrorReasonNEQ(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNEQ(FieldErrorReason, v)) +} + +// ErrorReasonIn applies the In predicate on the "error_reason" field. +func ErrorReasonIn(vs ...string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIn(FieldErrorReason, vs...)) +} + +// ErrorReasonNotIn applies the NotIn predicate on the "error_reason" field. 
+func ErrorReasonNotIn(vs ...string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotIn(FieldErrorReason, vs...)) +} + +// ErrorReasonGT applies the GT predicate on the "error_reason" field. +func ErrorReasonGT(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGT(FieldErrorReason, v)) +} + +// ErrorReasonGTE applies the GTE predicate on the "error_reason" field. +func ErrorReasonGTE(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGTE(FieldErrorReason, v)) +} + +// ErrorReasonLT applies the LT predicate on the "error_reason" field. +func ErrorReasonLT(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLT(FieldErrorReason, v)) +} + +// ErrorReasonLTE applies the LTE predicate on the "error_reason" field. +func ErrorReasonLTE(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLTE(FieldErrorReason, v)) +} + +// ErrorReasonContains applies the Contains predicate on the "error_reason" field. +func ErrorReasonContains(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldContains(FieldErrorReason, v)) +} + +// ErrorReasonHasPrefix applies the HasPrefix predicate on the "error_reason" field. +func ErrorReasonHasPrefix(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldHasPrefix(FieldErrorReason, v)) +} + +// ErrorReasonHasSuffix applies the HasSuffix predicate on the "error_reason" field. +func ErrorReasonHasSuffix(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldHasSuffix(FieldErrorReason, v)) +} + +// ErrorReasonIsNil applies the IsNil predicate on the "error_reason" field. +func ErrorReasonIsNil() predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIsNull(FieldErrorReason)) +} + +// ErrorReasonNotNil applies the NotNil predicate on the "error_reason" field. 
+func ErrorReasonNotNil() predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotNull(FieldErrorReason)) +} + +// ErrorReasonEqualFold applies the EqualFold predicate on the "error_reason" field. +func ErrorReasonEqualFold(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEqualFold(FieldErrorReason, v)) +} + +// ErrorReasonContainsFold applies the ContainsFold predicate on the "error_reason" field. +func ErrorReasonContainsFold(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldContainsFold(FieldErrorReason, v)) +} + +// LockedUntilEQ applies the EQ predicate on the "locked_until" field. +func LockedUntilEQ(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldLockedUntil, v)) +} + +// LockedUntilNEQ applies the NEQ predicate on the "locked_until" field. +func LockedUntilNEQ(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNEQ(FieldLockedUntil, v)) +} + +// LockedUntilIn applies the In predicate on the "locked_until" field. +func LockedUntilIn(vs ...time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIn(FieldLockedUntil, vs...)) +} + +// LockedUntilNotIn applies the NotIn predicate on the "locked_until" field. +func LockedUntilNotIn(vs ...time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotIn(FieldLockedUntil, vs...)) +} + +// LockedUntilGT applies the GT predicate on the "locked_until" field. +func LockedUntilGT(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGT(FieldLockedUntil, v)) +} + +// LockedUntilGTE applies the GTE predicate on the "locked_until" field. +func LockedUntilGTE(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGTE(FieldLockedUntil, v)) +} + +// LockedUntilLT applies the LT predicate on the "locked_until" field. 
+func LockedUntilLT(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLT(FieldLockedUntil, v)) +} + +// LockedUntilLTE applies the LTE predicate on the "locked_until" field. +func LockedUntilLTE(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLTE(FieldLockedUntil, v)) +} + +// LockedUntilIsNil applies the IsNil predicate on the "locked_until" field. +func LockedUntilIsNil() predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIsNull(FieldLockedUntil)) +} + +// LockedUntilNotNil applies the NotNil predicate on the "locked_until" field. +func LockedUntilNotNil() predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotNull(FieldLockedUntil)) +} + +// ExpiresAtEQ applies the EQ predicate on the "expires_at" field. +func ExpiresAtEQ(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldExpiresAt, v)) +} + +// ExpiresAtNEQ applies the NEQ predicate on the "expires_at" field. +func ExpiresAtNEQ(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNEQ(FieldExpiresAt, v)) +} + +// ExpiresAtIn applies the In predicate on the "expires_at" field. +func ExpiresAtIn(vs ...time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIn(FieldExpiresAt, vs...)) +} + +// ExpiresAtNotIn applies the NotIn predicate on the "expires_at" field. +func ExpiresAtNotIn(vs ...time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotIn(FieldExpiresAt, vs...)) +} + +// ExpiresAtGT applies the GT predicate on the "expires_at" field. +func ExpiresAtGT(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGT(FieldExpiresAt, v)) +} + +// ExpiresAtGTE applies the GTE predicate on the "expires_at" field. 
+func ExpiresAtGTE(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGTE(FieldExpiresAt, v)) +} + +// ExpiresAtLT applies the LT predicate on the "expires_at" field. +func ExpiresAtLT(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLT(FieldExpiresAt, v)) +} + +// ExpiresAtLTE applies the LTE predicate on the "expires_at" field. +func ExpiresAtLTE(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLTE(FieldExpiresAt, v)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.IdempotencyRecord) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.IdempotencyRecord) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.IdempotencyRecord) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.NotPredicates(p)) +} diff --git a/backend/ent/idempotencyrecord_create.go b/backend/ent/idempotencyrecord_create.go new file mode 100644 index 00000000..bf4deaf2 --- /dev/null +++ b/backend/ent/idempotencyrecord_create.go @@ -0,0 +1,1132 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/idempotencyrecord" +) + +// IdempotencyRecordCreate is the builder for creating a IdempotencyRecord entity. +type IdempotencyRecordCreate struct { + config + mutation *IdempotencyRecordMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetCreatedAt sets the "created_at" field. 
+func (_c *IdempotencyRecordCreate) SetCreatedAt(v time.Time) *IdempotencyRecordCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *IdempotencyRecordCreate) SetNillableCreatedAt(v *time.Time) *IdempotencyRecordCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetUpdatedAt sets the "updated_at" field. +func (_c *IdempotencyRecordCreate) SetUpdatedAt(v time.Time) *IdempotencyRecordCreate { + _c.mutation.SetUpdatedAt(v) + return _c +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (_c *IdempotencyRecordCreate) SetNillableUpdatedAt(v *time.Time) *IdempotencyRecordCreate { + if v != nil { + _c.SetUpdatedAt(*v) + } + return _c +} + +// SetScope sets the "scope" field. +func (_c *IdempotencyRecordCreate) SetScope(v string) *IdempotencyRecordCreate { + _c.mutation.SetScope(v) + return _c +} + +// SetIdempotencyKeyHash sets the "idempotency_key_hash" field. +func (_c *IdempotencyRecordCreate) SetIdempotencyKeyHash(v string) *IdempotencyRecordCreate { + _c.mutation.SetIdempotencyKeyHash(v) + return _c +} + +// SetRequestFingerprint sets the "request_fingerprint" field. +func (_c *IdempotencyRecordCreate) SetRequestFingerprint(v string) *IdempotencyRecordCreate { + _c.mutation.SetRequestFingerprint(v) + return _c +} + +// SetStatus sets the "status" field. +func (_c *IdempotencyRecordCreate) SetStatus(v string) *IdempotencyRecordCreate { + _c.mutation.SetStatus(v) + return _c +} + +// SetResponseStatus sets the "response_status" field. +func (_c *IdempotencyRecordCreate) SetResponseStatus(v int) *IdempotencyRecordCreate { + _c.mutation.SetResponseStatus(v) + return _c +} + +// SetNillableResponseStatus sets the "response_status" field if the given value is not nil. 
+func (_c *IdempotencyRecordCreate) SetNillableResponseStatus(v *int) *IdempotencyRecordCreate { + if v != nil { + _c.SetResponseStatus(*v) + } + return _c +} + +// SetResponseBody sets the "response_body" field. +func (_c *IdempotencyRecordCreate) SetResponseBody(v string) *IdempotencyRecordCreate { + _c.mutation.SetResponseBody(v) + return _c +} + +// SetNillableResponseBody sets the "response_body" field if the given value is not nil. +func (_c *IdempotencyRecordCreate) SetNillableResponseBody(v *string) *IdempotencyRecordCreate { + if v != nil { + _c.SetResponseBody(*v) + } + return _c +} + +// SetErrorReason sets the "error_reason" field. +func (_c *IdempotencyRecordCreate) SetErrorReason(v string) *IdempotencyRecordCreate { + _c.mutation.SetErrorReason(v) + return _c +} + +// SetNillableErrorReason sets the "error_reason" field if the given value is not nil. +func (_c *IdempotencyRecordCreate) SetNillableErrorReason(v *string) *IdempotencyRecordCreate { + if v != nil { + _c.SetErrorReason(*v) + } + return _c +} + +// SetLockedUntil sets the "locked_until" field. +func (_c *IdempotencyRecordCreate) SetLockedUntil(v time.Time) *IdempotencyRecordCreate { + _c.mutation.SetLockedUntil(v) + return _c +} + +// SetNillableLockedUntil sets the "locked_until" field if the given value is not nil. +func (_c *IdempotencyRecordCreate) SetNillableLockedUntil(v *time.Time) *IdempotencyRecordCreate { + if v != nil { + _c.SetLockedUntil(*v) + } + return _c +} + +// SetExpiresAt sets the "expires_at" field. +func (_c *IdempotencyRecordCreate) SetExpiresAt(v time.Time) *IdempotencyRecordCreate { + _c.mutation.SetExpiresAt(v) + return _c +} + +// Mutation returns the IdempotencyRecordMutation object of the builder. +func (_c *IdempotencyRecordCreate) Mutation() *IdempotencyRecordMutation { + return _c.mutation +} + +// Save creates the IdempotencyRecord in the database. 
+func (_c *IdempotencyRecordCreate) Save(ctx context.Context) (*IdempotencyRecord, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *IdempotencyRecordCreate) SaveX(ctx context.Context) *IdempotencyRecord { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *IdempotencyRecordCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *IdempotencyRecordCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *IdempotencyRecordCreate) defaults() { + if _, ok := _c.mutation.CreatedAt(); !ok { + v := idempotencyrecord.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + v := idempotencyrecord.DefaultUpdatedAt() + _c.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_c *IdempotencyRecordCreate) check() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "IdempotencyRecord.created_at"`)} + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "IdempotencyRecord.updated_at"`)} + } + if _, ok := _c.mutation.Scope(); !ok { + return &ValidationError{Name: "scope", err: errors.New(`ent: missing required field "IdempotencyRecord.scope"`)} + } + if v, ok := _c.mutation.Scope(); ok { + if err := idempotencyrecord.ScopeValidator(v); err != nil { + return &ValidationError{Name: "scope", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.scope": %w`, err)} + } + } + if _, ok := _c.mutation.IdempotencyKeyHash(); !ok { + return &ValidationError{Name: "idempotency_key_hash", err: errors.New(`ent: missing required field "IdempotencyRecord.idempotency_key_hash"`)} + } + if v, ok := _c.mutation.IdempotencyKeyHash(); ok { + if err := idempotencyrecord.IdempotencyKeyHashValidator(v); err != nil { + return &ValidationError{Name: "idempotency_key_hash", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.idempotency_key_hash": %w`, err)} + } + } + if _, ok := _c.mutation.RequestFingerprint(); !ok { + return &ValidationError{Name: "request_fingerprint", err: errors.New(`ent: missing required field "IdempotencyRecord.request_fingerprint"`)} + } + if v, ok := _c.mutation.RequestFingerprint(); ok { + if err := idempotencyrecord.RequestFingerprintValidator(v); err != nil { + return &ValidationError{Name: "request_fingerprint", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.request_fingerprint": %w`, err)} + } + } + if _, ok := _c.mutation.Status(); !ok { + return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "IdempotencyRecord.status"`)} + } + if v, ok := _c.mutation.Status(); ok { + if 
err := idempotencyrecord.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.status": %w`, err)} + } + } + if v, ok := _c.mutation.ErrorReason(); ok { + if err := idempotencyrecord.ErrorReasonValidator(v); err != nil { + return &ValidationError{Name: "error_reason", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.error_reason": %w`, err)} + } + } + if _, ok := _c.mutation.ExpiresAt(); !ok { + return &ValidationError{Name: "expires_at", err: errors.New(`ent: missing required field "IdempotencyRecord.expires_at"`)} + } + return nil +} + +func (_c *IdempotencyRecordCreate) sqlSave(ctx context.Context) (*IdempotencyRecord, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *IdempotencyRecordCreate) createSpec() (*IdempotencyRecord, *sqlgraph.CreateSpec) { + var ( + _node = &IdempotencyRecord{config: _c.config} + _spec = sqlgraph.NewCreateSpec(idempotencyrecord.Table, sqlgraph.NewFieldSpec(idempotencyrecord.FieldID, field.TypeInt64)) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(idempotencyrecord.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := _c.mutation.UpdatedAt(); ok { + _spec.SetField(idempotencyrecord.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if value, ok := _c.mutation.Scope(); ok { + _spec.SetField(idempotencyrecord.FieldScope, field.TypeString, value) + _node.Scope = value + } + if value, ok := _c.mutation.IdempotencyKeyHash(); ok { + 
 _spec.SetField(idempotencyrecord.FieldIdempotencyKeyHash, field.TypeString, value) + _node.IdempotencyKeyHash = value + } + if value, ok := _c.mutation.RequestFingerprint(); ok { + _spec.SetField(idempotencyrecord.FieldRequestFingerprint, field.TypeString, value) + _node.RequestFingerprint = value + } + if value, ok := _c.mutation.Status(); ok { + _spec.SetField(idempotencyrecord.FieldStatus, field.TypeString, value) + _node.Status = value + } + if value, ok := _c.mutation.ResponseStatus(); ok { + _spec.SetField(idempotencyrecord.FieldResponseStatus, field.TypeInt, value) + _node.ResponseStatus = &value + } + if value, ok := _c.mutation.ResponseBody(); ok { + _spec.SetField(idempotencyrecord.FieldResponseBody, field.TypeString, value) + _node.ResponseBody = &value + } + if value, ok := _c.mutation.ErrorReason(); ok { + _spec.SetField(idempotencyrecord.FieldErrorReason, field.TypeString, value) + _node.ErrorReason = &value + } + if value, ok := _c.mutation.LockedUntil(); ok { + _spec.SetField(idempotencyrecord.FieldLockedUntil, field.TypeTime, value) + _node.LockedUntil = &value + } + if value, ok := _c.mutation.ExpiresAt(); ok { + _spec.SetField(idempotencyrecord.FieldExpiresAt, field.TypeTime, value) + _node.ExpiresAt = value + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.IdempotencyRecord.Create(). +// SetCreatedAt(v). +// OnConflict( +// // Update the row with the new values +// // that was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.IdempotencyRecordUpsert) { +// SetCreatedAt(v+v). +// }). 
+// Exec(ctx) +func (_c *IdempotencyRecordCreate) OnConflict(opts ...sql.ConflictOption) *IdempotencyRecordUpsertOne { + _c.conflict = opts + return &IdempotencyRecordUpsertOne{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.IdempotencyRecord.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *IdempotencyRecordCreate) OnConflictColumns(columns ...string) *IdempotencyRecordUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &IdempotencyRecordUpsertOne{ + create: _c, + } +} + +type ( + // IdempotencyRecordUpsertOne is the builder for "upsert"-ing + // one IdempotencyRecord node. + IdempotencyRecordUpsertOne struct { + create *IdempotencyRecordCreate + } + + // IdempotencyRecordUpsert is the "OnConflict" setter. + IdempotencyRecordUpsert struct { + *sql.UpdateSet + } +) + +// SetUpdatedAt sets the "updated_at" field. +func (u *IdempotencyRecordUpsert) SetUpdatedAt(v time.Time) *IdempotencyRecordUpsert { + u.Set(idempotencyrecord.FieldUpdatedAt, v) + return u +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *IdempotencyRecordUpsert) UpdateUpdatedAt() *IdempotencyRecordUpsert { + u.SetExcluded(idempotencyrecord.FieldUpdatedAt) + return u +} + +// SetScope sets the "scope" field. +func (u *IdempotencyRecordUpsert) SetScope(v string) *IdempotencyRecordUpsert { + u.Set(idempotencyrecord.FieldScope, v) + return u +} + +// UpdateScope sets the "scope" field to the value that was provided on create. +func (u *IdempotencyRecordUpsert) UpdateScope() *IdempotencyRecordUpsert { + u.SetExcluded(idempotencyrecord.FieldScope) + return u +} + +// SetIdempotencyKeyHash sets the "idempotency_key_hash" field. 
+func (u *IdempotencyRecordUpsert) SetIdempotencyKeyHash(v string) *IdempotencyRecordUpsert { + u.Set(idempotencyrecord.FieldIdempotencyKeyHash, v) + return u +} + +// UpdateIdempotencyKeyHash sets the "idempotency_key_hash" field to the value that was provided on create. +func (u *IdempotencyRecordUpsert) UpdateIdempotencyKeyHash() *IdempotencyRecordUpsert { + u.SetExcluded(idempotencyrecord.FieldIdempotencyKeyHash) + return u +} + +// SetRequestFingerprint sets the "request_fingerprint" field. +func (u *IdempotencyRecordUpsert) SetRequestFingerprint(v string) *IdempotencyRecordUpsert { + u.Set(idempotencyrecord.FieldRequestFingerprint, v) + return u +} + +// UpdateRequestFingerprint sets the "request_fingerprint" field to the value that was provided on create. +func (u *IdempotencyRecordUpsert) UpdateRequestFingerprint() *IdempotencyRecordUpsert { + u.SetExcluded(idempotencyrecord.FieldRequestFingerprint) + return u +} + +// SetStatus sets the "status" field. +func (u *IdempotencyRecordUpsert) SetStatus(v string) *IdempotencyRecordUpsert { + u.Set(idempotencyrecord.FieldStatus, v) + return u +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *IdempotencyRecordUpsert) UpdateStatus() *IdempotencyRecordUpsert { + u.SetExcluded(idempotencyrecord.FieldStatus) + return u +} + +// SetResponseStatus sets the "response_status" field. +func (u *IdempotencyRecordUpsert) SetResponseStatus(v int) *IdempotencyRecordUpsert { + u.Set(idempotencyrecord.FieldResponseStatus, v) + return u +} + +// UpdateResponseStatus sets the "response_status" field to the value that was provided on create. +func (u *IdempotencyRecordUpsert) UpdateResponseStatus() *IdempotencyRecordUpsert { + u.SetExcluded(idempotencyrecord.FieldResponseStatus) + return u +} + +// AddResponseStatus adds v to the "response_status" field. 
+func (u *IdempotencyRecordUpsert) AddResponseStatus(v int) *IdempotencyRecordUpsert { + u.Add(idempotencyrecord.FieldResponseStatus, v) + return u +} + +// ClearResponseStatus clears the value of the "response_status" field. +func (u *IdempotencyRecordUpsert) ClearResponseStatus() *IdempotencyRecordUpsert { + u.SetNull(idempotencyrecord.FieldResponseStatus) + return u +} + +// SetResponseBody sets the "response_body" field. +func (u *IdempotencyRecordUpsert) SetResponseBody(v string) *IdempotencyRecordUpsert { + u.Set(idempotencyrecord.FieldResponseBody, v) + return u +} + +// UpdateResponseBody sets the "response_body" field to the value that was provided on create. +func (u *IdempotencyRecordUpsert) UpdateResponseBody() *IdempotencyRecordUpsert { + u.SetExcluded(idempotencyrecord.FieldResponseBody) + return u +} + +// ClearResponseBody clears the value of the "response_body" field. +func (u *IdempotencyRecordUpsert) ClearResponseBody() *IdempotencyRecordUpsert { + u.SetNull(idempotencyrecord.FieldResponseBody) + return u +} + +// SetErrorReason sets the "error_reason" field. +func (u *IdempotencyRecordUpsert) SetErrorReason(v string) *IdempotencyRecordUpsert { + u.Set(idempotencyrecord.FieldErrorReason, v) + return u +} + +// UpdateErrorReason sets the "error_reason" field to the value that was provided on create. +func (u *IdempotencyRecordUpsert) UpdateErrorReason() *IdempotencyRecordUpsert { + u.SetExcluded(idempotencyrecord.FieldErrorReason) + return u +} + +// ClearErrorReason clears the value of the "error_reason" field. +func (u *IdempotencyRecordUpsert) ClearErrorReason() *IdempotencyRecordUpsert { + u.SetNull(idempotencyrecord.FieldErrorReason) + return u +} + +// SetLockedUntil sets the "locked_until" field. +func (u *IdempotencyRecordUpsert) SetLockedUntil(v time.Time) *IdempotencyRecordUpsert { + u.Set(idempotencyrecord.FieldLockedUntil, v) + return u +} + +// UpdateLockedUntil sets the "locked_until" field to the value that was provided on create. 
+func (u *IdempotencyRecordUpsert) UpdateLockedUntil() *IdempotencyRecordUpsert { + u.SetExcluded(idempotencyrecord.FieldLockedUntil) + return u +} + +// ClearLockedUntil clears the value of the "locked_until" field. +func (u *IdempotencyRecordUpsert) ClearLockedUntil() *IdempotencyRecordUpsert { + u.SetNull(idempotencyrecord.FieldLockedUntil) + return u +} + +// SetExpiresAt sets the "expires_at" field. +func (u *IdempotencyRecordUpsert) SetExpiresAt(v time.Time) *IdempotencyRecordUpsert { + u.Set(idempotencyrecord.FieldExpiresAt, v) + return u +} + +// UpdateExpiresAt sets the "expires_at" field to the value that was provided on create. +func (u *IdempotencyRecordUpsert) UpdateExpiresAt() *IdempotencyRecordUpsert { + u.SetExcluded(idempotencyrecord.FieldExpiresAt) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.IdempotencyRecord.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *IdempotencyRecordUpsertOne) UpdateNewValues() *IdempotencyRecordUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(idempotencyrecord.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.IdempotencyRecord.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *IdempotencyRecordUpsertOne) Ignore() *IdempotencyRecordUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. 
+func (u *IdempotencyRecordUpsertOne) DoNothing() *IdempotencyRecordUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the IdempotencyRecordCreate.OnConflict +// documentation for more info. +func (u *IdempotencyRecordUpsertOne) Update(set func(*IdempotencyRecordUpsert)) *IdempotencyRecordUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&IdempotencyRecordUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *IdempotencyRecordUpsertOne) SetUpdatedAt(v time.Time) *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertOne) UpdateUpdatedAt() *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetScope sets the "scope" field. +func (u *IdempotencyRecordUpsertOne) SetScope(v string) *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetScope(v) + }) +} + +// UpdateScope sets the "scope" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertOne) UpdateScope() *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateScope() + }) +} + +// SetIdempotencyKeyHash sets the "idempotency_key_hash" field. +func (u *IdempotencyRecordUpsertOne) SetIdempotencyKeyHash(v string) *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetIdempotencyKeyHash(v) + }) +} + +// UpdateIdempotencyKeyHash sets the "idempotency_key_hash" field to the value that was provided on create. 
+func (u *IdempotencyRecordUpsertOne) UpdateIdempotencyKeyHash() *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateIdempotencyKeyHash() + }) +} + +// SetRequestFingerprint sets the "request_fingerprint" field. +func (u *IdempotencyRecordUpsertOne) SetRequestFingerprint(v string) *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetRequestFingerprint(v) + }) +} + +// UpdateRequestFingerprint sets the "request_fingerprint" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertOne) UpdateRequestFingerprint() *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateRequestFingerprint() + }) +} + +// SetStatus sets the "status" field. +func (u *IdempotencyRecordUpsertOne) SetStatus(v string) *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertOne) UpdateStatus() *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateStatus() + }) +} + +// SetResponseStatus sets the "response_status" field. +func (u *IdempotencyRecordUpsertOne) SetResponseStatus(v int) *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetResponseStatus(v) + }) +} + +// AddResponseStatus adds v to the "response_status" field. +func (u *IdempotencyRecordUpsertOne) AddResponseStatus(v int) *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.AddResponseStatus(v) + }) +} + +// UpdateResponseStatus sets the "response_status" field to the value that was provided on create. 
+func (u *IdempotencyRecordUpsertOne) UpdateResponseStatus() *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateResponseStatus() + }) +} + +// ClearResponseStatus clears the value of the "response_status" field. +func (u *IdempotencyRecordUpsertOne) ClearResponseStatus() *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.ClearResponseStatus() + }) +} + +// SetResponseBody sets the "response_body" field. +func (u *IdempotencyRecordUpsertOne) SetResponseBody(v string) *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetResponseBody(v) + }) +} + +// UpdateResponseBody sets the "response_body" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertOne) UpdateResponseBody() *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateResponseBody() + }) +} + +// ClearResponseBody clears the value of the "response_body" field. +func (u *IdempotencyRecordUpsertOne) ClearResponseBody() *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.ClearResponseBody() + }) +} + +// SetErrorReason sets the "error_reason" field. +func (u *IdempotencyRecordUpsertOne) SetErrorReason(v string) *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetErrorReason(v) + }) +} + +// UpdateErrorReason sets the "error_reason" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertOne) UpdateErrorReason() *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateErrorReason() + }) +} + +// ClearErrorReason clears the value of the "error_reason" field. +func (u *IdempotencyRecordUpsertOne) ClearErrorReason() *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.ClearErrorReason() + }) +} + +// SetLockedUntil sets the "locked_until" field. 
+func (u *IdempotencyRecordUpsertOne) SetLockedUntil(v time.Time) *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetLockedUntil(v) + }) +} + +// UpdateLockedUntil sets the "locked_until" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertOne) UpdateLockedUntil() *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateLockedUntil() + }) +} + +// ClearLockedUntil clears the value of the "locked_until" field. +func (u *IdempotencyRecordUpsertOne) ClearLockedUntil() *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.ClearLockedUntil() + }) +} + +// SetExpiresAt sets the "expires_at" field. +func (u *IdempotencyRecordUpsertOne) SetExpiresAt(v time.Time) *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetExpiresAt(v) + }) +} + +// UpdateExpiresAt sets the "expires_at" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertOne) UpdateExpiresAt() *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateExpiresAt() + }) +} + +// Exec executes the query. +func (u *IdempotencyRecordUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for IdempotencyRecordCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *IdempotencyRecordUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. +func (u *IdempotencyRecordUpsertOne) ID(ctx context.Context) (id int64, err error) { + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. 
+func (u *IdempotencyRecordUpsertOne) IDX(ctx context.Context) int64 { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// IdempotencyRecordCreateBulk is the builder for creating many IdempotencyRecord entities in bulk. +type IdempotencyRecordCreateBulk struct { + config + err error + builders []*IdempotencyRecordCreate + conflict []sql.ConflictOption +} + +// Save creates the IdempotencyRecord entities in the database. +func (_c *IdempotencyRecordCreateBulk) Save(ctx context.Context) ([]*IdempotencyRecord, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*IdempotencyRecord, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*IdempotencyRecordMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = _c.conflict + // Invoke the actual operation on the latest mutation in the chain. 
+ if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int64(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *IdempotencyRecordCreateBulk) SaveX(ctx context.Context) []*IdempotencyRecord { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *IdempotencyRecordCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *IdempotencyRecordCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.IdempotencyRecord.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.IdempotencyRecordUpsert) { +// SetCreatedAt(v+v). +// }). +// Exec(ctx) +func (_c *IdempotencyRecordCreateBulk) OnConflict(opts ...sql.ConflictOption) *IdempotencyRecordUpsertBulk { + _c.conflict = opts + return &IdempotencyRecordUpsertBulk{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. 
Using this option is equivalent to using: +// +// client.IdempotencyRecord.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *IdempotencyRecordCreateBulk) OnConflictColumns(columns ...string) *IdempotencyRecordUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &IdempotencyRecordUpsertBulk{ + create: _c, + } +} + +// IdempotencyRecordUpsertBulk is the builder for "upsert"-ing +// a bulk of IdempotencyRecord nodes. +type IdempotencyRecordUpsertBulk struct { + create *IdempotencyRecordCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.IdempotencyRecord.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *IdempotencyRecordUpsertBulk) UpdateNewValues() *IdempotencyRecordUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(idempotencyrecord.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.IdempotencyRecord.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *IdempotencyRecordUpsertBulk) Ignore() *IdempotencyRecordUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *IdempotencyRecordUpsertBulk) DoNothing() *IdempotencyRecordUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. 
See the IdempotencyRecordCreateBulk.OnConflict +// documentation for more info. +func (u *IdempotencyRecordUpsertBulk) Update(set func(*IdempotencyRecordUpsert)) *IdempotencyRecordUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&IdempotencyRecordUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *IdempotencyRecordUpsertBulk) SetUpdatedAt(v time.Time) *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertBulk) UpdateUpdatedAt() *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetScope sets the "scope" field. +func (u *IdempotencyRecordUpsertBulk) SetScope(v string) *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetScope(v) + }) +} + +// UpdateScope sets the "scope" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertBulk) UpdateScope() *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateScope() + }) +} + +// SetIdempotencyKeyHash sets the "idempotency_key_hash" field. +func (u *IdempotencyRecordUpsertBulk) SetIdempotencyKeyHash(v string) *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetIdempotencyKeyHash(v) + }) +} + +// UpdateIdempotencyKeyHash sets the "idempotency_key_hash" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertBulk) UpdateIdempotencyKeyHash() *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateIdempotencyKeyHash() + }) +} + +// SetRequestFingerprint sets the "request_fingerprint" field. 
+func (u *IdempotencyRecordUpsertBulk) SetRequestFingerprint(v string) *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetRequestFingerprint(v) + }) +} + +// UpdateRequestFingerprint sets the "request_fingerprint" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertBulk) UpdateRequestFingerprint() *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateRequestFingerprint() + }) +} + +// SetStatus sets the "status" field. +func (u *IdempotencyRecordUpsertBulk) SetStatus(v string) *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertBulk) UpdateStatus() *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateStatus() + }) +} + +// SetResponseStatus sets the "response_status" field. +func (u *IdempotencyRecordUpsertBulk) SetResponseStatus(v int) *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetResponseStatus(v) + }) +} + +// AddResponseStatus adds v to the "response_status" field. +func (u *IdempotencyRecordUpsertBulk) AddResponseStatus(v int) *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.AddResponseStatus(v) + }) +} + +// UpdateResponseStatus sets the "response_status" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertBulk) UpdateResponseStatus() *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateResponseStatus() + }) +} + +// ClearResponseStatus clears the value of the "response_status" field. 
+func (u *IdempotencyRecordUpsertBulk) ClearResponseStatus() *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.ClearResponseStatus() + }) +} + +// SetResponseBody sets the "response_body" field. +func (u *IdempotencyRecordUpsertBulk) SetResponseBody(v string) *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetResponseBody(v) + }) +} + +// UpdateResponseBody sets the "response_body" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertBulk) UpdateResponseBody() *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateResponseBody() + }) +} + +// ClearResponseBody clears the value of the "response_body" field. +func (u *IdempotencyRecordUpsertBulk) ClearResponseBody() *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.ClearResponseBody() + }) +} + +// SetErrorReason sets the "error_reason" field. +func (u *IdempotencyRecordUpsertBulk) SetErrorReason(v string) *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetErrorReason(v) + }) +} + +// UpdateErrorReason sets the "error_reason" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertBulk) UpdateErrorReason() *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateErrorReason() + }) +} + +// ClearErrorReason clears the value of the "error_reason" field. +func (u *IdempotencyRecordUpsertBulk) ClearErrorReason() *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.ClearErrorReason() + }) +} + +// SetLockedUntil sets the "locked_until" field. 
+func (u *IdempotencyRecordUpsertBulk) SetLockedUntil(v time.Time) *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetLockedUntil(v) + }) +} + +// UpdateLockedUntil sets the "locked_until" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertBulk) UpdateLockedUntil() *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateLockedUntil() + }) +} + +// ClearLockedUntil clears the value of the "locked_until" field. +func (u *IdempotencyRecordUpsertBulk) ClearLockedUntil() *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.ClearLockedUntil() + }) +} + +// SetExpiresAt sets the "expires_at" field. +func (u *IdempotencyRecordUpsertBulk) SetExpiresAt(v time.Time) *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetExpiresAt(v) + }) +} + +// UpdateExpiresAt sets the "expires_at" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertBulk) UpdateExpiresAt() *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateExpiresAt() + }) +} + +// Exec executes the query. +func (u *IdempotencyRecordUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the IdempotencyRecordCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for IdempotencyRecordCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (u *IdempotencyRecordUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/idempotencyrecord_delete.go b/backend/ent/idempotencyrecord_delete.go new file mode 100644 index 00000000..f5c87559 --- /dev/null +++ b/backend/ent/idempotencyrecord_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/idempotencyrecord" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// IdempotencyRecordDelete is the builder for deleting a IdempotencyRecord entity. +type IdempotencyRecordDelete struct { + config + hooks []Hook + mutation *IdempotencyRecordMutation +} + +// Where appends a list predicates to the IdempotencyRecordDelete builder. +func (_d *IdempotencyRecordDelete) Where(ps ...predicate.IdempotencyRecord) *IdempotencyRecordDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *IdempotencyRecordDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (_d *IdempotencyRecordDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *IdempotencyRecordDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(idempotencyrecord.Table, sqlgraph.NewFieldSpec(idempotencyrecord.FieldID, field.TypeInt64)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// IdempotencyRecordDeleteOne is the builder for deleting a single IdempotencyRecord entity. +type IdempotencyRecordDeleteOne struct { + _d *IdempotencyRecordDelete +} + +// Where appends a list predicates to the IdempotencyRecordDelete builder. +func (_d *IdempotencyRecordDeleteOne) Where(ps ...predicate.IdempotencyRecord) *IdempotencyRecordDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *IdempotencyRecordDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{idempotencyrecord.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *IdempotencyRecordDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/idempotencyrecord_query.go b/backend/ent/idempotencyrecord_query.go new file mode 100644 index 00000000..fbba4dfa --- /dev/null +++ b/backend/ent/idempotencyrecord_query.go @@ -0,0 +1,564 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/idempotencyrecord" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// IdempotencyRecordQuery is the builder for querying IdempotencyRecord entities. +type IdempotencyRecordQuery struct { + config + ctx *QueryContext + order []idempotencyrecord.OrderOption + inters []Interceptor + predicates []predicate.IdempotencyRecord + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the IdempotencyRecordQuery builder. +func (_q *IdempotencyRecordQuery) Where(ps ...predicate.IdempotencyRecord) *IdempotencyRecordQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *IdempotencyRecordQuery) Limit(limit int) *IdempotencyRecordQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *IdempotencyRecordQuery) Offset(offset int) *IdempotencyRecordQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *IdempotencyRecordQuery) Unique(unique bool) *IdempotencyRecordQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *IdempotencyRecordQuery) Order(o ...idempotencyrecord.OrderOption) *IdempotencyRecordQuery { + _q.order = append(_q.order, o...) + return _q +} + +// First returns the first IdempotencyRecord entity from the query. +// Returns a *NotFoundError when no IdempotencyRecord was found. 
+func (_q *IdempotencyRecordQuery) First(ctx context.Context) (*IdempotencyRecord, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{idempotencyrecord.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *IdempotencyRecordQuery) FirstX(ctx context.Context) *IdempotencyRecord { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first IdempotencyRecord ID from the query. +// Returns a *NotFoundError when no IdempotencyRecord ID was found. +func (_q *IdempotencyRecordQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{idempotencyrecord.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *IdempotencyRecordQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single IdempotencyRecord entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one IdempotencyRecord entity is found. +// Returns a *NotFoundError when no IdempotencyRecord entities are found. +func (_q *IdempotencyRecordQuery) Only(ctx context.Context) (*IdempotencyRecord, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{idempotencyrecord.Label} + default: + return nil, &NotSingularError{idempotencyrecord.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. 
+func (_q *IdempotencyRecordQuery) OnlyX(ctx context.Context) *IdempotencyRecord { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only IdempotencyRecord ID in the query. +// Returns a *NotSingularError when more than one IdempotencyRecord ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *IdempotencyRecordQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{idempotencyrecord.Label} + default: + err = &NotSingularError{idempotencyrecord.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *IdempotencyRecordQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of IdempotencyRecords. +func (_q *IdempotencyRecordQuery) All(ctx context.Context) ([]*IdempotencyRecord, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*IdempotencyRecord, *IdempotencyRecordQuery]() + return withInterceptors[[]*IdempotencyRecord](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *IdempotencyRecordQuery) AllX(ctx context.Context) []*IdempotencyRecord { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of IdempotencyRecord IDs. 
+func (_q *IdempotencyRecordQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(idempotencyrecord.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *IdempotencyRecordQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *IdempotencyRecordQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*IdempotencyRecordQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *IdempotencyRecordQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *IdempotencyRecordQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *IdempotencyRecordQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the IdempotencyRecordQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. 
+func (_q *IdempotencyRecordQuery) Clone() *IdempotencyRecordQuery { + if _q == nil { + return nil + } + return &IdempotencyRecordQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]idempotencyrecord.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.IdempotencyRecord{}, _q.predicates...), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.IdempotencyRecord.Query(). +// GroupBy(idempotencyrecord.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *IdempotencyRecordQuery) GroupBy(field string, fields ...string) *IdempotencyRecordGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &IdempotencyRecordGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = idempotencyrecord.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.IdempotencyRecord.Query(). +// Select(idempotencyrecord.FieldCreatedAt). +// Scan(ctx, &v) +func (_q *IdempotencyRecordQuery) Select(fields ...string) *IdempotencyRecordSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &IdempotencyRecordSelect{IdempotencyRecordQuery: _q} + sbuild.label = idempotencyrecord.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a IdempotencyRecordSelect configured with the given aggregations. 
+func (_q *IdempotencyRecordQuery) Aggregate(fns ...AggregateFunc) *IdempotencyRecordSelect { + return _q.Select().Aggregate(fns...) +} + +func (_q *IdempotencyRecordQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !idempotencyrecord.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *IdempotencyRecordQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*IdempotencyRecord, error) { + var ( + nodes = []*IdempotencyRecord{} + _spec = _q.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*IdempotencyRecord).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &IdempotencyRecord{config: _q.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (_q *IdempotencyRecordQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *IdempotencyRecordQuery) querySpec() *sqlgraph.QuerySpec { + _spec := 
sqlgraph.NewQuerySpec(idempotencyrecord.Table, idempotencyrecord.Columns, sqlgraph.NewFieldSpec(idempotencyrecord.FieldID, field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, idempotencyrecord.FieldID) + for i := range fields { + if fields[i] != idempotencyrecord.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *IdempotencyRecordQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(idempotencyrecord.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = idempotencyrecord.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, m := range _q.modifiers { + m(selector) + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. 
+ selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (_q *IdempotencyRecordQuery) ForUpdate(opts ...sql.LockOption) *IdempotencyRecordQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (_q *IdempotencyRecordQuery) ForShare(opts ...sql.LockOption) *IdempotencyRecordQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + +// IdempotencyRecordGroupBy is the group-by builder for IdempotencyRecord entities. +type IdempotencyRecordGroupBy struct { + selector + build *IdempotencyRecordQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *IdempotencyRecordGroupBy) Aggregate(fns ...AggregateFunc) *IdempotencyRecordGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. 
+func (_g *IdempotencyRecordGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*IdempotencyRecordQuery, *IdempotencyRecordGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *IdempotencyRecordGroupBy) sqlScan(ctx context.Context, root *IdempotencyRecordQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// IdempotencyRecordSelect is the builder for selecting fields of IdempotencyRecord entities. +type IdempotencyRecordSelect struct { + *IdempotencyRecordQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *IdempotencyRecordSelect) Aggregate(fns ...AggregateFunc) *IdempotencyRecordSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. 
+func (_s *IdempotencyRecordSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*IdempotencyRecordQuery, *IdempotencyRecordSelect](ctx, _s.IdempotencyRecordQuery, _s, _s.inters, v) +} + +func (_s *IdempotencyRecordSelect) sqlScan(ctx context.Context, root *IdempotencyRecordQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/idempotencyrecord_update.go b/backend/ent/idempotencyrecord_update.go new file mode 100644 index 00000000..f839e5c0 --- /dev/null +++ b/backend/ent/idempotencyrecord_update.go @@ -0,0 +1,676 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/idempotencyrecord" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// IdempotencyRecordUpdate is the builder for updating IdempotencyRecord entities. +type IdempotencyRecordUpdate struct { + config + hooks []Hook + mutation *IdempotencyRecordMutation +} + +// Where appends a list predicates to the IdempotencyRecordUpdate builder. +func (_u *IdempotencyRecordUpdate) Where(ps ...predicate.IdempotencyRecord) *IdempotencyRecordUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetUpdatedAt sets the "updated_at" field. 
+func (_u *IdempotencyRecordUpdate) SetUpdatedAt(v time.Time) *IdempotencyRecordUpdate { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetScope sets the "scope" field. +func (_u *IdempotencyRecordUpdate) SetScope(v string) *IdempotencyRecordUpdate { + _u.mutation.SetScope(v) + return _u +} + +// SetNillableScope sets the "scope" field if the given value is not nil. +func (_u *IdempotencyRecordUpdate) SetNillableScope(v *string) *IdempotencyRecordUpdate { + if v != nil { + _u.SetScope(*v) + } + return _u +} + +// SetIdempotencyKeyHash sets the "idempotency_key_hash" field. +func (_u *IdempotencyRecordUpdate) SetIdempotencyKeyHash(v string) *IdempotencyRecordUpdate { + _u.mutation.SetIdempotencyKeyHash(v) + return _u +} + +// SetNillableIdempotencyKeyHash sets the "idempotency_key_hash" field if the given value is not nil. +func (_u *IdempotencyRecordUpdate) SetNillableIdempotencyKeyHash(v *string) *IdempotencyRecordUpdate { + if v != nil { + _u.SetIdempotencyKeyHash(*v) + } + return _u +} + +// SetRequestFingerprint sets the "request_fingerprint" field. +func (_u *IdempotencyRecordUpdate) SetRequestFingerprint(v string) *IdempotencyRecordUpdate { + _u.mutation.SetRequestFingerprint(v) + return _u +} + +// SetNillableRequestFingerprint sets the "request_fingerprint" field if the given value is not nil. +func (_u *IdempotencyRecordUpdate) SetNillableRequestFingerprint(v *string) *IdempotencyRecordUpdate { + if v != nil { + _u.SetRequestFingerprint(*v) + } + return _u +} + +// SetStatus sets the "status" field. +func (_u *IdempotencyRecordUpdate) SetStatus(v string) *IdempotencyRecordUpdate { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *IdempotencyRecordUpdate) SetNillableStatus(v *string) *IdempotencyRecordUpdate { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetResponseStatus sets the "response_status" field. 
+func (_u *IdempotencyRecordUpdate) SetResponseStatus(v int) *IdempotencyRecordUpdate { + _u.mutation.ResetResponseStatus() + _u.mutation.SetResponseStatus(v) + return _u +} + +// SetNillableResponseStatus sets the "response_status" field if the given value is not nil. +func (_u *IdempotencyRecordUpdate) SetNillableResponseStatus(v *int) *IdempotencyRecordUpdate { + if v != nil { + _u.SetResponseStatus(*v) + } + return _u +} + +// AddResponseStatus adds value to the "response_status" field. +func (_u *IdempotencyRecordUpdate) AddResponseStatus(v int) *IdempotencyRecordUpdate { + _u.mutation.AddResponseStatus(v) + return _u +} + +// ClearResponseStatus clears the value of the "response_status" field. +func (_u *IdempotencyRecordUpdate) ClearResponseStatus() *IdempotencyRecordUpdate { + _u.mutation.ClearResponseStatus() + return _u +} + +// SetResponseBody sets the "response_body" field. +func (_u *IdempotencyRecordUpdate) SetResponseBody(v string) *IdempotencyRecordUpdate { + _u.mutation.SetResponseBody(v) + return _u +} + +// SetNillableResponseBody sets the "response_body" field if the given value is not nil. +func (_u *IdempotencyRecordUpdate) SetNillableResponseBody(v *string) *IdempotencyRecordUpdate { + if v != nil { + _u.SetResponseBody(*v) + } + return _u +} + +// ClearResponseBody clears the value of the "response_body" field. +func (_u *IdempotencyRecordUpdate) ClearResponseBody() *IdempotencyRecordUpdate { + _u.mutation.ClearResponseBody() + return _u +} + +// SetErrorReason sets the "error_reason" field. +func (_u *IdempotencyRecordUpdate) SetErrorReason(v string) *IdempotencyRecordUpdate { + _u.mutation.SetErrorReason(v) + return _u +} + +// SetNillableErrorReason sets the "error_reason" field if the given value is not nil. +func (_u *IdempotencyRecordUpdate) SetNillableErrorReason(v *string) *IdempotencyRecordUpdate { + if v != nil { + _u.SetErrorReason(*v) + } + return _u +} + +// ClearErrorReason clears the value of the "error_reason" field. 
+func (_u *IdempotencyRecordUpdate) ClearErrorReason() *IdempotencyRecordUpdate { + _u.mutation.ClearErrorReason() + return _u +} + +// SetLockedUntil sets the "locked_until" field. +func (_u *IdempotencyRecordUpdate) SetLockedUntil(v time.Time) *IdempotencyRecordUpdate { + _u.mutation.SetLockedUntil(v) + return _u +} + +// SetNillableLockedUntil sets the "locked_until" field if the given value is not nil. +func (_u *IdempotencyRecordUpdate) SetNillableLockedUntil(v *time.Time) *IdempotencyRecordUpdate { + if v != nil { + _u.SetLockedUntil(*v) + } + return _u +} + +// ClearLockedUntil clears the value of the "locked_until" field. +func (_u *IdempotencyRecordUpdate) ClearLockedUntil() *IdempotencyRecordUpdate { + _u.mutation.ClearLockedUntil() + return _u +} + +// SetExpiresAt sets the "expires_at" field. +func (_u *IdempotencyRecordUpdate) SetExpiresAt(v time.Time) *IdempotencyRecordUpdate { + _u.mutation.SetExpiresAt(v) + return _u +} + +// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil. +func (_u *IdempotencyRecordUpdate) SetNillableExpiresAt(v *time.Time) *IdempotencyRecordUpdate { + if v != nil { + _u.SetExpiresAt(*v) + } + return _u +} + +// Mutation returns the IdempotencyRecordMutation object of the builder. +func (_u *IdempotencyRecordUpdate) Mutation() *IdempotencyRecordMutation { + return _u.mutation +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *IdempotencyRecordUpdate) Save(ctx context.Context) (int, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *IdempotencyRecordUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. 
+func (_u *IdempotencyRecordUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *IdempotencyRecordUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *IdempotencyRecordUpdate) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := idempotencyrecord.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *IdempotencyRecordUpdate) check() error { + if v, ok := _u.mutation.Scope(); ok { + if err := idempotencyrecord.ScopeValidator(v); err != nil { + return &ValidationError{Name: "scope", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.scope": %w`, err)} + } + } + if v, ok := _u.mutation.IdempotencyKeyHash(); ok { + if err := idempotencyrecord.IdempotencyKeyHashValidator(v); err != nil { + return &ValidationError{Name: "idempotency_key_hash", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.idempotency_key_hash": %w`, err)} + } + } + if v, ok := _u.mutation.RequestFingerprint(); ok { + if err := idempotencyrecord.RequestFingerprintValidator(v); err != nil { + return &ValidationError{Name: "request_fingerprint", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.request_fingerprint": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := idempotencyrecord.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.status": %w`, err)} + } + } + if v, ok := _u.mutation.ErrorReason(); ok { + if err := idempotencyrecord.ErrorReasonValidator(v); err != nil { + return &ValidationError{Name: "error_reason", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.error_reason": %w`, err)} + } + } + 
return nil +} + +func (_u *IdempotencyRecordUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(idempotencyrecord.Table, idempotencyrecord.Columns, sqlgraph.NewFieldSpec(idempotencyrecord.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(idempotencyrecord.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.Scope(); ok { + _spec.SetField(idempotencyrecord.FieldScope, field.TypeString, value) + } + if value, ok := _u.mutation.IdempotencyKeyHash(); ok { + _spec.SetField(idempotencyrecord.FieldIdempotencyKeyHash, field.TypeString, value) + } + if value, ok := _u.mutation.RequestFingerprint(); ok { + _spec.SetField(idempotencyrecord.FieldRequestFingerprint, field.TypeString, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(idempotencyrecord.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.ResponseStatus(); ok { + _spec.SetField(idempotencyrecord.FieldResponseStatus, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedResponseStatus(); ok { + _spec.AddField(idempotencyrecord.FieldResponseStatus, field.TypeInt, value) + } + if _u.mutation.ResponseStatusCleared() { + _spec.ClearField(idempotencyrecord.FieldResponseStatus, field.TypeInt) + } + if value, ok := _u.mutation.ResponseBody(); ok { + _spec.SetField(idempotencyrecord.FieldResponseBody, field.TypeString, value) + } + if _u.mutation.ResponseBodyCleared() { + _spec.ClearField(idempotencyrecord.FieldResponseBody, field.TypeString) + } + if value, ok := _u.mutation.ErrorReason(); ok { + _spec.SetField(idempotencyrecord.FieldErrorReason, field.TypeString, value) + } + if _u.mutation.ErrorReasonCleared() { + 
_spec.ClearField(idempotencyrecord.FieldErrorReason, field.TypeString) + } + if value, ok := _u.mutation.LockedUntil(); ok { + _spec.SetField(idempotencyrecord.FieldLockedUntil, field.TypeTime, value) + } + if _u.mutation.LockedUntilCleared() { + _spec.ClearField(idempotencyrecord.FieldLockedUntil, field.TypeTime) + } + if value, ok := _u.mutation.ExpiresAt(); ok { + _spec.SetField(idempotencyrecord.FieldExpiresAt, field.TypeTime, value) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{idempotencyrecord.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// IdempotencyRecordUpdateOne is the builder for updating a single IdempotencyRecord entity. +type IdempotencyRecordUpdateOne struct { + config + fields []string + hooks []Hook + mutation *IdempotencyRecordMutation +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *IdempotencyRecordUpdateOne) SetUpdatedAt(v time.Time) *IdempotencyRecordUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetScope sets the "scope" field. +func (_u *IdempotencyRecordUpdateOne) SetScope(v string) *IdempotencyRecordUpdateOne { + _u.mutation.SetScope(v) + return _u +} + +// SetNillableScope sets the "scope" field if the given value is not nil. +func (_u *IdempotencyRecordUpdateOne) SetNillableScope(v *string) *IdempotencyRecordUpdateOne { + if v != nil { + _u.SetScope(*v) + } + return _u +} + +// SetIdempotencyKeyHash sets the "idempotency_key_hash" field. +func (_u *IdempotencyRecordUpdateOne) SetIdempotencyKeyHash(v string) *IdempotencyRecordUpdateOne { + _u.mutation.SetIdempotencyKeyHash(v) + return _u +} + +// SetNillableIdempotencyKeyHash sets the "idempotency_key_hash" field if the given value is not nil. 
+func (_u *IdempotencyRecordUpdateOne) SetNillableIdempotencyKeyHash(v *string) *IdempotencyRecordUpdateOne { + if v != nil { + _u.SetIdempotencyKeyHash(*v) + } + return _u +} + +// SetRequestFingerprint sets the "request_fingerprint" field. +func (_u *IdempotencyRecordUpdateOne) SetRequestFingerprint(v string) *IdempotencyRecordUpdateOne { + _u.mutation.SetRequestFingerprint(v) + return _u +} + +// SetNillableRequestFingerprint sets the "request_fingerprint" field if the given value is not nil. +func (_u *IdempotencyRecordUpdateOne) SetNillableRequestFingerprint(v *string) *IdempotencyRecordUpdateOne { + if v != nil { + _u.SetRequestFingerprint(*v) + } + return _u +} + +// SetStatus sets the "status" field. +func (_u *IdempotencyRecordUpdateOne) SetStatus(v string) *IdempotencyRecordUpdateOne { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *IdempotencyRecordUpdateOne) SetNillableStatus(v *string) *IdempotencyRecordUpdateOne { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetResponseStatus sets the "response_status" field. +func (_u *IdempotencyRecordUpdateOne) SetResponseStatus(v int) *IdempotencyRecordUpdateOne { + _u.mutation.ResetResponseStatus() + _u.mutation.SetResponseStatus(v) + return _u +} + +// SetNillableResponseStatus sets the "response_status" field if the given value is not nil. +func (_u *IdempotencyRecordUpdateOne) SetNillableResponseStatus(v *int) *IdempotencyRecordUpdateOne { + if v != nil { + _u.SetResponseStatus(*v) + } + return _u +} + +// AddResponseStatus adds value to the "response_status" field. +func (_u *IdempotencyRecordUpdateOne) AddResponseStatus(v int) *IdempotencyRecordUpdateOne { + _u.mutation.AddResponseStatus(v) + return _u +} + +// ClearResponseStatus clears the value of the "response_status" field. 
+func (_u *IdempotencyRecordUpdateOne) ClearResponseStatus() *IdempotencyRecordUpdateOne { + _u.mutation.ClearResponseStatus() + return _u +} + +// SetResponseBody sets the "response_body" field. +func (_u *IdempotencyRecordUpdateOne) SetResponseBody(v string) *IdempotencyRecordUpdateOne { + _u.mutation.SetResponseBody(v) + return _u +} + +// SetNillableResponseBody sets the "response_body" field if the given value is not nil. +func (_u *IdempotencyRecordUpdateOne) SetNillableResponseBody(v *string) *IdempotencyRecordUpdateOne { + if v != nil { + _u.SetResponseBody(*v) + } + return _u +} + +// ClearResponseBody clears the value of the "response_body" field. +func (_u *IdempotencyRecordUpdateOne) ClearResponseBody() *IdempotencyRecordUpdateOne { + _u.mutation.ClearResponseBody() + return _u +} + +// SetErrorReason sets the "error_reason" field. +func (_u *IdempotencyRecordUpdateOne) SetErrorReason(v string) *IdempotencyRecordUpdateOne { + _u.mutation.SetErrorReason(v) + return _u +} + +// SetNillableErrorReason sets the "error_reason" field if the given value is not nil. +func (_u *IdempotencyRecordUpdateOne) SetNillableErrorReason(v *string) *IdempotencyRecordUpdateOne { + if v != nil { + _u.SetErrorReason(*v) + } + return _u +} + +// ClearErrorReason clears the value of the "error_reason" field. +func (_u *IdempotencyRecordUpdateOne) ClearErrorReason() *IdempotencyRecordUpdateOne { + _u.mutation.ClearErrorReason() + return _u +} + +// SetLockedUntil sets the "locked_until" field. +func (_u *IdempotencyRecordUpdateOne) SetLockedUntil(v time.Time) *IdempotencyRecordUpdateOne { + _u.mutation.SetLockedUntil(v) + return _u +} + +// SetNillableLockedUntil sets the "locked_until" field if the given value is not nil. +func (_u *IdempotencyRecordUpdateOne) SetNillableLockedUntil(v *time.Time) *IdempotencyRecordUpdateOne { + if v != nil { + _u.SetLockedUntil(*v) + } + return _u +} + +// ClearLockedUntil clears the value of the "locked_until" field. 
+func (_u *IdempotencyRecordUpdateOne) ClearLockedUntil() *IdempotencyRecordUpdateOne { + _u.mutation.ClearLockedUntil() + return _u +} + +// SetExpiresAt sets the "expires_at" field. +func (_u *IdempotencyRecordUpdateOne) SetExpiresAt(v time.Time) *IdempotencyRecordUpdateOne { + _u.mutation.SetExpiresAt(v) + return _u +} + +// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil. +func (_u *IdempotencyRecordUpdateOne) SetNillableExpiresAt(v *time.Time) *IdempotencyRecordUpdateOne { + if v != nil { + _u.SetExpiresAt(*v) + } + return _u +} + +// Mutation returns the IdempotencyRecordMutation object of the builder. +func (_u *IdempotencyRecordUpdateOne) Mutation() *IdempotencyRecordMutation { + return _u.mutation +} + +// Where appends a list predicates to the IdempotencyRecordUpdate builder. +func (_u *IdempotencyRecordUpdateOne) Where(ps ...predicate.IdempotencyRecord) *IdempotencyRecordUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *IdempotencyRecordUpdateOne) Select(field string, fields ...string) *IdempotencyRecordUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated IdempotencyRecord entity. +func (_u *IdempotencyRecordUpdateOne) Save(ctx context.Context) (*IdempotencyRecord, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *IdempotencyRecordUpdateOne) SaveX(ctx context.Context) *IdempotencyRecord { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *IdempotencyRecordUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (_u *IdempotencyRecordUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *IdempotencyRecordUpdateOne) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := idempotencyrecord.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *IdempotencyRecordUpdateOne) check() error { + if v, ok := _u.mutation.Scope(); ok { + if err := idempotencyrecord.ScopeValidator(v); err != nil { + return &ValidationError{Name: "scope", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.scope": %w`, err)} + } + } + if v, ok := _u.mutation.IdempotencyKeyHash(); ok { + if err := idempotencyrecord.IdempotencyKeyHashValidator(v); err != nil { + return &ValidationError{Name: "idempotency_key_hash", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.idempotency_key_hash": %w`, err)} + } + } + if v, ok := _u.mutation.RequestFingerprint(); ok { + if err := idempotencyrecord.RequestFingerprintValidator(v); err != nil { + return &ValidationError{Name: "request_fingerprint", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.request_fingerprint": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := idempotencyrecord.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.status": %w`, err)} + } + } + if v, ok := _u.mutation.ErrorReason(); ok { + if err := idempotencyrecord.ErrorReasonValidator(v); err != nil { + return &ValidationError{Name: "error_reason", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.error_reason": %w`, err)} + } + } + return nil +} + +func (_u *IdempotencyRecordUpdateOne) sqlSave(ctx context.Context) (_node *IdempotencyRecord, err error) { + if err := _u.check(); err != nil { + 
return _node, err + } + _spec := sqlgraph.NewUpdateSpec(idempotencyrecord.Table, idempotencyrecord.Columns, sqlgraph.NewFieldSpec(idempotencyrecord.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "IdempotencyRecord.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, idempotencyrecord.FieldID) + for _, f := range fields { + if !idempotencyrecord.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != idempotencyrecord.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(idempotencyrecord.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.Scope(); ok { + _spec.SetField(idempotencyrecord.FieldScope, field.TypeString, value) + } + if value, ok := _u.mutation.IdempotencyKeyHash(); ok { + _spec.SetField(idempotencyrecord.FieldIdempotencyKeyHash, field.TypeString, value) + } + if value, ok := _u.mutation.RequestFingerprint(); ok { + _spec.SetField(idempotencyrecord.FieldRequestFingerprint, field.TypeString, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(idempotencyrecord.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.ResponseStatus(); ok { + _spec.SetField(idempotencyrecord.FieldResponseStatus, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedResponseStatus(); ok { + _spec.AddField(idempotencyrecord.FieldResponseStatus, field.TypeInt, value) + } + if _u.mutation.ResponseStatusCleared() { + _spec.ClearField(idempotencyrecord.FieldResponseStatus, 
field.TypeInt) + } + if value, ok := _u.mutation.ResponseBody(); ok { + _spec.SetField(idempotencyrecord.FieldResponseBody, field.TypeString, value) + } + if _u.mutation.ResponseBodyCleared() { + _spec.ClearField(idempotencyrecord.FieldResponseBody, field.TypeString) + } + if value, ok := _u.mutation.ErrorReason(); ok { + _spec.SetField(idempotencyrecord.FieldErrorReason, field.TypeString, value) + } + if _u.mutation.ErrorReasonCleared() { + _spec.ClearField(idempotencyrecord.FieldErrorReason, field.TypeString) + } + if value, ok := _u.mutation.LockedUntil(); ok { + _spec.SetField(idempotencyrecord.FieldLockedUntil, field.TypeTime, value) + } + if _u.mutation.LockedUntilCleared() { + _spec.ClearField(idempotencyrecord.FieldLockedUntil, field.TypeTime) + } + if value, ok := _u.mutation.ExpiresAt(); ok { + _spec.SetField(idempotencyrecord.FieldExpiresAt, field.TypeTime, value) + } + _node = &IdempotencyRecord{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{idempotencyrecord.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/intercept/intercept.go b/backend/ent/intercept/intercept.go index 290fb163..e7746402 100644 --- a/backend/ent/intercept/intercept.go +++ b/backend/ent/intercept/intercept.go @@ -15,6 +15,7 @@ import ( "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule" "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/idempotencyrecord" "github.com/Wei-Shaw/sub2api/ent/predicate" "github.com/Wei-Shaw/sub2api/ent/promocode" "github.com/Wei-Shaw/sub2api/ent/promocodeusage" @@ -276,6 +277,33 @@ func (f TraverseGroup) Traverse(ctx context.Context, q ent.Query) 
error { return fmt.Errorf("unexpected query type %T. expect *ent.GroupQuery", q) } +// The IdempotencyRecordFunc type is an adapter to allow the use of ordinary function as a Querier. +type IdempotencyRecordFunc func(context.Context, *ent.IdempotencyRecordQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f IdempotencyRecordFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.IdempotencyRecordQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.IdempotencyRecordQuery", q) +} + +// The TraverseIdempotencyRecord type is an adapter to allow the use of ordinary function as Traverser. +type TraverseIdempotencyRecord func(context.Context, *ent.IdempotencyRecordQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseIdempotencyRecord) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseIdempotencyRecord) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.IdempotencyRecordQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.IdempotencyRecordQuery", q) +} + // The PromoCodeFunc type is an adapter to allow the use of ordinary function as a Querier. 
type PromoCodeFunc func(context.Context, *ent.PromoCodeQuery) (ent.Value, error) @@ -644,6 +672,8 @@ func NewQuery(q ent.Query) (Query, error) { return &query[*ent.ErrorPassthroughRuleQuery, predicate.ErrorPassthroughRule, errorpassthroughrule.OrderOption]{typ: ent.TypeErrorPassthroughRule, tq: q}, nil case *ent.GroupQuery: return &query[*ent.GroupQuery, predicate.Group, group.OrderOption]{typ: ent.TypeGroup, tq: q}, nil + case *ent.IdempotencyRecordQuery: + return &query[*ent.IdempotencyRecordQuery, predicate.IdempotencyRecord, idempotencyrecord.OrderOption]{typ: ent.TypeIdempotencyRecord, tq: q}, nil case *ent.PromoCodeQuery: return &query[*ent.PromoCodeQuery, predicate.PromoCode, promocode.OrderOption]{typ: ent.TypePromoCode, tq: q}, nil case *ent.PromoCodeUsageQuery: diff --git a/backend/ent/migrate/schema.go b/backend/ent/migrate/schema.go index aba00d4f..8fc1c9b6 100644 --- a/backend/ent/migrate/schema.go +++ b/backend/ent/migrate/schema.go @@ -384,6 +384,7 @@ var ( {Name: "mcp_xml_inject", Type: field.TypeBool, Default: true}, {Name: "supported_model_scopes", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "jsonb"}}, {Name: "sort_order", Type: field.TypeInt, Default: 0}, + {Name: "simulate_claude_max_enabled", Type: field.TypeBool, Default: false}, } // GroupsTable holds the schema information for the "groups" table. GroupsTable = &schema.Table{ @@ -423,6 +424,44 @@ var ( }, }, } + // IdempotencyRecordsColumns holds the columns for the "idempotency_records" table. 
+ IdempotencyRecordsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "scope", Type: field.TypeString, Size: 128}, + {Name: "idempotency_key_hash", Type: field.TypeString, Size: 64}, + {Name: "request_fingerprint", Type: field.TypeString, Size: 64}, + {Name: "status", Type: field.TypeString, Size: 32}, + {Name: "response_status", Type: field.TypeInt, Nullable: true}, + {Name: "response_body", Type: field.TypeString, Nullable: true}, + {Name: "error_reason", Type: field.TypeString, Nullable: true, Size: 128}, + {Name: "locked_until", Type: field.TypeTime, Nullable: true}, + {Name: "expires_at", Type: field.TypeTime}, + } + // IdempotencyRecordsTable holds the schema information for the "idempotency_records" table. + IdempotencyRecordsTable = &schema.Table{ + Name: "idempotency_records", + Columns: IdempotencyRecordsColumns, + PrimaryKey: []*schema.Column{IdempotencyRecordsColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "idempotencyrecord_scope_idempotency_key_hash", + Unique: true, + Columns: []*schema.Column{IdempotencyRecordsColumns[3], IdempotencyRecordsColumns[4]}, + }, + { + Name: "idempotencyrecord_expires_at", + Unique: false, + Columns: []*schema.Column{IdempotencyRecordsColumns[11]}, + }, + { + Name: "idempotencyrecord_status_locked_until", + Unique: false, + Columns: []*schema.Column{IdempotencyRecordsColumns[6], IdempotencyRecordsColumns[10]}, + }, + }, + } // PromoCodesColumns holds the columns for the "promo_codes" table. 
PromoCodesColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt64, Increment: true}, @@ -1021,6 +1060,7 @@ var ( AnnouncementReadsTable, ErrorPassthroughRulesTable, GroupsTable, + IdempotencyRecordsTable, PromoCodesTable, PromoCodeUsagesTable, ProxiesTable, @@ -1066,6 +1106,9 @@ func init() { GroupsTable.Annotation = &entsql.Annotation{ Table: "groups", } + IdempotencyRecordsTable.Annotation = &entsql.Annotation{ + Table: "idempotency_records", + } PromoCodesTable.Annotation = &entsql.Annotation{ Table: "promo_codes", } diff --git a/backend/ent/mutation.go b/backend/ent/mutation.go index 7d5bf180..17a053fb 100644 --- a/backend/ent/mutation.go +++ b/backend/ent/mutation.go @@ -19,6 +19,7 @@ import ( "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule" "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/idempotencyrecord" "github.com/Wei-Shaw/sub2api/ent/predicate" "github.com/Wei-Shaw/sub2api/ent/promocode" "github.com/Wei-Shaw/sub2api/ent/promocodeusage" @@ -52,6 +53,7 @@ const ( TypeAnnouncementRead = "AnnouncementRead" TypeErrorPassthroughRule = "ErrorPassthroughRule" TypeGroup = "Group" + TypeIdempotencyRecord = "IdempotencyRecord" TypePromoCode = "PromoCode" TypePromoCodeUsage = "PromoCodeUsage" TypeProxy = "Proxy" @@ -7198,6 +7200,7 @@ type GroupMutation struct { appendsupported_model_scopes []string sort_order *int addsort_order *int + simulate_claude_max_enabled *bool clearedFields map[string]struct{} api_keys map[int64]struct{} removedapi_keys map[int64]struct{} @@ -8886,6 +8889,42 @@ func (m *GroupMutation) ResetSortOrder() { m.addsort_order = nil } +// SetSimulateClaudeMaxEnabled sets the "simulate_claude_max_enabled" field. +func (m *GroupMutation) SetSimulateClaudeMaxEnabled(b bool) { + m.simulate_claude_max_enabled = &b +} + +// SimulateClaudeMaxEnabled returns the value of the "simulate_claude_max_enabled" field in the mutation. 
+func (m *GroupMutation) SimulateClaudeMaxEnabled() (r bool, exists bool) { + v := m.simulate_claude_max_enabled + if v == nil { + return + } + return *v, true +} + +// OldSimulateClaudeMaxEnabled returns the old "simulate_claude_max_enabled" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldSimulateClaudeMaxEnabled(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSimulateClaudeMaxEnabled is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSimulateClaudeMaxEnabled requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSimulateClaudeMaxEnabled: %w", err) + } + return oldValue.SimulateClaudeMaxEnabled, nil +} + +// ResetSimulateClaudeMaxEnabled resets all changes to the "simulate_claude_max_enabled" field. +func (m *GroupMutation) ResetSimulateClaudeMaxEnabled() { + m.simulate_claude_max_enabled = nil +} + // AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by ids. func (m *GroupMutation) AddAPIKeyIDs(ids ...int64) { if m.api_keys == nil { @@ -9244,7 +9283,7 @@ func (m *GroupMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
func (m *GroupMutation) Fields() []string { - fields := make([]string, 0, 29) + fields := make([]string, 0, 30) if m.created_at != nil { fields = append(fields, group.FieldCreatedAt) } @@ -9332,6 +9371,9 @@ func (m *GroupMutation) Fields() []string { if m.sort_order != nil { fields = append(fields, group.FieldSortOrder) } + if m.simulate_claude_max_enabled != nil { + fields = append(fields, group.FieldSimulateClaudeMaxEnabled) + } return fields } @@ -9398,6 +9440,8 @@ func (m *GroupMutation) Field(name string) (ent.Value, bool) { return m.SupportedModelScopes() case group.FieldSortOrder: return m.SortOrder() + case group.FieldSimulateClaudeMaxEnabled: + return m.SimulateClaudeMaxEnabled() } return nil, false } @@ -9465,6 +9509,8 @@ func (m *GroupMutation) OldField(ctx context.Context, name string) (ent.Value, e return m.OldSupportedModelScopes(ctx) case group.FieldSortOrder: return m.OldSortOrder(ctx) + case group.FieldSimulateClaudeMaxEnabled: + return m.OldSimulateClaudeMaxEnabled(ctx) } return nil, fmt.Errorf("unknown Group field %s", name) } @@ -9677,6 +9723,13 @@ func (m *GroupMutation) SetField(name string, value ent.Value) error { } m.SetSortOrder(v) return nil + case group.FieldSimulateClaudeMaxEnabled: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSimulateClaudeMaxEnabled(v) + return nil } return fmt.Errorf("unknown Group field %s", name) } @@ -10089,6 +10142,9 @@ func (m *GroupMutation) ResetField(name string) error { case group.FieldSortOrder: m.ResetSortOrder() return nil + case group.FieldSimulateClaudeMaxEnabled: + m.ResetSimulateClaudeMaxEnabled() + return nil } return fmt.Errorf("unknown Group field %s", name) } @@ -10307,6 +10363,988 @@ func (m *GroupMutation) ResetEdge(name string) error { return fmt.Errorf("unknown Group edge %s", name) } +// IdempotencyRecordMutation represents an operation that mutates the IdempotencyRecord nodes in the graph. 
+type IdempotencyRecordMutation struct { + config + op Op + typ string + id *int64 + created_at *time.Time + updated_at *time.Time + scope *string + idempotency_key_hash *string + request_fingerprint *string + status *string + response_status *int + addresponse_status *int + response_body *string + error_reason *string + locked_until *time.Time + expires_at *time.Time + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*IdempotencyRecord, error) + predicates []predicate.IdempotencyRecord +} + +var _ ent.Mutation = (*IdempotencyRecordMutation)(nil) + +// idempotencyrecordOption allows management of the mutation configuration using functional options. +type idempotencyrecordOption func(*IdempotencyRecordMutation) + +// newIdempotencyRecordMutation creates new mutation for the IdempotencyRecord entity. +func newIdempotencyRecordMutation(c config, op Op, opts ...idempotencyrecordOption) *IdempotencyRecordMutation { + m := &IdempotencyRecordMutation{ + config: c, + op: op, + typ: TypeIdempotencyRecord, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withIdempotencyRecordID sets the ID field of the mutation. +func withIdempotencyRecordID(id int64) idempotencyrecordOption { + return func(m *IdempotencyRecordMutation) { + var ( + err error + once sync.Once + value *IdempotencyRecord + ) + m.oldValue = func(ctx context.Context) (*IdempotencyRecord, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().IdempotencyRecord.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withIdempotencyRecord sets the old IdempotencyRecord of the mutation. 
+func withIdempotencyRecord(node *IdempotencyRecord) idempotencyrecordOption { + return func(m *IdempotencyRecordMutation) { + m.oldValue = func(context.Context) (*IdempotencyRecord, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m IdempotencyRecordMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m IdempotencyRecordMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *IdempotencyRecordMutation) ID() (id int64, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *IdempotencyRecordMutation) IDs(ctx context.Context) ([]int64, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int64{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().IdempotencyRecord.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreatedAt sets the "created_at" field. 
+func (m *IdempotencyRecordMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *IdempotencyRecordMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the IdempotencyRecord entity. +// If the IdempotencyRecord object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *IdempotencyRecordMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *IdempotencyRecordMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *IdempotencyRecordMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *IdempotencyRecordMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the IdempotencyRecord entity. +// If the IdempotencyRecord object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *IdempotencyRecordMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *IdempotencyRecordMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetScope sets the "scope" field. +func (m *IdempotencyRecordMutation) SetScope(s string) { + m.scope = &s +} + +// Scope returns the value of the "scope" field in the mutation. +func (m *IdempotencyRecordMutation) Scope() (r string, exists bool) { + v := m.scope + if v == nil { + return + } + return *v, true +} + +// OldScope returns the old "scope" field's value of the IdempotencyRecord entity. +// If the IdempotencyRecord object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *IdempotencyRecordMutation) OldScope(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldScope is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldScope requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldScope: %w", err) + } + return oldValue.Scope, nil +} + +// ResetScope resets all changes to the "scope" field. +func (m *IdempotencyRecordMutation) ResetScope() { + m.scope = nil +} + +// SetIdempotencyKeyHash sets the "idempotency_key_hash" field. 
+func (m *IdempotencyRecordMutation) SetIdempotencyKeyHash(s string) { + m.idempotency_key_hash = &s +} + +// IdempotencyKeyHash returns the value of the "idempotency_key_hash" field in the mutation. +func (m *IdempotencyRecordMutation) IdempotencyKeyHash() (r string, exists bool) { + v := m.idempotency_key_hash + if v == nil { + return + } + return *v, true +} + +// OldIdempotencyKeyHash returns the old "idempotency_key_hash" field's value of the IdempotencyRecord entity. +// If the IdempotencyRecord object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *IdempotencyRecordMutation) OldIdempotencyKeyHash(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldIdempotencyKeyHash is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldIdempotencyKeyHash requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIdempotencyKeyHash: %w", err) + } + return oldValue.IdempotencyKeyHash, nil +} + +// ResetIdempotencyKeyHash resets all changes to the "idempotency_key_hash" field. +func (m *IdempotencyRecordMutation) ResetIdempotencyKeyHash() { + m.idempotency_key_hash = nil +} + +// SetRequestFingerprint sets the "request_fingerprint" field. +func (m *IdempotencyRecordMutation) SetRequestFingerprint(s string) { + m.request_fingerprint = &s +} + +// RequestFingerprint returns the value of the "request_fingerprint" field in the mutation. +func (m *IdempotencyRecordMutation) RequestFingerprint() (r string, exists bool) { + v := m.request_fingerprint + if v == nil { + return + } + return *v, true +} + +// OldRequestFingerprint returns the old "request_fingerprint" field's value of the IdempotencyRecord entity. 
+// If the IdempotencyRecord object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *IdempotencyRecordMutation) OldRequestFingerprint(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRequestFingerprint is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRequestFingerprint requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRequestFingerprint: %w", err) + } + return oldValue.RequestFingerprint, nil +} + +// ResetRequestFingerprint resets all changes to the "request_fingerprint" field. +func (m *IdempotencyRecordMutation) ResetRequestFingerprint() { + m.request_fingerprint = nil +} + +// SetStatus sets the "status" field. +func (m *IdempotencyRecordMutation) SetStatus(s string) { + m.status = &s +} + +// Status returns the value of the "status" field in the mutation. +func (m *IdempotencyRecordMutation) Status() (r string, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the IdempotencyRecord entity. +// If the IdempotencyRecord object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *IdempotencyRecordMutation) OldStatus(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStatus: %w", err) + } + return oldValue.Status, nil +} + +// ResetStatus resets all changes to the "status" field. +func (m *IdempotencyRecordMutation) ResetStatus() { + m.status = nil +} + +// SetResponseStatus sets the "response_status" field. +func (m *IdempotencyRecordMutation) SetResponseStatus(i int) { + m.response_status = &i + m.addresponse_status = nil +} + +// ResponseStatus returns the value of the "response_status" field in the mutation. +func (m *IdempotencyRecordMutation) ResponseStatus() (r int, exists bool) { + v := m.response_status + if v == nil { + return + } + return *v, true +} + +// OldResponseStatus returns the old "response_status" field's value of the IdempotencyRecord entity. +// If the IdempotencyRecord object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *IdempotencyRecordMutation) OldResponseStatus(ctx context.Context) (v *int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldResponseStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldResponseStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldResponseStatus: %w", err) + } + return oldValue.ResponseStatus, nil +} + +// AddResponseStatus adds i to the "response_status" field. 
+func (m *IdempotencyRecordMutation) AddResponseStatus(i int) { + if m.addresponse_status != nil { + *m.addresponse_status += i + } else { + m.addresponse_status = &i + } +} + +// AddedResponseStatus returns the value that was added to the "response_status" field in this mutation. +func (m *IdempotencyRecordMutation) AddedResponseStatus() (r int, exists bool) { + v := m.addresponse_status + if v == nil { + return + } + return *v, true +} + +// ClearResponseStatus clears the value of the "response_status" field. +func (m *IdempotencyRecordMutation) ClearResponseStatus() { + m.response_status = nil + m.addresponse_status = nil + m.clearedFields[idempotencyrecord.FieldResponseStatus] = struct{}{} +} + +// ResponseStatusCleared returns if the "response_status" field was cleared in this mutation. +func (m *IdempotencyRecordMutation) ResponseStatusCleared() bool { + _, ok := m.clearedFields[idempotencyrecord.FieldResponseStatus] + return ok +} + +// ResetResponseStatus resets all changes to the "response_status" field. +func (m *IdempotencyRecordMutation) ResetResponseStatus() { + m.response_status = nil + m.addresponse_status = nil + delete(m.clearedFields, idempotencyrecord.FieldResponseStatus) +} + +// SetResponseBody sets the "response_body" field. +func (m *IdempotencyRecordMutation) SetResponseBody(s string) { + m.response_body = &s +} + +// ResponseBody returns the value of the "response_body" field in the mutation. +func (m *IdempotencyRecordMutation) ResponseBody() (r string, exists bool) { + v := m.response_body + if v == nil { + return + } + return *v, true +} + +// OldResponseBody returns the old "response_body" field's value of the IdempotencyRecord entity. +// If the IdempotencyRecord object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *IdempotencyRecordMutation) OldResponseBody(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldResponseBody is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldResponseBody requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldResponseBody: %w", err) + } + return oldValue.ResponseBody, nil +} + +// ClearResponseBody clears the value of the "response_body" field. +func (m *IdempotencyRecordMutation) ClearResponseBody() { + m.response_body = nil + m.clearedFields[idempotencyrecord.FieldResponseBody] = struct{}{} +} + +// ResponseBodyCleared returns if the "response_body" field was cleared in this mutation. +func (m *IdempotencyRecordMutation) ResponseBodyCleared() bool { + _, ok := m.clearedFields[idempotencyrecord.FieldResponseBody] + return ok +} + +// ResetResponseBody resets all changes to the "response_body" field. +func (m *IdempotencyRecordMutation) ResetResponseBody() { + m.response_body = nil + delete(m.clearedFields, idempotencyrecord.FieldResponseBody) +} + +// SetErrorReason sets the "error_reason" field. +func (m *IdempotencyRecordMutation) SetErrorReason(s string) { + m.error_reason = &s +} + +// ErrorReason returns the value of the "error_reason" field in the mutation. +func (m *IdempotencyRecordMutation) ErrorReason() (r string, exists bool) { + v := m.error_reason + if v == nil { + return + } + return *v, true +} + +// OldErrorReason returns the old "error_reason" field's value of the IdempotencyRecord entity. +// If the IdempotencyRecord object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *IdempotencyRecordMutation) OldErrorReason(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldErrorReason is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldErrorReason requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldErrorReason: %w", err) + } + return oldValue.ErrorReason, nil +} + +// ClearErrorReason clears the value of the "error_reason" field. +func (m *IdempotencyRecordMutation) ClearErrorReason() { + m.error_reason = nil + m.clearedFields[idempotencyrecord.FieldErrorReason] = struct{}{} +} + +// ErrorReasonCleared returns if the "error_reason" field was cleared in this mutation. +func (m *IdempotencyRecordMutation) ErrorReasonCleared() bool { + _, ok := m.clearedFields[idempotencyrecord.FieldErrorReason] + return ok +} + +// ResetErrorReason resets all changes to the "error_reason" field. +func (m *IdempotencyRecordMutation) ResetErrorReason() { + m.error_reason = nil + delete(m.clearedFields, idempotencyrecord.FieldErrorReason) +} + +// SetLockedUntil sets the "locked_until" field. +func (m *IdempotencyRecordMutation) SetLockedUntil(t time.Time) { + m.locked_until = &t +} + +// LockedUntil returns the value of the "locked_until" field in the mutation. +func (m *IdempotencyRecordMutation) LockedUntil() (r time.Time, exists bool) { + v := m.locked_until + if v == nil { + return + } + return *v, true +} + +// OldLockedUntil returns the old "locked_until" field's value of the IdempotencyRecord entity. +// If the IdempotencyRecord object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *IdempotencyRecordMutation) OldLockedUntil(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldLockedUntil is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldLockedUntil requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldLockedUntil: %w", err) + } + return oldValue.LockedUntil, nil +} + +// ClearLockedUntil clears the value of the "locked_until" field. +func (m *IdempotencyRecordMutation) ClearLockedUntil() { + m.locked_until = nil + m.clearedFields[idempotencyrecord.FieldLockedUntil] = struct{}{} +} + +// LockedUntilCleared returns if the "locked_until" field was cleared in this mutation. +func (m *IdempotencyRecordMutation) LockedUntilCleared() bool { + _, ok := m.clearedFields[idempotencyrecord.FieldLockedUntil] + return ok +} + +// ResetLockedUntil resets all changes to the "locked_until" field. +func (m *IdempotencyRecordMutation) ResetLockedUntil() { + m.locked_until = nil + delete(m.clearedFields, idempotencyrecord.FieldLockedUntil) +} + +// SetExpiresAt sets the "expires_at" field. +func (m *IdempotencyRecordMutation) SetExpiresAt(t time.Time) { + m.expires_at = &t +} + +// ExpiresAt returns the value of the "expires_at" field in the mutation. +func (m *IdempotencyRecordMutation) ExpiresAt() (r time.Time, exists bool) { + v := m.expires_at + if v == nil { + return + } + return *v, true +} + +// OldExpiresAt returns the old "expires_at" field's value of the IdempotencyRecord entity. +// If the IdempotencyRecord object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *IdempotencyRecordMutation) OldExpiresAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldExpiresAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldExpiresAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldExpiresAt: %w", err) + } + return oldValue.ExpiresAt, nil +} + +// ResetExpiresAt resets all changes to the "expires_at" field. +func (m *IdempotencyRecordMutation) ResetExpiresAt() { + m.expires_at = nil +} + +// Where appends a list predicates to the IdempotencyRecordMutation builder. +func (m *IdempotencyRecordMutation) Where(ps ...predicate.IdempotencyRecord) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the IdempotencyRecordMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *IdempotencyRecordMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.IdempotencyRecord, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *IdempotencyRecordMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *IdempotencyRecordMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (IdempotencyRecord). +func (m *IdempotencyRecordMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). 
+func (m *IdempotencyRecordMutation) Fields() []string { + fields := make([]string, 0, 11) + if m.created_at != nil { + fields = append(fields, idempotencyrecord.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, idempotencyrecord.FieldUpdatedAt) + } + if m.scope != nil { + fields = append(fields, idempotencyrecord.FieldScope) + } + if m.idempotency_key_hash != nil { + fields = append(fields, idempotencyrecord.FieldIdempotencyKeyHash) + } + if m.request_fingerprint != nil { + fields = append(fields, idempotencyrecord.FieldRequestFingerprint) + } + if m.status != nil { + fields = append(fields, idempotencyrecord.FieldStatus) + } + if m.response_status != nil { + fields = append(fields, idempotencyrecord.FieldResponseStatus) + } + if m.response_body != nil { + fields = append(fields, idempotencyrecord.FieldResponseBody) + } + if m.error_reason != nil { + fields = append(fields, idempotencyrecord.FieldErrorReason) + } + if m.locked_until != nil { + fields = append(fields, idempotencyrecord.FieldLockedUntil) + } + if m.expires_at != nil { + fields = append(fields, idempotencyrecord.FieldExpiresAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. 
+func (m *IdempotencyRecordMutation) Field(name string) (ent.Value, bool) { + switch name { + case idempotencyrecord.FieldCreatedAt: + return m.CreatedAt() + case idempotencyrecord.FieldUpdatedAt: + return m.UpdatedAt() + case idempotencyrecord.FieldScope: + return m.Scope() + case idempotencyrecord.FieldIdempotencyKeyHash: + return m.IdempotencyKeyHash() + case idempotencyrecord.FieldRequestFingerprint: + return m.RequestFingerprint() + case idempotencyrecord.FieldStatus: + return m.Status() + case idempotencyrecord.FieldResponseStatus: + return m.ResponseStatus() + case idempotencyrecord.FieldResponseBody: + return m.ResponseBody() + case idempotencyrecord.FieldErrorReason: + return m.ErrorReason() + case idempotencyrecord.FieldLockedUntil: + return m.LockedUntil() + case idempotencyrecord.FieldExpiresAt: + return m.ExpiresAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
+func (m *IdempotencyRecordMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case idempotencyrecord.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case idempotencyrecord.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case idempotencyrecord.FieldScope: + return m.OldScope(ctx) + case idempotencyrecord.FieldIdempotencyKeyHash: + return m.OldIdempotencyKeyHash(ctx) + case idempotencyrecord.FieldRequestFingerprint: + return m.OldRequestFingerprint(ctx) + case idempotencyrecord.FieldStatus: + return m.OldStatus(ctx) + case idempotencyrecord.FieldResponseStatus: + return m.OldResponseStatus(ctx) + case idempotencyrecord.FieldResponseBody: + return m.OldResponseBody(ctx) + case idempotencyrecord.FieldErrorReason: + return m.OldErrorReason(ctx) + case idempotencyrecord.FieldLockedUntil: + return m.OldLockedUntil(ctx) + case idempotencyrecord.FieldExpiresAt: + return m.OldExpiresAt(ctx) + } + return nil, fmt.Errorf("unknown IdempotencyRecord field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *IdempotencyRecordMutation) SetField(name string, value ent.Value) error { + switch name { + case idempotencyrecord.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case idempotencyrecord.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case idempotencyrecord.FieldScope: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetScope(v) + return nil + case idempotencyrecord.FieldIdempotencyKeyHash: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetIdempotencyKeyHash(v) + return nil + case idempotencyrecord.FieldRequestFingerprint: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRequestFingerprint(v) + return nil + case idempotencyrecord.FieldStatus: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + case idempotencyrecord.FieldResponseStatus: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetResponseStatus(v) + return nil + case idempotencyrecord.FieldResponseBody: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetResponseBody(v) + return nil + case idempotencyrecord.FieldErrorReason: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetErrorReason(v) + return nil + case idempotencyrecord.FieldLockedUntil: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetLockedUntil(v) + return nil + case 
idempotencyrecord.FieldExpiresAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetExpiresAt(v) + return nil + } + return fmt.Errorf("unknown IdempotencyRecord field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *IdempotencyRecordMutation) AddedFields() []string { + var fields []string + if m.addresponse_status != nil { + fields = append(fields, idempotencyrecord.FieldResponseStatus) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *IdempotencyRecordMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case idempotencyrecord.FieldResponseStatus: + return m.AddedResponseStatus() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *IdempotencyRecordMutation) AddField(name string, value ent.Value) error { + switch name { + case idempotencyrecord.FieldResponseStatus: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddResponseStatus(v) + return nil + } + return fmt.Errorf("unknown IdempotencyRecord numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. 
+func (m *IdempotencyRecordMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(idempotencyrecord.FieldResponseStatus) { + fields = append(fields, idempotencyrecord.FieldResponseStatus) + } + if m.FieldCleared(idempotencyrecord.FieldResponseBody) { + fields = append(fields, idempotencyrecord.FieldResponseBody) + } + if m.FieldCleared(idempotencyrecord.FieldErrorReason) { + fields = append(fields, idempotencyrecord.FieldErrorReason) + } + if m.FieldCleared(idempotencyrecord.FieldLockedUntil) { + fields = append(fields, idempotencyrecord.FieldLockedUntil) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *IdempotencyRecordMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *IdempotencyRecordMutation) ClearField(name string) error { + switch name { + case idempotencyrecord.FieldResponseStatus: + m.ClearResponseStatus() + return nil + case idempotencyrecord.FieldResponseBody: + m.ClearResponseBody() + return nil + case idempotencyrecord.FieldErrorReason: + m.ClearErrorReason() + return nil + case idempotencyrecord.FieldLockedUntil: + m.ClearLockedUntil() + return nil + } + return fmt.Errorf("unknown IdempotencyRecord nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. 
+func (m *IdempotencyRecordMutation) ResetField(name string) error { + switch name { + case idempotencyrecord.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case idempotencyrecord.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case idempotencyrecord.FieldScope: + m.ResetScope() + return nil + case idempotencyrecord.FieldIdempotencyKeyHash: + m.ResetIdempotencyKeyHash() + return nil + case idempotencyrecord.FieldRequestFingerprint: + m.ResetRequestFingerprint() + return nil + case idempotencyrecord.FieldStatus: + m.ResetStatus() + return nil + case idempotencyrecord.FieldResponseStatus: + m.ResetResponseStatus() + return nil + case idempotencyrecord.FieldResponseBody: + m.ResetResponseBody() + return nil + case idempotencyrecord.FieldErrorReason: + m.ResetErrorReason() + return nil + case idempotencyrecord.FieldLockedUntil: + m.ResetLockedUntil() + return nil + case idempotencyrecord.FieldExpiresAt: + m.ResetExpiresAt() + return nil + } + return fmt.Errorf("unknown IdempotencyRecord field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *IdempotencyRecordMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *IdempotencyRecordMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *IdempotencyRecordMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *IdempotencyRecordMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. 
+func (m *IdempotencyRecordMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *IdempotencyRecordMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *IdempotencyRecordMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown IdempotencyRecord unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *IdempotencyRecordMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown IdempotencyRecord edge %s", name) +} + // PromoCodeMutation represents an operation that mutates the PromoCode nodes in the graph. type PromoCodeMutation struct { config diff --git a/backend/ent/predicate/predicate.go b/backend/ent/predicate/predicate.go index 584b9606..89d933fc 100644 --- a/backend/ent/predicate/predicate.go +++ b/backend/ent/predicate/predicate.go @@ -27,6 +27,9 @@ type ErrorPassthroughRule func(*sql.Selector) // Group is the predicate function for group builders. type Group func(*sql.Selector) +// IdempotencyRecord is the predicate function for idempotencyrecord builders. +type IdempotencyRecord func(*sql.Selector) + // PromoCode is the predicate function for promocode builders. 
type PromoCode func(*sql.Selector) diff --git a/backend/ent/runtime/runtime.go b/backend/ent/runtime/runtime.go index ff3f8f26..f038ca0f 100644 --- a/backend/ent/runtime/runtime.go +++ b/backend/ent/runtime/runtime.go @@ -12,6 +12,7 @@ import ( "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule" "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/idempotencyrecord" "github.com/Wei-Shaw/sub2api/ent/promocode" "github.com/Wei-Shaw/sub2api/ent/promocodeusage" "github.com/Wei-Shaw/sub2api/ent/proxy" @@ -418,6 +419,45 @@ func init() { groupDescSortOrder := groupFields[25].Descriptor() // group.DefaultSortOrder holds the default value on creation for the sort_order field. group.DefaultSortOrder = groupDescSortOrder.Default.(int) + // groupDescSimulateClaudeMaxEnabled is the schema descriptor for simulate_claude_max_enabled field. + groupDescSimulateClaudeMaxEnabled := groupFields[26].Descriptor() + // group.DefaultSimulateClaudeMaxEnabled holds the default value on creation for the simulate_claude_max_enabled field. + group.DefaultSimulateClaudeMaxEnabled = groupDescSimulateClaudeMaxEnabled.Default.(bool) + idempotencyrecordMixin := schema.IdempotencyRecord{}.Mixin() + idempotencyrecordMixinFields0 := idempotencyrecordMixin[0].Fields() + _ = idempotencyrecordMixinFields0 + idempotencyrecordFields := schema.IdempotencyRecord{}.Fields() + _ = idempotencyrecordFields + // idempotencyrecordDescCreatedAt is the schema descriptor for created_at field. + idempotencyrecordDescCreatedAt := idempotencyrecordMixinFields0[0].Descriptor() + // idempotencyrecord.DefaultCreatedAt holds the default value on creation for the created_at field. + idempotencyrecord.DefaultCreatedAt = idempotencyrecordDescCreatedAt.Default.(func() time.Time) + // idempotencyrecordDescUpdatedAt is the schema descriptor for updated_at field. 
+ idempotencyrecordDescUpdatedAt := idempotencyrecordMixinFields0[1].Descriptor() + // idempotencyrecord.DefaultUpdatedAt holds the default value on creation for the updated_at field. + idempotencyrecord.DefaultUpdatedAt = idempotencyrecordDescUpdatedAt.Default.(func() time.Time) + // idempotencyrecord.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + idempotencyrecord.UpdateDefaultUpdatedAt = idempotencyrecordDescUpdatedAt.UpdateDefault.(func() time.Time) + // idempotencyrecordDescScope is the schema descriptor for scope field. + idempotencyrecordDescScope := idempotencyrecordFields[0].Descriptor() + // idempotencyrecord.ScopeValidator is a validator for the "scope" field. It is called by the builders before save. + idempotencyrecord.ScopeValidator = idempotencyrecordDescScope.Validators[0].(func(string) error) + // idempotencyrecordDescIdempotencyKeyHash is the schema descriptor for idempotency_key_hash field. + idempotencyrecordDescIdempotencyKeyHash := idempotencyrecordFields[1].Descriptor() + // idempotencyrecord.IdempotencyKeyHashValidator is a validator for the "idempotency_key_hash" field. It is called by the builders before save. + idempotencyrecord.IdempotencyKeyHashValidator = idempotencyrecordDescIdempotencyKeyHash.Validators[0].(func(string) error) + // idempotencyrecordDescRequestFingerprint is the schema descriptor for request_fingerprint field. + idempotencyrecordDescRequestFingerprint := idempotencyrecordFields[2].Descriptor() + // idempotencyrecord.RequestFingerprintValidator is a validator for the "request_fingerprint" field. It is called by the builders before save. + idempotencyrecord.RequestFingerprintValidator = idempotencyrecordDescRequestFingerprint.Validators[0].(func(string) error) + // idempotencyrecordDescStatus is the schema descriptor for status field. + idempotencyrecordDescStatus := idempotencyrecordFields[3].Descriptor() + // idempotencyrecord.StatusValidator is a validator for the "status" field. 
It is called by the builders before save. + idempotencyrecord.StatusValidator = idempotencyrecordDescStatus.Validators[0].(func(string) error) + // idempotencyrecordDescErrorReason is the schema descriptor for error_reason field. + idempotencyrecordDescErrorReason := idempotencyrecordFields[6].Descriptor() + // idempotencyrecord.ErrorReasonValidator is a validator for the "error_reason" field. It is called by the builders before save. + idempotencyrecord.ErrorReasonValidator = idempotencyrecordDescErrorReason.Validators[0].(func(string) error) promocodeFields := schema.PromoCode{}.Fields() _ = promocodeFields // promocodeDescCode is the schema descriptor for code field. diff --git a/backend/ent/schema/group.go b/backend/ent/schema/group.go index fddf23ce..dafa700a 100644 --- a/backend/ent/schema/group.go +++ b/backend/ent/schema/group.go @@ -33,8 +33,6 @@ func (Group) Mixin() []ent.Mixin { func (Group) Fields() []ent.Field { return []ent.Field{ - // 唯一约束通过部分索引实现(WHERE deleted_at IS NULL),支持软删除后重用 - // 见迁移文件 016_soft_delete_partial_unique_indexes.sql field.String("name"). MaxLen(100). NotEmpty(), @@ -51,7 +49,6 @@ func (Group) Fields() []ent.Field { MaxLen(20). Default(domain.StatusActive), - // Subscription-related fields (added by migration 003) field.String("platform"). MaxLen(50). Default(domain.PlatformAnthropic), @@ -73,7 +70,6 @@ func (Group) Fields() []ent.Field { field.Int("default_validity_days"). Default(30), - // 图片生成计费配置(antigravity 和 gemini 平台使用) field.Float("image_price_1k"). Optional(). Nillable(). @@ -87,7 +83,6 @@ func (Group) Fields() []ent.Field { Nillable(). SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}), - // Sora 按次计费配置(阶段 1) field.Float("sora_image_price_360"). Optional(). Nillable(). @@ -105,45 +100,41 @@ func (Group) Fields() []ent.Field { Nillable(). SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}), - // Claude Code 客户端限制 (added by migration 029) field.Bool("claude_code_only"). Default(false). 
- Comment("是否仅允许 Claude Code 客户端"), + Comment("allow Claude Code client only"), field.Int64("fallback_group_id"). Optional(). Nillable(). - Comment("非 Claude Code 请求降级使用的分组 ID"), + Comment("fallback group for non-Claude-Code requests"), field.Int64("fallback_group_id_on_invalid_request"). Optional(). Nillable(). - Comment("无效请求兜底使用的分组 ID"), + Comment("fallback group for invalid request"), - // 模型路由配置 (added by migration 040) field.JSON("model_routing", map[string][]int64{}). Optional(). SchemaType(map[string]string{dialect.Postgres: "jsonb"}). - Comment("模型路由配置:模型模式 -> 优先账号ID列表"), - - // 模型路由开关 (added by migration 041) + Comment("model routing config: pattern -> account ids"), field.Bool("model_routing_enabled"). Default(false). - Comment("是否启用模型路由配置"), + Comment("whether model routing is enabled"), - // MCP XML 协议注入开关 (added by migration 042) field.Bool("mcp_xml_inject"). Default(true). - Comment("是否注入 MCP XML 调用协议提示词(仅 antigravity 平台)"), + Comment("whether MCP XML prompt injection is enabled"), - // 支持的模型系列 (added by migration 046) field.JSON("supported_model_scopes", []string{}). Default([]string{"claude", "gemini_text", "gemini_image"}). SchemaType(map[string]string{dialect.Postgres: "jsonb"}). - Comment("支持的模型系列:claude, gemini_text, gemini_image"), + Comment("supported model scopes: claude, gemini_text, gemini_image"), - // 分组排序 (added by migration 052) field.Int("sort_order"). Default(0). - Comment("分组显示排序,数值越小越靠前"), + Comment("group display order, lower comes first"), + field.Bool("simulate_claude_max_enabled"). + Default(false). + Comment("simulate claude usage as claude-max style (1h cache write)"), } } @@ -159,14 +150,11 @@ func (Group) Edges() []ent.Edge { edge.From("allowed_users", User.Type). Ref("allowed_groups"). 
Through("user_allowed_groups", UserAllowedGroup.Type), - // 注意:fallback_group_id 直接作为字段使用,不定义 edge - // 这样允许多个分组指向同一个降级分组(M2O 关系) } } func (Group) Indexes() []ent.Index { return []ent.Index{ - // name 字段已在 Fields() 中声明 Unique(),无需重复索引 index.Fields("status"), index.Fields("platform"), index.Fields("subscription_type"), diff --git a/backend/ent/tx.go b/backend/ent/tx.go index 4fbe9bb4..cd3b2296 100644 --- a/backend/ent/tx.go +++ b/backend/ent/tx.go @@ -28,6 +28,8 @@ type Tx struct { ErrorPassthroughRule *ErrorPassthroughRuleClient // Group is the client for interacting with the Group builders. Group *GroupClient + // IdempotencyRecord is the client for interacting with the IdempotencyRecord builders. + IdempotencyRecord *IdempotencyRecordClient // PromoCode is the client for interacting with the PromoCode builders. PromoCode *PromoCodeClient // PromoCodeUsage is the client for interacting with the PromoCodeUsage builders. @@ -192,6 +194,7 @@ func (tx *Tx) init() { tx.AnnouncementRead = NewAnnouncementReadClient(tx.config) tx.ErrorPassthroughRule = NewErrorPassthroughRuleClient(tx.config) tx.Group = NewGroupClient(tx.config) + tx.IdempotencyRecord = NewIdempotencyRecordClient(tx.config) tx.PromoCode = NewPromoCodeClient(tx.config) tx.PromoCodeUsage = NewPromoCodeUsageClient(tx.config) tx.Proxy = NewProxyClient(tx.config) diff --git a/backend/go.sum b/backend/go.sum index d1728e48..9e9fc545 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -139,6 +139,8 @@ github.com/icholy/digest v1.1.0 h1:HfGg9Irj7i+IX1o1QAmPfIBNu/Q5A5Tu3n/MED9k9H4= github.com/icholy/digest v1.1.0/go.mod h1:QNrsSGQ5v7v9cReDI0+eyjsXGUoRSUZQHeQ5C4XLa0Y= github.com/imroc/req/v3 v3.57.0 h1:LMTUjNRUybUkTPn8oJDq8Kg3JRBOBTcnDhKu7mzupKI= github.com/imroc/req/v3 v3.57.0/go.mod h1:JL62ey1nvSLq81HORNcosvlf7SxZStONNqOprg0Pz00= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod 
h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= @@ -174,6 +176,8 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= +github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM= github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mdelapenya/tlscert v0.2.0 h1:7H81W6Z/4weDvZBNOfQte5GpIMo0lGYEeWbkGp5LJHI= @@ -207,6 +211,8 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w= github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= @@ -236,6 +242,8 @@ 
github.com/refraction-networking/utls v1.8.2 h1:j4Q1gJj0xngdeH+Ox/qND11aEfhpgoEv github.com/refraction-networking/utls v1.8.2/go.mod h1:jkSOEkLqn+S/jtpEHPOsVv/4V4EVnelwbMQl4vCWXAM= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= @@ -258,6 +266,8 @@ github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= diff --git a/backend/internal/handler/admin/group_handler.go b/backend/internal/handler/admin/group_handler.go index 25ff3c96..e7368cb8 100644 --- a/backend/internal/handler/admin/group_handler.go +++ b/backend/internal/handler/admin/group_handler.go @@ -46,9 +46,10 @@ type CreateGroupRequest struct { FallbackGroupID *int64 `json:"fallback_group_id"` FallbackGroupIDOnInvalidRequest *int64 
`json:"fallback_group_id_on_invalid_request"` // 模型路由配置(仅 anthropic 平台使用) - ModelRouting map[string][]int64 `json:"model_routing"` - ModelRoutingEnabled bool `json:"model_routing_enabled"` - MCPXMLInject *bool `json:"mcp_xml_inject"` + ModelRouting map[string][]int64 `json:"model_routing"` + ModelRoutingEnabled bool `json:"model_routing_enabled"` + MCPXMLInject *bool `json:"mcp_xml_inject"` + SimulateClaudeMaxEnabled *bool `json:"simulate_claude_max_enabled"` // 支持的模型系列(仅 antigravity 平台使用) SupportedModelScopes []string `json:"supported_model_scopes"` // 从指定分组复制账号(创建后自动绑定) @@ -79,9 +80,10 @@ type UpdateGroupRequest struct { FallbackGroupID *int64 `json:"fallback_group_id"` FallbackGroupIDOnInvalidRequest *int64 `json:"fallback_group_id_on_invalid_request"` // 模型路由配置(仅 anthropic 平台使用) - ModelRouting map[string][]int64 `json:"model_routing"` - ModelRoutingEnabled *bool `json:"model_routing_enabled"` - MCPXMLInject *bool `json:"mcp_xml_inject"` + ModelRouting map[string][]int64 `json:"model_routing"` + ModelRoutingEnabled *bool `json:"model_routing_enabled"` + MCPXMLInject *bool `json:"mcp_xml_inject"` + SimulateClaudeMaxEnabled *bool `json:"simulate_claude_max_enabled"` // 支持的模型系列(仅 antigravity 平台使用) SupportedModelScopes *[]string `json:"supported_model_scopes"` // 从指定分组复制账号(同步操作:先清空当前分组的账号绑定,再绑定源分组的账号) @@ -197,6 +199,7 @@ func (h *GroupHandler) Create(c *gin.Context) { ModelRouting: req.ModelRouting, ModelRoutingEnabled: req.ModelRoutingEnabled, MCPXMLInject: req.MCPXMLInject, + SimulateClaudeMaxEnabled: req.SimulateClaudeMaxEnabled, SupportedModelScopes: req.SupportedModelScopes, CopyAccountsFromGroupIDs: req.CopyAccountsFromGroupIDs, }) @@ -247,6 +250,7 @@ func (h *GroupHandler) Update(c *gin.Context) { ModelRouting: req.ModelRouting, ModelRoutingEnabled: req.ModelRoutingEnabled, MCPXMLInject: req.MCPXMLInject, + SimulateClaudeMaxEnabled: req.SimulateClaudeMaxEnabled, SupportedModelScopes: req.SupportedModelScopes, CopyAccountsFromGroupIDs: 
req.CopyAccountsFromGroupIDs, }) diff --git a/backend/internal/handler/dto/mappers.go b/backend/internal/handler/dto/mappers.go index 42ff4a84..cc481279 100644 --- a/backend/internal/handler/dto/mappers.go +++ b/backend/internal/handler/dto/mappers.go @@ -111,13 +111,14 @@ func GroupFromServiceAdmin(g *service.Group) *AdminGroup { return nil } out := &AdminGroup{ - Group: groupFromServiceBase(g), - ModelRouting: g.ModelRouting, - ModelRoutingEnabled: g.ModelRoutingEnabled, - MCPXMLInject: g.MCPXMLInject, - SupportedModelScopes: g.SupportedModelScopes, - AccountCount: g.AccountCount, - SortOrder: g.SortOrder, + Group: groupFromServiceBase(g), + ModelRouting: g.ModelRouting, + ModelRoutingEnabled: g.ModelRoutingEnabled, + MCPXMLInject: g.MCPXMLInject, + SimulateClaudeMaxEnabled: g.SimulateClaudeMaxEnabled, + SupportedModelScopes: g.SupportedModelScopes, + AccountCount: g.AccountCount, + SortOrder: g.SortOrder, } if len(g.AccountGroups) > 0 { out.AccountGroups = make([]AccountGroup, 0, len(g.AccountGroups)) diff --git a/backend/internal/handler/dto/types.go b/backend/internal/handler/dto/types.go index 0cd1b241..e99d9587 100644 --- a/backend/internal/handler/dto/types.go +++ b/backend/internal/handler/dto/types.go @@ -95,6 +95,8 @@ type AdminGroup struct { // MCP XML 协议注入(仅 antigravity 平台使用) MCPXMLInject bool `json:"mcp_xml_inject"` + // Claude usage 模拟开关(仅管理员可见) + SimulateClaudeMaxEnabled bool `json:"simulate_claude_max_enabled"` // 支持的模型系列(仅 antigravity 平台使用) SupportedModelScopes []string `json:"supported_model_scopes"` diff --git a/backend/internal/handler/gateway_handler.go b/backend/internal/handler/gateway_handler.go index fe40e9d2..103bd086 100644 --- a/backend/internal/handler/gateway_handler.go +++ b/backend/internal/handler/gateway_handler.go @@ -405,6 +405,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { h.submitUsageRecordTask(func(ctx context.Context) { if err := h.gatewayService.RecordUsage(ctx, &service.RecordUsageInput{ Result: result, + 
ParsedRequest: parsedReq, APIKey: apiKey, User: apiKey.User, Account: account, @@ -631,6 +632,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { h.submitUsageRecordTask(func(ctx context.Context) { if err := h.gatewayService.RecordUsage(ctx, &service.RecordUsageInput{ Result: result, + ParsedRequest: parsedReq, APIKey: currentAPIKey, User: currentAPIKey.User, Account: account, diff --git a/backend/internal/repository/api_key_repo.go b/backend/internal/repository/api_key_repo.go index cdccd4fc..2b4a0e5b 100644 --- a/backend/internal/repository/api_key_repo.go +++ b/backend/internal/repository/api_key_repo.go @@ -152,6 +152,7 @@ func (r *apiKeyRepository) GetByKeyForAuth(ctx context.Context, key string) (*se group.FieldModelRoutingEnabled, group.FieldModelRouting, group.FieldMcpXMLInject, + group.FieldSimulateClaudeMaxEnabled, group.FieldSupportedModelScopes, ) }). @@ -493,6 +494,7 @@ func groupEntityToService(g *dbent.Group) *service.Group { ModelRouting: g.ModelRouting, ModelRoutingEnabled: g.ModelRoutingEnabled, MCPXMLInject: g.McpXMLInject, + SimulateClaudeMaxEnabled: g.SimulateClaudeMaxEnabled, SupportedModelScopes: g.SupportedModelScopes, SortOrder: g.SortOrder, CreatedAt: g.CreatedAt, diff --git a/backend/internal/repository/group_repo.go b/backend/internal/repository/group_repo.go index fd239996..9dffc4b9 100644 --- a/backend/internal/repository/group_repo.go +++ b/backend/internal/repository/group_repo.go @@ -56,7 +56,8 @@ func (r *groupRepository) Create(ctx context.Context, groupIn *service.Group) er SetNillableFallbackGroupID(groupIn.FallbackGroupID). SetNillableFallbackGroupIDOnInvalidRequest(groupIn.FallbackGroupIDOnInvalidRequest). SetModelRoutingEnabled(groupIn.ModelRoutingEnabled). - SetMcpXMLInject(groupIn.MCPXMLInject) + SetMcpXMLInject(groupIn.MCPXMLInject). 
+ SetSimulateClaudeMaxEnabled(groupIn.SimulateClaudeMaxEnabled) // 设置模型路由配置 if groupIn.ModelRouting != nil { @@ -121,7 +122,8 @@ func (r *groupRepository) Update(ctx context.Context, groupIn *service.Group) er SetDefaultValidityDays(groupIn.DefaultValidityDays). SetClaudeCodeOnly(groupIn.ClaudeCodeOnly). SetModelRoutingEnabled(groupIn.ModelRoutingEnabled). - SetMcpXMLInject(groupIn.MCPXMLInject) + SetMcpXMLInject(groupIn.MCPXMLInject). + SetSimulateClaudeMaxEnabled(groupIn.SimulateClaudeMaxEnabled) // 处理 FallbackGroupID:nil 时清除,否则设置 if groupIn.FallbackGroupID != nil { diff --git a/backend/internal/service/admin_service.go b/backend/internal/service/admin_service.go index 47339661..8404e8d3 100644 --- a/backend/internal/service/admin_service.go +++ b/backend/internal/service/admin_service.go @@ -130,9 +130,10 @@ type CreateGroupInput struct { // 无效请求兜底分组 ID(仅 anthropic 平台使用) FallbackGroupIDOnInvalidRequest *int64 // 模型路由配置(仅 anthropic 平台使用) - ModelRouting map[string][]int64 - ModelRoutingEnabled bool // 是否启用模型路由 - MCPXMLInject *bool + ModelRouting map[string][]int64 + ModelRoutingEnabled bool // 是否启用模型路由 + MCPXMLInject *bool + SimulateClaudeMaxEnabled *bool // 支持的模型系列(仅 antigravity 平台使用) SupportedModelScopes []string // 从指定分组复制账号(创建分组后在同一事务内绑定) @@ -164,9 +165,10 @@ type UpdateGroupInput struct { // 无效请求兜底分组 ID(仅 anthropic 平台使用) FallbackGroupIDOnInvalidRequest *int64 // 模型路由配置(仅 anthropic 平台使用) - ModelRouting map[string][]int64 - ModelRoutingEnabled *bool // 是否启用模型路由 - MCPXMLInject *bool + ModelRouting map[string][]int64 + ModelRoutingEnabled *bool // 是否启用模型路由 + MCPXMLInject *bool + SimulateClaudeMaxEnabled *bool // 支持的模型系列(仅 antigravity 平台使用) SupportedModelScopes *[]string // 从指定分组复制账号(同步操作:先清空当前分组的账号绑定,再绑定源分组的账号) @@ -763,6 +765,13 @@ func (s *adminServiceImpl) CreateGroup(ctx context.Context, input *CreateGroupIn if input.MCPXMLInject != nil { mcpXMLInject = *input.MCPXMLInject } + simulateClaudeMaxEnabled := false + if input.SimulateClaudeMaxEnabled != nil { + if 
platform != PlatformAnthropic && *input.SimulateClaudeMaxEnabled { + return nil, fmt.Errorf("simulate_claude_max_enabled only supported for anthropic groups") + } + simulateClaudeMaxEnabled = *input.SimulateClaudeMaxEnabled + } // 如果指定了复制账号的源分组,先获取账号 ID 列表 var accountIDsToCopy []int64 @@ -819,6 +828,7 @@ func (s *adminServiceImpl) CreateGroup(ctx context.Context, input *CreateGroupIn FallbackGroupIDOnInvalidRequest: fallbackOnInvalidRequest, ModelRouting: input.ModelRouting, MCPXMLInject: mcpXMLInject, + SimulateClaudeMaxEnabled: simulateClaudeMaxEnabled, SupportedModelScopes: input.SupportedModelScopes, } if err := s.groupRepo.Create(ctx, group); err != nil { @@ -1024,6 +1034,15 @@ func (s *adminServiceImpl) UpdateGroup(ctx context.Context, id int64, input *Upd if input.MCPXMLInject != nil { group.MCPXMLInject = *input.MCPXMLInject } + if input.SimulateClaudeMaxEnabled != nil { + if group.Platform != PlatformAnthropic && *input.SimulateClaudeMaxEnabled { + return nil, fmt.Errorf("simulate_claude_max_enabled only supported for anthropic groups") + } + group.SimulateClaudeMaxEnabled = *input.SimulateClaudeMaxEnabled + } + if group.Platform != PlatformAnthropic { + group.SimulateClaudeMaxEnabled = false + } // 支持的模型系列(仅 antigravity 平台使用) if input.SupportedModelScopes != nil { diff --git a/backend/internal/service/admin_service_group_test.go b/backend/internal/service/admin_service_group_test.go index ef77a980..0e6fe084 100644 --- a/backend/internal/service/admin_service_group_test.go +++ b/backend/internal/service/admin_service_group_test.go @@ -785,3 +785,57 @@ func TestAdminService_UpdateGroup_InvalidRequestFallbackAllowsAntigravity(t *tes require.NotNil(t, repo.updated) require.Equal(t, fallbackID, *repo.updated.FallbackGroupIDOnInvalidRequest) } + +func TestAdminService_CreateGroup_SimulateClaudeMaxRequiresAnthropic(t *testing.T) { + repo := &groupRepoStubForAdmin{} + svc := &adminServiceImpl{groupRepo: repo} + + enabled := true + _, err := 
svc.CreateGroup(context.Background(), &CreateGroupInput{ + Name: "openai-group", + Platform: PlatformOpenAI, + SimulateClaudeMaxEnabled: &enabled, + }) + require.Error(t, err) + require.Contains(t, err.Error(), "simulate_claude_max_enabled only supported for anthropic groups") + require.Nil(t, repo.created) +} + +func TestAdminService_UpdateGroup_SimulateClaudeMaxRequiresAnthropic(t *testing.T) { + existingGroup := &Group{ + ID: 1, + Name: "openai-group", + Platform: PlatformOpenAI, + Status: StatusActive, + } + repo := &groupRepoStubForAdmin{getByID: existingGroup} + svc := &adminServiceImpl{groupRepo: repo} + + enabled := true + _, err := svc.UpdateGroup(context.Background(), 1, &UpdateGroupInput{ + SimulateClaudeMaxEnabled: &enabled, + }) + require.Error(t, err) + require.Contains(t, err.Error(), "simulate_claude_max_enabled only supported for anthropic groups") + require.Nil(t, repo.updated) +} + +func TestAdminService_UpdateGroup_ClearsSimulateClaudeMaxWhenPlatformChanges(t *testing.T) { + existingGroup := &Group{ + ID: 1, + Name: "anthropic-group", + Platform: PlatformAnthropic, + Status: StatusActive, + SimulateClaudeMaxEnabled: true, + } + repo := &groupRepoStubForAdmin{getByID: existingGroup} + svc := &adminServiceImpl{groupRepo: repo} + + group, err := svc.UpdateGroup(context.Background(), 1, &UpdateGroupInput{ + Platform: PlatformOpenAI, + }) + require.NoError(t, err) + require.NotNil(t, group) + require.NotNil(t, repo.updated) + require.False(t, repo.updated.SimulateClaudeMaxEnabled) +} diff --git a/backend/internal/service/api_key_auth_cache.go b/backend/internal/service/api_key_auth_cache.go index 4240be23..4b736903 100644 --- a/backend/internal/service/api_key_auth_cache.go +++ b/backend/internal/service/api_key_auth_cache.go @@ -54,9 +54,10 @@ type APIKeyAuthGroupSnapshot struct { // Model routing is used by gateway account selection, so it must be part of auth cache snapshot. // Only anthropic groups use these fields; others may leave them empty. 
- ModelRouting map[string][]int64 `json:"model_routing,omitempty"` - ModelRoutingEnabled bool `json:"model_routing_enabled"` - MCPXMLInject bool `json:"mcp_xml_inject"` + ModelRouting map[string][]int64 `json:"model_routing,omitempty"` + ModelRoutingEnabled bool `json:"model_routing_enabled"` + MCPXMLInject bool `json:"mcp_xml_inject"` + SimulateClaudeMaxEnabled bool `json:"simulate_claude_max_enabled"` // 支持的模型系列(仅 antigravity 平台使用) SupportedModelScopes []string `json:"supported_model_scopes,omitempty"` diff --git a/backend/internal/service/api_key_auth_cache_impl.go b/backend/internal/service/api_key_auth_cache_impl.go index 77a75674..3614d2e6 100644 --- a/backend/internal/service/api_key_auth_cache_impl.go +++ b/backend/internal/service/api_key_auth_cache_impl.go @@ -241,6 +241,7 @@ func (s *APIKeyService) snapshotFromAPIKey(apiKey *APIKey) *APIKeyAuthSnapshot { ModelRouting: apiKey.Group.ModelRouting, ModelRoutingEnabled: apiKey.Group.ModelRoutingEnabled, MCPXMLInject: apiKey.Group.MCPXMLInject, + SimulateClaudeMaxEnabled: apiKey.Group.SimulateClaudeMaxEnabled, SupportedModelScopes: apiKey.Group.SupportedModelScopes, } } @@ -295,6 +296,7 @@ func (s *APIKeyService) snapshotToAPIKey(key string, snapshot *APIKeyAuthSnapsho ModelRouting: snapshot.Group.ModelRouting, ModelRoutingEnabled: snapshot.Group.ModelRoutingEnabled, MCPXMLInject: snapshot.Group.MCPXMLInject, + SimulateClaudeMaxEnabled: snapshot.Group.SimulateClaudeMaxEnabled, SupportedModelScopes: snapshot.Group.SupportedModelScopes, } } diff --git a/backend/internal/service/claude_max_simulation_test.go b/backend/internal/service/claude_max_simulation_test.go new file mode 100644 index 00000000..8f4690a0 --- /dev/null +++ b/backend/internal/service/claude_max_simulation_test.go @@ -0,0 +1,92 @@ +package service + +import "testing" + +func TestProjectUsageToClaudeMax1H_Conservation(t *testing.T) { + usage := &ClaudeUsage{ + InputTokens: 1200, + CacheCreationInputTokens: 0, + CacheCreation5mTokens: 0, + 
CacheCreation1hTokens: 0, + } + parsed := &ParsedRequest{ + Model: "claude-sonnet-4-5", + Messages: []any{ + map[string]any{ + "role": "user", + "content": "请帮我总结这段代码并给出优化建议", + }, + }, + } + + changed := projectUsageToClaudeMax1H(usage, parsed) + if !changed { + t.Fatalf("expected usage to be projected") + } + + total := usage.InputTokens + usage.CacheCreation5mTokens + usage.CacheCreation1hTokens + if total != 1200 { + t.Fatalf("total tokens changed: got=%d want=%d", total, 1200) + } + if usage.CacheCreation5mTokens != 0 { + t.Fatalf("cache_creation_5m should be 0, got=%d", usage.CacheCreation5mTokens) + } + if usage.InputTokens <= 0 || usage.InputTokens >= 1200 { + t.Fatalf("simulated input out of range, got=%d", usage.InputTokens) + } + if usage.CacheCreation1hTokens <= 0 { + t.Fatalf("cache_creation_1h should be > 0, got=%d", usage.CacheCreation1hTokens) + } + if usage.CacheCreationInputTokens != usage.CacheCreation1hTokens { + t.Fatalf("cache_creation_input_tokens mismatch: got=%d want=%d", usage.CacheCreationInputTokens, usage.CacheCreation1hTokens) + } +} + +func TestComputeClaudeMaxSimulatedInputTokens_Deterministic(t *testing.T) { + parsed := &ParsedRequest{ + Model: "claude-opus-4-5", + Messages: []any{ + map[string]any{ + "role": "user", + "content": []any{ + map[string]any{"type": "text", "text": "请整理以下日志并定位错误根因"}, + map[string]any{"type": "tool_use", "name": "grep_logs"}, + }, + }, + }, + } + + got1 := computeClaudeMaxSimulatedInputTokens(4096, parsed) + got2 := computeClaudeMaxSimulatedInputTokens(4096, parsed) + if got1 != got2 { + t.Fatalf("non-deterministic input tokens: %d != %d", got1, got2) + } +} + +func TestShouldSimulateClaudeMaxUsage(t *testing.T) { + group := &Group{ + Platform: PlatformAnthropic, + SimulateClaudeMaxEnabled: true, + } + input := &RecordUsageInput{ + Result: &ForwardResult{ + Model: "claude-sonnet-4-5", + Usage: ClaudeUsage{ + InputTokens: 3000, + CacheCreationInputTokens: 0, + CacheCreation5mTokens: 0, + 
CacheCreation1hTokens: 0, + }, + }, + APIKey: &APIKey{Group: group}, + } + + if !shouldSimulateClaudeMaxUsage(input) { + t.Fatalf("expected simulate=true for claude group without cache creation") + } + + input.Result.Usage.CacheCreationInputTokens = 100 + if shouldSimulateClaudeMaxUsage(input) { + t.Fatalf("expected simulate=false when cache creation already exists") + } +} diff --git a/backend/internal/service/gateway_record_usage_claude_max_test.go b/backend/internal/service/gateway_record_usage_claude_max_test.go new file mode 100644 index 00000000..a4c8850c --- /dev/null +++ b/backend/internal/service/gateway_record_usage_claude_max_test.go @@ -0,0 +1,140 @@ +package service + +import ( + "context" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/stretchr/testify/require" +) + +type usageLogRepoRecordUsageStub struct { + UsageLogRepository + + last *UsageLog + inserted bool + err error +} + +func (s *usageLogRepoRecordUsageStub) Create(_ context.Context, log *UsageLog) (bool, error) { + copied := *log + s.last = &copied + return s.inserted, s.err +} + +func newGatewayServiceForRecordUsageTest(repo UsageLogRepository) *GatewayService { + return &GatewayService{ + usageLogRepo: repo, + billingService: NewBillingService(&config.Config{}, nil), + cfg: &config.Config{RunMode: config.RunModeSimple}, + deferredService: &DeferredService{}, + } +} + +func TestRecordUsage_SimulateClaudeMaxEnabled_ProjectsAndSkipsTTLOverride(t *testing.T) { + repo := &usageLogRepoRecordUsageStub{inserted: true} + svc := newGatewayServiceForRecordUsageTest(repo) + + groupID := int64(11) + input := &RecordUsageInput{ + Result: &ForwardResult{ + RequestID: "req-sim-1", + Model: "claude-sonnet-4", + Duration: time.Second, + Usage: ClaudeUsage{ + InputTokens: 160, + }, + }, + ParsedRequest: &ParsedRequest{ + Model: "claude-sonnet-4", + Messages: []any{ + map[string]any{ + "role": "user", + "content": "please summarize the logs and provide root cause 
analysis", + }, + }, + }, + APIKey: &APIKey{ + ID: 1, + GroupID: &groupID, + Group: &Group{ + ID: groupID, + Platform: PlatformAnthropic, + RateMultiplier: 1, + SimulateClaudeMaxEnabled: true, + }, + }, + User: &User{ID: 2}, + Account: &Account{ + ID: 3, + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Extra: map[string]any{ + "cache_ttl_override_enabled": true, + "cache_ttl_override_target": "5m", + }, + }, + } + + err := svc.RecordUsage(context.Background(), input) + require.NoError(t, err) + require.NotNil(t, repo.last) + + log := repo.last + total := log.InputTokens + log.CacheCreation5mTokens + log.CacheCreation1hTokens + require.Equal(t, 160, total, "token 总量应保持不变") + require.Greater(t, log.CacheCreation1hTokens, 0, "应映射为 1h cache creation") + require.Equal(t, 0, log.CacheCreation5mTokens, "模拟成功后不应再被 TTL override 改写为 5m") + require.Equal(t, log.CacheCreation1hTokens, log.CacheCreationTokens, "聚合 cache_creation_tokens 应与 1h 一致") + require.False(t, log.CacheTTLOverridden, "模拟成功时应跳过 TTL override 标记") +} + +func TestRecordUsage_SimulateClaudeMaxDisabled_AppliesTTLOverride(t *testing.T) { + repo := &usageLogRepoRecordUsageStub{inserted: true} + svc := newGatewayServiceForRecordUsageTest(repo) + + groupID := int64(12) + input := &RecordUsageInput{ + Result: &ForwardResult{ + RequestID: "req-sim-2", + Model: "claude-sonnet-4", + Duration: time.Second, + Usage: ClaudeUsage{ + InputTokens: 40, + CacheCreationInputTokens: 120, + CacheCreation1hTokens: 120, + }, + }, + APIKey: &APIKey{ + ID: 2, + GroupID: &groupID, + Group: &Group{ + ID: groupID, + Platform: PlatformAnthropic, + RateMultiplier: 1, + SimulateClaudeMaxEnabled: false, + }, + }, + User: &User{ID: 3}, + Account: &Account{ + ID: 4, + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Extra: map[string]any{ + "cache_ttl_override_enabled": true, + "cache_ttl_override_target": "5m", + }, + }, + } + + err := svc.RecordUsage(context.Background(), input) + require.NoError(t, err) + require.NotNil(t, 
repo.last) + + log := repo.last + require.Equal(t, 120, log.CacheCreationTokens) + require.Equal(t, 120, log.CacheCreation5mTokens, "关闭模拟时应执行 TTL override 到 5m") + require.Equal(t, 0, log.CacheCreation1hTokens) + require.True(t, log.CacheTTLOverridden, "TTL override 生效时应打标") +} diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 5c14e7f9..32fda175 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -56,6 +56,15 @@ const ( claudeMimicDebugInfoKey = "claude_mimic_debug_info" ) +const ( + claudeMaxSimInputMinTokens = 8 + claudeMaxSimInputMaxTokens = 96 + claudeMaxSimBaseOverheadTokens = 8 + claudeMaxSimPerBlockOverhead = 2 + claudeMaxSimSummaryMaxRunes = 160 + claudeMaxSimContextDivisor = 16 +) + // ForceCacheBillingContextKey 强制缓存计费上下文键 // 用于粘性会话切换时,将 input_tokens 转为 cache_read_input_tokens 计费 type forceCacheBillingKeyType struct{} @@ -5566,9 +5575,228 @@ func (s *GatewayService) getUserGroupRateMultiplier(ctx context.Context, userID, return multiplier } +func isClaudeFamilyModel(model string) bool { + normalized := strings.ToLower(strings.TrimSpace(claude.NormalizeModelID(model))) + if normalized == "" { + return false + } + return strings.Contains(normalized, "claude-") +} + +func shouldSimulateClaudeMaxUsage(input *RecordUsageInput) bool { + if input == nil || input.Result == nil || input.APIKey == nil || input.APIKey.Group == nil { + return false + } + group := input.APIKey.Group + if !group.SimulateClaudeMaxEnabled || group.Platform != PlatformAnthropic { + return false + } + + model := input.Result.Model + if model == "" && input.ParsedRequest != nil { + model = input.ParsedRequest.Model + } + if !isClaudeFamilyModel(model) { + return false + } + + usage := input.Result.Usage + if usage.InputTokens <= 0 { + return false + } + if usage.CacheCreationInputTokens > 0 || usage.CacheCreation5mTokens > 0 || usage.CacheCreation1hTokens > 0 { + return false 
+ } + return true +} + +func applyClaudeMaxUsageSimulation(result *ForwardResult, parsed *ParsedRequest) bool { + if result == nil { + return false + } + return projectUsageToClaudeMax1H(&result.Usage, parsed) +} + +func projectUsageToClaudeMax1H(usage *ClaudeUsage, parsed *ParsedRequest) bool { + if usage == nil { + return false + } + totalWindowTokens := usage.InputTokens + usage.CacheCreation5mTokens + usage.CacheCreation1hTokens + if totalWindowTokens <= 1 { + return false + } + + simulatedInputTokens := computeClaudeMaxSimulatedInputTokens(totalWindowTokens, parsed) + if simulatedInputTokens <= 0 { + simulatedInputTokens = 1 + } + if simulatedInputTokens >= totalWindowTokens { + simulatedInputTokens = totalWindowTokens - 1 + } + + cacheCreation1hTokens := totalWindowTokens - simulatedInputTokens + if usage.InputTokens == simulatedInputTokens && + usage.CacheCreation5mTokens == 0 && + usage.CacheCreation1hTokens == cacheCreation1hTokens && + usage.CacheCreationInputTokens == cacheCreation1hTokens { + return false + } + + usage.InputTokens = simulatedInputTokens + usage.CacheCreation5mTokens = 0 + usage.CacheCreation1hTokens = cacheCreation1hTokens + usage.CacheCreationInputTokens = cacheCreation1hTokens + return true +} + +func computeClaudeMaxSimulatedInputTokens(totalWindowTokens int, parsed *ParsedRequest) int { + if totalWindowTokens <= 1 { + return totalWindowTokens + } + + summary, blockCount := extractTailUserMessageSummary(parsed) + if blockCount <= 0 { + blockCount = 1 + } + + asciiChars := 0 + nonASCIIChars := 0 + for _, r := range summary { + if r <= 127 { + asciiChars++ + continue + } + nonASCIIChars++ + } + + lexicalTokens := nonASCIIChars + if asciiChars > 0 { + lexicalTokens += (asciiChars + 3) / 4 + } + wordCount := len(strings.Fields(summary)) + if wordCount > lexicalTokens { + lexicalTokens = wordCount + } + if lexicalTokens == 0 { + lexicalTokens = 1 + } + + structuralTokens := claudeMaxSimBaseOverheadTokens + 
blockCount*claudeMaxSimPerBlockOverhead + rawInputTokens := structuralTokens + lexicalTokens + + maxInputTokens := clampInt(totalWindowTokens/claudeMaxSimContextDivisor, claudeMaxSimInputMinTokens, claudeMaxSimInputMaxTokens) + if totalWindowTokens <= claudeMaxSimInputMinTokens+1 { + maxInputTokens = totalWindowTokens - 1 + } + if maxInputTokens <= 0 { + return totalWindowTokens + } + + minInputTokens := 1 + if totalWindowTokens > claudeMaxSimInputMinTokens+1 { + minInputTokens = claudeMaxSimInputMinTokens + } + return clampInt(rawInputTokens, minInputTokens, maxInputTokens) +} + +func extractTailUserMessageSummary(parsed *ParsedRequest) (string, int) { + if parsed == nil || len(parsed.Messages) == 0 { + return "", 1 + } + for i := len(parsed.Messages) - 1; i >= 0; i-- { + message, ok := parsed.Messages[i].(map[string]any) + if !ok { + continue + } + role, _ := message["role"].(string) + if !strings.EqualFold(strings.TrimSpace(role), "user") { + continue + } + summary, blockCount := summarizeUserContentBlocks(message["content"]) + if blockCount <= 0 { + blockCount = 1 + } + return summary, blockCount + } + return "", 1 +} + +func summarizeUserContentBlocks(content any) (string, int) { + appendSegment := func(segments []string, raw string) []string { + normalized := strings.Join(strings.Fields(strings.TrimSpace(raw)), " ") + if normalized == "" { + return segments + } + return append(segments, normalized) + } + + switch value := content.(type) { + case string: + return trimClaudeMaxSummary(value), 1 + case []any: + if len(value) == 0 { + return "", 1 + } + segments := make([]string, 0, len(value)) + for _, blockRaw := range value { + block, ok := blockRaw.(map[string]any) + if !ok { + continue + } + blockType, _ := block["type"].(string) + switch blockType { + case "text": + if text, ok := block["text"].(string); ok { + segments = appendSegment(segments, text) + } + case "tool_result": + nestedSummary, _ := summarizeUserContentBlocks(block["content"]) + segments = 
appendSegment(segments, nestedSummary) + case "tool_use": + if name, ok := block["name"].(string); ok { + segments = appendSegment(segments, name) + } + default: + if text, ok := block["text"].(string); ok { + segments = appendSegment(segments, text) + } + } + } + return trimClaudeMaxSummary(strings.Join(segments, " ")), len(value) + default: + return "", 1 + } +} + +func trimClaudeMaxSummary(summary string) string { + normalized := strings.Join(strings.Fields(strings.TrimSpace(summary)), " ") + if normalized == "" { + return "" + } + runes := []rune(normalized) + if len(runes) > claudeMaxSimSummaryMaxRunes { + return string(runes[:claudeMaxSimSummaryMaxRunes]) + } + return normalized +} + +func clampInt(v, minValue, maxValue int) int { + if minValue > maxValue { + return minValue + } + if v < minValue { + return minValue + } + if v > maxValue { + return maxValue + } + return v +} + // RecordUsageInput 记录使用量的输入参数 type RecordUsageInput struct { Result *ForwardResult + ParsedRequest *ParsedRequest APIKey *APIKey User *User Account *Account @@ -5601,9 +5829,25 @@ func (s *GatewayService) RecordUsage(ctx context.Context, input *RecordUsageInpu result.Usage.InputTokens = 0 } + // Claude 分组模拟:将无写缓存 usage 映射为 claude-max 风格的 1h cache creation。 + simulatedClaudeMax := false + if shouldSimulateClaudeMaxUsage(input) { + beforeInputTokens := result.Usage.InputTokens + simulatedClaudeMax = applyClaudeMaxUsageSimulation(result, input.ParsedRequest) + if simulatedClaudeMax { + logger.LegacyPrintf("service.gateway", "simulate_claude_max_usage: model=%s account=%d input_tokens:%d->%d cache_creation_1h=%d", + result.Model, + account.ID, + beforeInputTokens, + result.Usage.InputTokens, + result.Usage.CacheCreation1hTokens, + ) + } + } + // Cache TTL Override: 确保计费时 token 分类与账号设置一致 cacheTTLOverridden := false - if account.IsCacheTTLOverrideEnabled() { + if account.IsCacheTTLOverrideEnabled() && !simulatedClaudeMax { applyCacheTTLOverride(&result.Usage, 
account.GetCacheTTLOverrideTarget()) cacheTTLOverridden = (result.Usage.CacheCreation5mTokens + result.Usage.CacheCreation1hTokens) > 0 } diff --git a/backend/internal/service/group.go b/backend/internal/service/group.go index 86ece03f..ba06a52d 100644 --- a/backend/internal/service/group.go +++ b/backend/internal/service/group.go @@ -47,6 +47,9 @@ type Group struct { // MCP XML 协议注入开关(仅 antigravity 平台使用) MCPXMLInject bool + // Claude usage 模拟开关:将无写缓存 usage 模拟为 claude-max 风格 + SimulateClaudeMaxEnabled bool + // 支持的模型系列(仅 antigravity 平台使用) // 可选值: claude, gemini_text, gemini_image SupportedModelScopes []string diff --git a/backend/migrations/060_add_group_simulate_claude_max.sql b/backend/migrations/060_add_group_simulate_claude_max.sql new file mode 100644 index 00000000..55662dfd --- /dev/null +++ b/backend/migrations/060_add_group_simulate_claude_max.sql @@ -0,0 +1,3 @@ +ALTER TABLE groups + ADD COLUMN IF NOT EXISTS simulate_claude_max_enabled BOOLEAN NOT NULL DEFAULT FALSE; + diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts index cdd9ad19..794c4d4b 100644 --- a/frontend/src/i18n/locales/en.ts +++ b/frontend/src/i18n/locales/en.ts @@ -1191,6 +1191,14 @@ export default { enabled: 'Enabled', disabled: 'Disabled' }, + claudeMaxSimulation: { + title: 'Claude Max Usage Simulation', + tooltip: + 'When enabled, for Claude models without upstream cache-write usage, the system deterministically maps tokens to a small input plus 1h cache creation while keeping total tokens unchanged.', + enabled: 'Enabled (simulate 1h cache)', + disabled: 'Disabled', + hint: 'Only token categories in usage billing logs are adjusted. No per-request mapping state is persisted.' + }, supportedScopes: { title: 'Supported Model Families', tooltip: 'Select the model families this group supports. 
Unchecked families will not be routed to this group.', diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index 8ef50267..837af3a4 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -1280,6 +1280,14 @@ export default { enabled: '已启用', disabled: '已禁用' }, + claudeMaxSimulation: { + title: 'Claude Max 用量模拟', + tooltip: + '启用后,针对 Claude 模型且上游未返回写缓存时,系统会按确定性算法把输入 token 映射为少量 input,并将其余归入 1h cache creation,保持总 token 不变。', + enabled: '已启用(模拟 1h 缓存)', + disabled: '已禁用', + hint: '仅影响 usage 计费记录中的 token 分类,不保存请求级映射状态。' + }, supportedScopes: { title: '支持的模型系列', tooltip: '选择此分组支持的模型系列。未勾选的系列将不会被路由到此分组。', diff --git a/frontend/src/types/index.ts b/frontend/src/types/index.ts index a54cfcef..aa7fb0be 100644 --- a/frontend/src/types/index.ts +++ b/frontend/src/types/index.ts @@ -378,6 +378,8 @@ export interface AdminGroup extends Group { // MCP XML 协议注入(仅 antigravity 平台使用) mcp_xml_inject: boolean + // Claude usage 模拟开关(仅 anthropic 平台使用) + simulate_claude_max_enabled: boolean // 支持的模型系列(仅 antigravity 平台使用) supported_model_scopes?: string[] @@ -449,6 +451,7 @@ export interface CreateGroupRequest { fallback_group_id?: number | null fallback_group_id_on_invalid_request?: number | null mcp_xml_inject?: boolean + simulate_claude_max_enabled?: boolean supported_model_scopes?: string[] // 从指定分组复制账号 copy_accounts_from_group_ids?: number[] @@ -476,6 +479,7 @@ export interface UpdateGroupRequest { fallback_group_id?: number | null fallback_group_id_on_invalid_request?: number | null mcp_xml_inject?: boolean + simulate_claude_max_enabled?: boolean supported_model_scopes?: string[] copy_accounts_from_group_ids?: number[] } diff --git a/frontend/src/views/admin/GroupsView.vue b/frontend/src/views/admin/GroupsView.vue index 4d6dccf6..016762e4 100644 --- a/frontend/src/views/admin/GroupsView.vue +++ b/frontend/src/views/admin/GroupsView.vue @@ -691,6 +691,58 @@
+ +
+
+ +
+ +
+
+

+ {{ t('admin.groups.claudeMaxSimulation.tooltip') }} +

+
+
+
+
+
+
+ + + {{ + createForm.simulate_claude_max_enabled + ? t('admin.groups.claudeMaxSimulation.enabled') + : t('admin.groups.claudeMaxSimulation.disabled') + }} + +
+

+ {{ t('admin.groups.claudeMaxSimulation.hint') }} +

+
+
+ +
+
+ +
+ +
+
+

+ {{ t('admin.groups.claudeMaxSimulation.tooltip') }} +

+
+
+
+
+
+
+ + + {{ + editForm.simulate_claude_max_enabled + ? t('admin.groups.claudeMaxSimulation.enabled') + : t('admin.groups.claudeMaxSimulation.disabled') + }} + +
+

+ {{ t('admin.groups.claudeMaxSimulation.hint') }} +

+
+
{ createForm.sora_video_price_per_request = null createForm.sora_video_price_per_request_hd = null createForm.claude_code_only = false + createForm.simulate_claude_max_enabled = false createForm.fallback_group_id = null createForm.fallback_group_id_on_invalid_request = null createForm.supported_model_scopes = ['claude', 'gemini_text', 'gemini_image'] @@ -2239,6 +2348,8 @@ const handleCreateGroup = async () => { // 构建请求数据,包含模型路由配置 const requestData = { ...createForm, + simulate_claude_max_enabled: + createForm.platform === 'anthropic' ? createForm.simulate_claude_max_enabled : false, model_routing: convertRoutingRulesToApiFormat(createModelRoutingRules.value) } await adminAPI.groups.create(requestData) @@ -2278,6 +2389,7 @@ const handleEdit = async (group: AdminGroup) => { editForm.sora_video_price_per_request = group.sora_video_price_per_request editForm.sora_video_price_per_request_hd = group.sora_video_price_per_request_hd editForm.claude_code_only = group.claude_code_only || false + editForm.simulate_claude_max_enabled = group.simulate_claude_max_enabled || false editForm.fallback_group_id = group.fallback_group_id editForm.fallback_group_id_on_invalid_request = group.fallback_group_id_on_invalid_request editForm.model_routing_enabled = group.model_routing_enabled || false @@ -2297,6 +2409,7 @@ const closeEditModal = () => { showEditModal.value = false editingGroup.value = null editModelRoutingRules.value = [] + editForm.simulate_claude_max_enabled = false editForm.copy_accounts_from_group_ids = [] } @@ -2312,6 +2425,8 @@ const handleUpdateGroup = async () => { // 转换 fallback_group_id: null -> 0 (后端使用 0 表示清除) const payload = { ...editForm, + simulate_claude_max_enabled: + editForm.platform === 'anthropic' ? editForm.simulate_claude_max_enabled : false, fallback_group_id: editForm.fallback_group_id === null ? 
0 : editForm.fallback_group_id, fallback_group_id_on_invalid_request: editForm.fallback_group_id_on_invalid_request === null @@ -2368,6 +2483,21 @@ watch( if (!['anthropic', 'antigravity'].includes(newVal)) { createForm.fallback_group_id_on_invalid_request = null } + if (newVal !== 'anthropic') { + createForm.simulate_claude_max_enabled = false + } + } +) + +watch( + () => editForm.platform, + (newVal) => { + if (!['anthropic', 'antigravity'].includes(newVal)) { + editForm.fallback_group_id_on_invalid_request = null + } + if (newVal !== 'anthropic') { + editForm.simulate_claude_max_enabled = false + } } ) From f4d3fadd6fbc074b024ac850a60ea750587fbd07 Mon Sep 17 00:00:00 2001 From: erio Date: Fri, 27 Feb 2026 01:55:25 +0800 Subject: [PATCH 093/175] chore: bump version to 0.1.86.2 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 8e56ec6d..9adbe2fa 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.86.1 +0.1.86.2 From 756b09b6b812c0acf9b606e959eb68ae31d405a0 Mon Sep 17 00:00:00 2001 From: erio Date: Fri, 27 Feb 2026 09:30:44 +0800 Subject: [PATCH 094/175] feat: replace gemini-3-pro-image with gemini-3.1-flash-image - Add migration 060 to update model_mapping for all antigravity accounts - Remove gemini-3-pro-image and gemini-3-pro-image-preview mappings - Add gemini-3.1-flash-image and gemini-3.1-flash-image-preview mappings - Update frontend usage window to show GImage for new model - Update isImageGenerationModel to support new model --- backend/internal/domain/constants.go | 6 ++- .../service/antigravity_gateway_service.go | 7 ++- backend/internal/service/gateway_service.go | 2 +- ..._gemini31_flash_image_to_model_mapping.sql | 46 +++++++++++++++++++ .../account/AccountStatusIndicator.vue | 1 + .../components/account/AccountUsageCell.vue | 4 +- frontend/src/components/keys/UseKeyModal.vue | 2 +- 
frontend/src/composables/useModelWhitelist.ts | 2 +- frontend/src/i18n/locales/en.ts | 4 +- frontend/src/i18n/locales/zh.ts | 4 +- 10 files changed, 65 insertions(+), 13 deletions(-) create mode 100644 backend/migrations/060_add_gemini31_flash_image_to_model_mapping.sql diff --git a/backend/internal/domain/constants.go b/backend/internal/domain/constants.go index c41aa65f..2c9ddfb9 100644 --- a/backend/internal/domain/constants.go +++ b/backend/internal/domain/constants.go @@ -92,16 +92,18 @@ var DefaultAntigravityModelMapping = map[string]string{ "gemini-3-flash": "gemini-3-flash", "gemini-3-pro-high": "gemini-3-pro-high", "gemini-3-pro-low": "gemini-3-pro-low", - "gemini-3-pro-image": "gemini-3-pro-image", // Gemini 3 preview 映射 "gemini-3-flash-preview": "gemini-3-flash", "gemini-3-pro-preview": "gemini-3-pro-high", - "gemini-3-pro-image-preview": "gemini-3-pro-image", // Gemini 3.1 白名单 "gemini-3.1-pro-high": "gemini-3.1-pro-high", "gemini-3.1-pro-low": "gemini-3.1-pro-low", // Gemini 3.1 preview 映射 "gemini-3.1-pro-preview": "gemini-3.1-pro-high", + // Gemini 3.1 image 白名单 + "gemini-3.1-flash-image": "gemini-3.1-flash-image", + // Gemini 3.1 image preview 映射 + "gemini-3.1-flash-image-preview": "gemini-3.1-flash-image", // 其他官方模型 "gpt-oss-120b-medium": "gpt-oss-120b-medium", "tab_flash_lite_preview": "tab_flash_lite_preview", diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 108ff9ab..2bd6195a 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -3757,14 +3757,17 @@ func (s *AntigravityGatewayService) extractImageSize(body []byte) string { } // isImageGenerationModel 判断模型是否为图片生成模型 -// 支持的模型:gemini-3-pro-image, gemini-3-pro-image-preview, gemini-2.5-flash-image 等 +// 支持的模型:gemini-3.1-flash-image, gemini-3-pro-image, gemini-2.5-flash-image 等 func isImageGenerationModel(model string) bool { modelLower := 
strings.ToLower(model) // 移除 models/ 前缀 modelLower = strings.TrimPrefix(modelLower, "models/") // 精确匹配或前缀匹配 - return modelLower == "gemini-3-pro-image" || + return modelLower == "gemini-3.1-flash-image" || + modelLower == "gemini-3.1-flash-image-preview" || + strings.HasPrefix(modelLower, "gemini-3.1-flash-image-") || + modelLower == "gemini-3-pro-image" || modelLower == "gemini-3-pro-image-preview" || strings.HasPrefix(modelLower, "gemini-3-pro-image-") || modelLower == "gemini-2.5-flash-image" || diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 5c14e7f9..23ace552 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -470,7 +470,7 @@ type ForwardResult struct { FirstTokenMs *int // 首字时间(流式请求) ClientDisconnect bool // 客户端是否在流式传输过程中断开 - // 图片生成计费字段(仅 gemini-3-pro-image 使用) + // 图片生成计费字段(图片生成模型使用) ImageCount int // 生成的图片数量 ImageSize string // 图片尺寸 "1K", "2K", "4K" diff --git a/backend/migrations/060_add_gemini31_flash_image_to_model_mapping.sql b/backend/migrations/060_add_gemini31_flash_image_to_model_mapping.sql new file mode 100644 index 00000000..de9d5776 --- /dev/null +++ b/backend/migrations/060_add_gemini31_flash_image_to_model_mapping.sql @@ -0,0 +1,46 @@ +-- Add gemini-3.1-flash-image and gemini-3.1-flash-image-preview to model_mapping +-- +-- Background: +-- Antigravity now supports gemini-3.1-flash-image as the latest image generation model, +-- replacing the previous gemini-3-pro-image. 
+-- +-- Strategy: +-- Directly overwrite the entire model_mapping with updated mappings +-- This ensures consistency with DefaultAntigravityModelMapping in constants.go + +UPDATE accounts +SET credentials = jsonb_set( + credentials, + '{model_mapping}', + '{ + "claude-opus-4-6-thinking": "claude-opus-4-6-thinking", + "claude-opus-4-6": "claude-opus-4-6-thinking", + "claude-opus-4-5-thinking": "claude-opus-4-6-thinking", + "claude-opus-4-5-20251101": "claude-opus-4-6-thinking", + "claude-sonnet-4-6": "claude-sonnet-4-6", + "claude-sonnet-4-5": "claude-sonnet-4-5", + "claude-sonnet-4-5-thinking": "claude-sonnet-4-5-thinking", + "claude-sonnet-4-5-20250929": "claude-sonnet-4-5", + "claude-haiku-4-5": "claude-sonnet-4-5", + "claude-haiku-4-5-20251001": "claude-sonnet-4-5", + "gemini-2.5-flash": "gemini-2.5-flash", + "gemini-2.5-flash-lite": "gemini-2.5-flash-lite", + "gemini-2.5-flash-thinking": "gemini-2.5-flash-thinking", + "gemini-2.5-pro": "gemini-2.5-pro", + "gemini-3-flash": "gemini-3-flash", + "gemini-3-pro-high": "gemini-3-pro-high", + "gemini-3-pro-low": "gemini-3-pro-low", + "gemini-3-flash-preview": "gemini-3-flash", + "gemini-3-pro-preview": "gemini-3-pro-high", + "gemini-3.1-pro-high": "gemini-3.1-pro-high", + "gemini-3.1-pro-low": "gemini-3.1-pro-low", + "gemini-3.1-pro-preview": "gemini-3.1-pro-high", + "gemini-3.1-flash-image": "gemini-3.1-flash-image", + "gemini-3.1-flash-image-preview": "gemini-3.1-flash-image", + "gpt-oss-120b-medium": "gpt-oss-120b-medium", + "tab_flash_lite_preview": "tab_flash_lite_preview" + }'::jsonb +) +WHERE platform = 'antigravity' + AND deleted_at IS NULL + AND credentials->'model_mapping' IS NOT NULL; diff --git a/frontend/src/components/account/AccountStatusIndicator.vue b/frontend/src/components/account/AccountStatusIndicator.vue index 93de5959..45e75e92 100644 --- a/frontend/src/components/account/AccountStatusIndicator.vue +++ b/frontend/src/components/account/AccountStatusIndicator.vue @@ -180,6 +180,7 @@ const 
formatScopeName = (scope: string): string => { 'gemini-3.1-pro-high': 'G3PH', 'gemini-3.1-pro-low': 'G3PL', 'gemini-3-pro-image': 'G3PI', + 'gemini-3.1-flash-image': 'GImage', // 其他 'gpt-oss-120b-medium': 'GPT120', 'tab_flash_lite_preview': 'TabFL', diff --git a/frontend/src/components/account/AccountUsageCell.vue b/frontend/src/components/account/AccountUsageCell.vue index b47b4115..9ff58d64 100644 --- a/frontend/src/components/account/AccountUsageCell.vue +++ b/frontend/src/components/account/AccountUsageCell.vue @@ -397,8 +397,8 @@ const antigravity3ProUsageFromAPI = computed(() => // Gemini 3 Flash from API const antigravity3FlashUsageFromAPI = computed(() => getAntigravityUsageFromAPI(['gemini-3-flash'])) -// Gemini 3 Image from API -const antigravity3ImageUsageFromAPI = computed(() => getAntigravityUsageFromAPI(['gemini-3-pro-image'])) +// Gemini Image from API +const antigravity3ImageUsageFromAPI = computed(() => getAntigravityUsageFromAPI(['gemini-3.1-flash-image'])) // Claude from API (all Claude model variants) const antigravityClaudeUsageFromAPI = computed(() => diff --git a/frontend/src/components/keys/UseKeyModal.vue b/frontend/src/components/keys/UseKeyModal.vue index fc97fe90..abe4e255 100644 --- a/frontend/src/components/keys/UseKeyModal.vue +++ b/frontend/src/components/keys/UseKeyModal.vue @@ -575,7 +575,7 @@ function generateOpenCodeConfig(platform: string, baseUrl: string, apiKey: strin 'gemini-3-pro-low': { name: 'Gemini 3 Pro Low' }, 'gemini-3-pro-high': { name: 'Gemini 3 Pro High' }, 'gemini-3-pro-preview': { name: 'Gemini 3 Pro Preview' }, - 'gemini-3-pro-image': { name: 'Gemini 3 Pro Image' } + 'gemini-3.1-flash-image': { name: 'Gemini 3.1 Flash Image' } } const claudeModels = { 'claude-opus-4-5-thinking': { name: 'Claude Opus 4.5 Thinking' }, diff --git a/frontend/src/composables/useModelWhitelist.ts b/frontend/src/composables/useModelWhitelist.ts index ddc5661b..aa1ba3d2 100644 --- a/frontend/src/composables/useModelWhitelist.ts +++ 
b/frontend/src/composables/useModelWhitelist.ts @@ -88,10 +88,10 @@ const antigravityModels = [ 'gemini-3-flash', 'gemini-3-pro-high', 'gemini-3-pro-low', - 'gemini-3-pro-image', // Gemini 3.1 系列 'gemini-3.1-pro-high', 'gemini-3.1-pro-low', + 'gemini-3.1-flash-image', // 其他 'gpt-oss-120b-medium', 'tab_flash_lite_preview' diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts index cdd9ad19..45071c1d 100644 --- a/frontend/src/i18n/locales/en.ts +++ b/frontend/src/i18n/locales/en.ts @@ -1133,7 +1133,7 @@ export default { }, imagePricing: { title: 'Image Generation Pricing', - description: 'Configure pricing for gemini-3-pro-image model. Leave empty to use default prices.' + description: 'Configure pricing for image generation models. Leave empty to use default prices.' }, soraPricing: { title: 'Sora Per-Request Pricing', @@ -2046,7 +2046,7 @@ export default { geminiFlashDaily: 'Flash', gemini3Pro: 'G3P', gemini3Flash: 'G3F', - gemini3Image: 'G3I', + gemini3Image: 'GImage', claude: 'Claude' }, tier: { diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index 8ef50267..02383cc1 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -1220,7 +1220,7 @@ export default { }, imagePricing: { title: '图片生成计费', - description: '配置 gemini-3-pro-image 模型的图片生成价格,留空则使用默认价格' + description: '配置图片生成模型的图片生成价格,留空则使用默认价格' }, soraPricing: { title: 'Sora 按次计费', @@ -1582,7 +1582,7 @@ export default { geminiFlashDaily: 'Flash', gemini3Pro: 'G3P', gemini3Flash: 'G3F', - gemini3Image: 'G3I', + gemini3Image: 'GImage', claude: 'Claude' }, tier: { From 3d1520212410a64a564a88735e826c12ea88d04f Mon Sep 17 00:00:00 2001 From: erio Date: Fri, 27 Feb 2026 09:30:58 +0800 Subject: [PATCH 095/175] chore: bump version to 0.1.86.3 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 8e56ec6d..91c269e5 100644 --- 
a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.86.1 +0.1.86.3 From 396044e3549c0cb5bb5bd5957fcaacc3aa85a65c Mon Sep 17 00:00:00 2001 From: erio Date: Fri, 27 Feb 2026 09:36:23 +0800 Subject: [PATCH 096/175] fix: gofmt alignment in constants.go --- backend/internal/domain/constants.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/backend/internal/domain/constants.go b/backend/internal/domain/constants.go index 2c9ddfb9..d56dfa86 100644 --- a/backend/internal/domain/constants.go +++ b/backend/internal/domain/constants.go @@ -89,12 +89,12 @@ var DefaultAntigravityModelMapping = map[string]string{ "gemini-2.5-flash-thinking": "gemini-2.5-flash-thinking", "gemini-2.5-pro": "gemini-2.5-pro", // Gemini 3 白名单 - "gemini-3-flash": "gemini-3-flash", - "gemini-3-pro-high": "gemini-3-pro-high", - "gemini-3-pro-low": "gemini-3-pro-low", + "gemini-3-flash": "gemini-3-flash", + "gemini-3-pro-high": "gemini-3-pro-high", + "gemini-3-pro-low": "gemini-3-pro-low", // Gemini 3 preview 映射 - "gemini-3-flash-preview": "gemini-3-flash", - "gemini-3-pro-preview": "gemini-3-pro-high", + "gemini-3-flash-preview": "gemini-3-flash", + "gemini-3-pro-preview": "gemini-3-pro-high", // Gemini 3.1 白名单 "gemini-3.1-pro-high": "gemini-3.1-pro-high", "gemini-3.1-pro-low": "gemini-3.1-pro-low", From 6da2f54e50db0723bdb847d763d0767ca7079c49 Mon Sep 17 00:00:00 2001 From: erio Date: Fri, 27 Feb 2026 12:18:22 +0800 Subject: [PATCH 097/175] refactor: decouple claude max cache policy and add tokenizer --- backend/go.mod | 3 + backend/go.sum | 6 + .../claude_max_cache_billing_policy.go | 500 ++++++++++++++++++ .../service/claude_max_simulation_test.go | 82 ++- backend/internal/service/claude_tokenizer.go | 41 ++ .../gateway_record_usage_claude_max_test.go | 64 ++- backend/internal/service/gateway_service.go | 251 +-------- 7 files changed, 695 insertions(+), 252 deletions(-) create mode 100644 
backend/internal/service/claude_max_cache_billing_policy.go create mode 100644 backend/internal/service/claude_tokenizer.go diff --git a/backend/go.mod b/backend/go.mod index ec3cf509..70b675fa 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -59,6 +59,7 @@ require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/distribution/reference v0.6.0 // indirect + github.com/dlclark/regexp2 v1.10.0 // indirect github.com/docker/docker v28.5.1+incompatible // indirect github.com/docker/go-connections v0.6.0 // indirect github.com/docker/go-units v0.5.0 // indirect @@ -109,6 +110,8 @@ require ( github.com/opencontainers/image-spec v1.1.1 // indirect github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/pkg/errors v0.9.1 // indirect + github.com/pkoukk/tiktoken-go v0.1.8 // indirect + github.com/pkoukk/tiktoken-go-loader v0.0.2 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/quic-go/qpack v0.6.0 // indirect diff --git a/backend/go.sum b/backend/go.sum index 9e9fc545..6b4c2f7c 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -64,6 +64,8 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/dlclark/regexp2 v1.10.0 h1:+/GIL799phkJqYW+3YbOd8LCcbHzT0Pbo8zl70MHsq0= +github.com/dlclark/regexp2 v1.10.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/docker/docker v28.5.1+incompatible h1:Bm8DchhSD2J6PsFzxC35TZo4TLGR2PdW/E69rU45NhM= github.com/docker/docker 
v28.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= @@ -223,6 +225,10 @@ github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6 github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkoukk/tiktoken-go v0.1.8 h1:85ENo+3FpWgAACBaEUVp+lctuTcYUO7BtmfhlN/QTRo= +github.com/pkoukk/tiktoken-go v0.1.8/go.mod h1:9NiV+i9mJKGj1rYOT+njbv+ZwA/zJxYdewGl6qVatpg= +github.com/pkoukk/tiktoken-go-loader v0.0.2 h1:LUKws63GV3pVHwH1srkBplBv+7URgmOmhSkRxsIvsK4= +github.com/pkoukk/tiktoken-go-loader v0.0.2/go.mod h1:4mIkYyZooFlnenDlormIo6cd5wrlUKNr97wp9nGgEKo= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= diff --git a/backend/internal/service/claude_max_cache_billing_policy.go b/backend/internal/service/claude_max_cache_billing_policy.go new file mode 100644 index 00000000..5f2e2def --- /dev/null +++ b/backend/internal/service/claude_max_cache_billing_policy.go @@ -0,0 +1,500 @@ +package service + +import ( + "encoding/json" + "strings" + + "github.com/Wei-Shaw/sub2api/internal/pkg/claude" + "github.com/Wei-Shaw/sub2api/internal/pkg/logger" + "github.com/tidwall/gjson" +) + +type claudeMaxCacheBillingOutcome struct { + Simulated bool + ForcedCache1H bool +} + +func applyClaudeMaxCacheBillingPolicy(input *RecordUsageInput) claudeMaxCacheBillingOutcome { + var out claudeMaxCacheBillingOutcome + if !shouldApplyClaudeMaxBillingRules(input) { + return out + } + + if input 
== nil || input.Result == nil { + return out + } + result := input.Result + usage := &result.Usage + accountID := int64(0) + if input.Account != nil { + accountID = input.Account.ID + } + + if hasCacheCreationTokens(*usage) { + before5m := usage.CacheCreation5mTokens + before1h := usage.CacheCreation1hTokens + out.ForcedCache1H = safelyForceCacheCreationTo1H(usage) + if out.ForcedCache1H { + logger.LegacyPrintf("service.gateway", "force_claude_max_cache_1h: model=%s account=%d cache_creation_5m:%d->%d cache_creation_1h:%d->%d", + result.Model, + accountID, + before5m, + usage.CacheCreation5mTokens, + before1h, + usage.CacheCreation1hTokens, + ) + } + return out + } + + if !shouldSimulateClaudeMaxUsage(input) { + return out + } + beforeInputTokens := usage.InputTokens + out.Simulated = safelyApplyClaudeMaxUsageSimulation(result, input.ParsedRequest) + if out.Simulated { + logger.LegacyPrintf("service.gateway", "simulate_claude_max_usage: model=%s account=%d input_tokens:%d->%d cache_creation_1h=%d", + result.Model, + accountID, + beforeInputTokens, + usage.InputTokens, + usage.CacheCreation1hTokens, + ) + } + return out +} + +func isClaudeFamilyModel(model string) bool { + normalized := strings.ToLower(strings.TrimSpace(claude.NormalizeModelID(model))) + if normalized == "" { + return false + } + return strings.Contains(normalized, "claude-") +} + +func shouldApplyClaudeMaxBillingRules(input *RecordUsageInput) bool { + if input == nil || input.Result == nil || input.APIKey == nil || input.APIKey.Group == nil { + return false + } + group := input.APIKey.Group + if !group.SimulateClaudeMaxEnabled || group.Platform != PlatformAnthropic { + return false + } + + model := input.Result.Model + if model == "" && input.ParsedRequest != nil { + model = input.ParsedRequest.Model + } + if !isClaudeFamilyModel(model) { + return false + } + return true +} + +func hasCacheCreationTokens(usage ClaudeUsage) bool { + return usage.CacheCreationInputTokens > 0 || 
usage.CacheCreation5mTokens > 0 || usage.CacheCreation1hTokens > 0 +} + +func shouldSimulateClaudeMaxUsage(input *RecordUsageInput) bool { + if !shouldApplyClaudeMaxBillingRules(input) { + return false + } + if !hasClaudeCacheSignals(input.ParsedRequest) { + return false + } + usage := input.Result.Usage + if usage.InputTokens <= 0 { + return false + } + if hasCacheCreationTokens(usage) { + return false + } + return true +} + +func forceCacheCreationTo1H(usage *ClaudeUsage) bool { + if usage == nil || !hasCacheCreationTokens(*usage) { + return false + } + + before5m := usage.CacheCreation5mTokens + before1h := usage.CacheCreation1hTokens + beforeAgg := usage.CacheCreationInputTokens + + _ = applyCacheTTLOverride(usage, "1h") + total := usage.CacheCreation5mTokens + usage.CacheCreation1hTokens + if total <= 0 { + total = usage.CacheCreationInputTokens + } + if total <= 0 { + return false + } + + usage.CacheCreation5mTokens = 0 + usage.CacheCreation1hTokens = total + usage.CacheCreationInputTokens = total + + return before5m != usage.CacheCreation5mTokens || + before1h != usage.CacheCreation1hTokens || + beforeAgg != usage.CacheCreationInputTokens +} + +func safelyApplyClaudeMaxUsageSimulation(result *ForwardResult, parsed *ParsedRequest) (changed bool) { + defer func() { + if r := recover(); r != nil { + logger.LegacyPrintf("service.gateway", "simulate_claude_max_usage skipped: panic=%v", r) + changed = false + } + }() + return applyClaudeMaxUsageSimulation(result, parsed) +} + +func safelyForceCacheCreationTo1H(usage *ClaudeUsage) (changed bool) { + defer func() { + if r := recover(); r != nil { + logger.LegacyPrintf("service.gateway", "force_cache_creation_1h skipped: panic=%v", r) + changed = false + } + }() + return forceCacheCreationTo1H(usage) +} + +func applyClaudeMaxUsageSimulation(result *ForwardResult, parsed *ParsedRequest) bool { + if result == nil { + return false + } + return projectUsageToClaudeMax1H(&result.Usage, parsed) +} + +func 
projectUsageToClaudeMax1H(usage *ClaudeUsage, parsed *ParsedRequest) bool { + if usage == nil { + return false + } + totalWindowTokens := usage.InputTokens + usage.CacheCreation5mTokens + usage.CacheCreation1hTokens + if totalWindowTokens <= 1 { + return false + } + + simulatedInputTokens := computeClaudeMaxProjectedInputTokens(totalWindowTokens, parsed) + if simulatedInputTokens <= 0 { + simulatedInputTokens = 1 + } + if simulatedInputTokens >= totalWindowTokens { + simulatedInputTokens = totalWindowTokens - 1 + } + + cacheCreation1hTokens := totalWindowTokens - simulatedInputTokens + if usage.InputTokens == simulatedInputTokens && + usage.CacheCreation5mTokens == 0 && + usage.CacheCreation1hTokens == cacheCreation1hTokens && + usage.CacheCreationInputTokens == cacheCreation1hTokens { + return false + } + + usage.InputTokens = simulatedInputTokens + usage.CacheCreation5mTokens = 0 + usage.CacheCreation1hTokens = cacheCreation1hTokens + usage.CacheCreationInputTokens = cacheCreation1hTokens + return true +} + +type claudeCacheProjection struct { + HasBreakpoint bool + BreakpointCount int + TotalEstimatedTokens int + TailEstimatedTokens int +} + +func computeClaudeMaxProjectedInputTokens(totalWindowTokens int, parsed *ParsedRequest) int { + if totalWindowTokens <= 1 { + return totalWindowTokens + } + + projection := analyzeClaudeCacheProjection(parsed) + if !projection.HasBreakpoint || projection.TotalEstimatedTokens <= 0 || projection.TailEstimatedTokens <= 0 { + return totalWindowTokens + } + + totalEstimate := int64(projection.TotalEstimatedTokens) + tailEstimate := int64(projection.TailEstimatedTokens) + if tailEstimate > totalEstimate { + tailEstimate = totalEstimate + } + + scaled := (int64(totalWindowTokens)*tailEstimate + totalEstimate/2) / totalEstimate + if scaled <= 0 { + scaled = 1 + } + if scaled >= int64(totalWindowTokens) { + scaled = int64(totalWindowTokens - 1) + } + return int(scaled) +} + +func hasClaudeCacheSignals(parsed *ParsedRequest) bool { + 
if parsed == nil { + return false + } + if hasTopLevelEphemeralCacheControl(parsed) { + return true + } + return countExplicitCacheBreakpoints(parsed) > 0 +} + +func hasTopLevelEphemeralCacheControl(parsed *ParsedRequest) bool { + if parsed == nil || len(parsed.Body) == 0 { + return false + } + cacheType := strings.TrimSpace(gjson.GetBytes(parsed.Body, "cache_control.type").String()) + return strings.EqualFold(cacheType, "ephemeral") +} + +func analyzeClaudeCacheProjection(parsed *ParsedRequest) claudeCacheProjection { + var projection claudeCacheProjection + if parsed == nil { + return projection + } + + total := 0 + lastBreakpointAt := -1 + + switch system := parsed.System.(type) { + case string: + total += claudeMaxMessageOverheadTokens + estimateClaudeTextTokens(system) + case []any: + for _, raw := range system { + block, ok := raw.(map[string]any) + if !ok { + total += claudeMaxUnknownContentTokens + continue + } + total += estimateClaudeBlockTokens(block) + if hasEphemeralCacheControl(block) { + lastBreakpointAt = total + projection.BreakpointCount++ + projection.HasBreakpoint = true + } + } + } + + for _, rawMsg := range parsed.Messages { + total += claudeMaxMessageOverheadTokens + msg, ok := rawMsg.(map[string]any) + if !ok { + total += claudeMaxUnknownContentTokens + continue + } + content, exists := msg["content"] + if !exists { + continue + } + msgTokens, msgLastBreak, msgBreakCount := estimateClaudeContentTokens(content) + total += msgTokens + if msgBreakCount > 0 { + lastBreakpointAt = total - msgTokens + msgLastBreak + projection.BreakpointCount += msgBreakCount + projection.HasBreakpoint = true + } + } + + if total <= 0 { + total = 1 + } + projection.TotalEstimatedTokens = total + + if projection.HasBreakpoint && lastBreakpointAt >= 0 { + tail := total - lastBreakpointAt + if tail <= 0 { + tail = 1 + } + projection.TailEstimatedTokens = tail + return projection + } + + if hasTopLevelEphemeralCacheControl(parsed) { + tail := 
estimateLastUserMessageTokens(parsed) + if tail <= 0 { + tail = 1 + } + projection.HasBreakpoint = true + projection.BreakpointCount = 1 + projection.TailEstimatedTokens = tail + } + return projection +} + +func countExplicitCacheBreakpoints(parsed *ParsedRequest) int { + if parsed == nil { + return 0 + } + total := 0 + if system, ok := parsed.System.([]any); ok { + for _, raw := range system { + if block, ok := raw.(map[string]any); ok && hasEphemeralCacheControl(block) { + total++ + } + } + } + for _, rawMsg := range parsed.Messages { + msg, ok := rawMsg.(map[string]any) + if !ok { + continue + } + content, ok := msg["content"].([]any) + if !ok { + continue + } + for _, raw := range content { + if block, ok := raw.(map[string]any); ok && hasEphemeralCacheControl(block) { + total++ + } + } + } + return total +} + +func hasEphemeralCacheControl(block map[string]any) bool { + if block == nil { + return false + } + raw, ok := block["cache_control"] + if !ok || raw == nil { + return false + } + switch cc := raw.(type) { + case map[string]any: + cacheType, _ := cc["type"].(string) + return strings.EqualFold(strings.TrimSpace(cacheType), "ephemeral") + case map[string]string: + return strings.EqualFold(strings.TrimSpace(cc["type"]), "ephemeral") + default: + return false + } +} + +func estimateClaudeContentTokens(content any) (tokens int, lastBreakAt int, breakpointCount int) { + switch value := content.(type) { + case string: + return estimateClaudeTextTokens(value), -1, 0 + case []any: + total := 0 + lastBreak := -1 + breaks := 0 + for _, raw := range value { + block, ok := raw.(map[string]any) + if !ok { + total += claudeMaxUnknownContentTokens + continue + } + total += estimateClaudeBlockTokens(block) + if hasEphemeralCacheControl(block) { + lastBreak = total + breaks++ + } + } + return total, lastBreak, breaks + default: + return estimateStructuredTokens(value), -1, 0 + } +} + +func estimateClaudeBlockTokens(block map[string]any) int { + if block == nil { + return 
claudeMaxUnknownContentTokens + } + tokens := claudeMaxBlockOverheadTokens + blockType, _ := block["type"].(string) + switch blockType { + case "text": + if text, ok := block["text"].(string); ok { + tokens += estimateClaudeTextTokens(text) + } + case "tool_result": + if content, ok := block["content"]; ok { + nested, _, _ := estimateClaudeContentTokens(content) + tokens += nested + } + case "tool_use": + if name, ok := block["name"].(string); ok { + tokens += estimateClaudeTextTokens(name) + } + if input, ok := block["input"]; ok { + tokens += estimateStructuredTokens(input) + } + default: + if text, ok := block["text"].(string); ok { + tokens += estimateClaudeTextTokens(text) + } else if content, ok := block["content"]; ok { + nested, _, _ := estimateClaudeContentTokens(content) + tokens += nested + } + } + if tokens <= claudeMaxBlockOverheadTokens { + tokens += claudeMaxUnknownContentTokens + } + return tokens +} + +func estimateLastUserMessageTokens(parsed *ParsedRequest) int { + if parsed == nil || len(parsed.Messages) == 0 { + return 0 + } + for i := len(parsed.Messages) - 1; i >= 0; i-- { + msg, ok := parsed.Messages[i].(map[string]any) + if !ok { + continue + } + role, _ := msg["role"].(string) + if !strings.EqualFold(strings.TrimSpace(role), "user") { + continue + } + tokens, _, _ := estimateClaudeContentTokens(msg["content"]) + return claudeMaxMessageOverheadTokens + tokens + } + return 0 +} + +func estimateStructuredTokens(v any) int { + if v == nil { + return 0 + } + raw, err := json.Marshal(v) + if err != nil { + return claudeMaxUnknownContentTokens + } + return estimateClaudeTextTokens(string(raw)) +} + +func estimateClaudeTextTokens(text string) int { + if tokens, ok := estimateTokensByThirdPartyTokenizer(text); ok { + return tokens + } + return estimateClaudeTextTokensHeuristic(text) +} + +func estimateClaudeTextTokensHeuristic(text string) int { + normalized := strings.Join(strings.Fields(strings.TrimSpace(text)), " ") + if normalized == "" { + 
return 0 + } + asciiChars := 0 + nonASCIIChars := 0 + for _, r := range normalized { + if r <= 127 { + asciiChars++ + } else { + nonASCIIChars++ + } + } + tokens := nonASCIIChars + if asciiChars > 0 { + tokens += (asciiChars + 3) / 4 + } + if words := len(strings.Fields(normalized)); words > tokens { + tokens = words + } + if tokens <= 0 { + return 1 + } + return tokens +} diff --git a/backend/internal/service/claude_max_simulation_test.go b/backend/internal/service/claude_max_simulation_test.go index 8f4690a0..3d2ae2e6 100644 --- a/backend/internal/service/claude_max_simulation_test.go +++ b/backend/internal/service/claude_max_simulation_test.go @@ -1,6 +1,9 @@ package service -import "testing" +import ( + "strings" + "testing" +) func TestProjectUsageToClaudeMax1H_Conservation(t *testing.T) { usage := &ClaudeUsage{ @@ -13,8 +16,18 @@ func TestProjectUsageToClaudeMax1H_Conservation(t *testing.T) { Model: "claude-sonnet-4-5", Messages: []any{ map[string]any{ - "role": "user", - "content": "请帮我总结这段代码并给出优化建议", + "role": "user", + "content": []any{ + map[string]any{ + "type": "text", + "text": strings.Repeat("cached context ", 200), + "cache_control": map[string]any{"type": "ephemeral"}, + }, + map[string]any{ + "type": "text", + "text": "summarize quickly", + }, + }, }, }, } @@ -34,6 +47,9 @@ func TestProjectUsageToClaudeMax1H_Conservation(t *testing.T) { if usage.InputTokens <= 0 || usage.InputTokens >= 1200 { t.Fatalf("simulated input out of range, got=%d", usage.InputTokens) } + if usage.InputTokens > 100 { + t.Fatalf("simulated input should stay near cache breakpoint tail, got=%d", usage.InputTokens) + } if usage.CacheCreation1hTokens <= 0 { t.Fatalf("cache_creation_1h should be > 0, got=%d", usage.CacheCreation1hTokens) } @@ -42,22 +58,29 @@ func TestProjectUsageToClaudeMax1H_Conservation(t *testing.T) { } } -func TestComputeClaudeMaxSimulatedInputTokens_Deterministic(t *testing.T) { +func TestComputeClaudeMaxProjectedInputTokens_Deterministic(t *testing.T) { 
parsed := &ParsedRequest{ Model: "claude-opus-4-5", Messages: []any{ map[string]any{ "role": "user", "content": []any{ - map[string]any{"type": "text", "text": "请整理以下日志并定位错误根因"}, - map[string]any{"type": "tool_use", "name": "grep_logs"}, + map[string]any{ + "type": "text", + "text": "build context", + "cache_control": map[string]any{"type": "ephemeral"}, + }, + map[string]any{ + "type": "text", + "text": "what is failing now", + }, }, }, }, } - got1 := computeClaudeMaxSimulatedInputTokens(4096, parsed) - got2 := computeClaudeMaxSimulatedInputTokens(4096, parsed) + got1 := computeClaudeMaxProjectedInputTokens(4096, parsed) + got2 := computeClaudeMaxProjectedInputTokens(4096, parsed) if got1 != got2 { t.Fatalf("non-deterministic input tokens: %d != %d", got1, got2) } @@ -78,13 +101,54 @@ func TestShouldSimulateClaudeMaxUsage(t *testing.T) { CacheCreation1hTokens: 0, }, }, + ParsedRequest: &ParsedRequest{ + Messages: []any{ + map[string]any{ + "role": "user", + "content": []any{ + map[string]any{ + "type": "text", + "text": "cached", + "cache_control": map[string]any{"type": "ephemeral"}, + }, + map[string]any{ + "type": "text", + "text": "tail", + }, + }, + }, + }, + }, APIKey: &APIKey{Group: group}, } if !shouldSimulateClaudeMaxUsage(input) { - t.Fatalf("expected simulate=true for claude group without cache creation") + t.Fatalf("expected simulate=true for claude group with cache signal") } + input.ParsedRequest = &ParsedRequest{ + Messages: []any{ + map[string]any{"role": "user", "content": "no cache signal"}, + }, + } + if shouldSimulateClaudeMaxUsage(input) { + t.Fatalf("expected simulate=false when request has no cache signal") + } + + input.ParsedRequest = &ParsedRequest{ + Messages: []any{ + map[string]any{ + "role": "user", + "content": []any{ + map[string]any{ + "type": "text", + "text": "cached", + "cache_control": map[string]any{"type": "ephemeral"}, + }, + }, + }, + }, + } input.Result.Usage.CacheCreationInputTokens = 100 if 
shouldSimulateClaudeMaxUsage(input) { t.Fatalf("expected simulate=false when cache creation already exists") diff --git a/backend/internal/service/claude_tokenizer.go b/backend/internal/service/claude_tokenizer.go new file mode 100644 index 00000000..61f5e961 --- /dev/null +++ b/backend/internal/service/claude_tokenizer.go @@ -0,0 +1,41 @@ +package service + +import ( + "sync" + + tiktoken "github.com/pkoukk/tiktoken-go" + tiktokenloader "github.com/pkoukk/tiktoken-go-loader" +) + +var ( + claudeTokenizerOnce sync.Once + claudeTokenizer *tiktoken.Tiktoken +) + +func getClaudeTokenizer() *tiktoken.Tiktoken { + claudeTokenizerOnce.Do(func() { + // Use offline loader to avoid runtime dictionary download. + tiktoken.SetBpeLoader(tiktokenloader.NewOfflineLoader()) + // Use a high-capacity tokenizer as the default approximation for Claude payloads. + enc, err := tiktoken.GetEncoding(tiktoken.MODEL_O200K_BASE) + if err != nil { + enc, err = tiktoken.GetEncoding(tiktoken.MODEL_CL100K_BASE) + } + if err == nil { + claudeTokenizer = enc + } + }) + return claudeTokenizer +} + +func estimateTokensByThirdPartyTokenizer(text string) (int, bool) { + enc := getClaudeTokenizer() + if enc == nil { + return 0, false + } + tokens := len(enc.EncodeOrdinary(text)) + if tokens <= 0 { + return 0, false + } + return tokens, true +} diff --git a/backend/internal/service/gateway_record_usage_claude_max_test.go b/backend/internal/service/gateway_record_usage_claude_max_test.go index a4c8850c..445519f8 100644 --- a/backend/internal/service/gateway_record_usage_claude_max_test.go +++ b/backend/internal/service/gateway_record_usage_claude_max_test.go @@ -50,8 +50,18 @@ func TestRecordUsage_SimulateClaudeMaxEnabled_ProjectsAndSkipsTTLOverride(t *tes Model: "claude-sonnet-4", Messages: []any{ map[string]any{ - "role": "user", - "content": "please summarize the logs and provide root cause analysis", + "role": "user", + "content": []any{ + map[string]any{ + "type": "text", + "text": "long cached 
context for prior turns", + "cache_control": map[string]any{"type": "ephemeral"}, + }, + map[string]any{ + "type": "text", + "text": "please summarize the logs and provide root cause analysis", + }, + }, }, }, }, @@ -138,3 +148,53 @@ func TestRecordUsage_SimulateClaudeMaxDisabled_AppliesTTLOverride(t *testing.T) require.Equal(t, 0, log.CacheCreation1hTokens) require.True(t, log.CacheTTLOverridden, "TTL override 生效时应打标") } + +func TestRecordUsage_SimulateClaudeMaxEnabled_ExistingCacheCreationForce1H(t *testing.T) { + repo := &usageLogRepoRecordUsageStub{inserted: true} + svc := newGatewayServiceForRecordUsageTest(repo) + + groupID := int64(13) + input := &RecordUsageInput{ + Result: &ForwardResult{ + RequestID: "req-sim-3", + Model: "claude-sonnet-4", + Duration: time.Second, + Usage: ClaudeUsage{ + InputTokens: 20, + CacheCreationInputTokens: 120, + CacheCreation5mTokens: 120, + }, + }, + APIKey: &APIKey{ + ID: 3, + GroupID: &groupID, + Group: &Group{ + ID: groupID, + Platform: PlatformAnthropic, + RateMultiplier: 1, + SimulateClaudeMaxEnabled: true, + }, + }, + User: &User{ID: 4}, + Account: &Account{ + ID: 5, + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Extra: map[string]any{ + "cache_ttl_override_enabled": true, + "cache_ttl_override_target": "5m", + }, + }, + } + + err := svc.RecordUsage(context.Background(), input) + require.NoError(t, err) + require.NotNil(t, repo.last) + + log := repo.last + require.Equal(t, 20, log.InputTokens, "existing cache creation should not project input tokens") + require.Equal(t, 0, log.CacheCreation5mTokens, "existing cache creation should be forced to 1h") + require.Equal(t, 120, log.CacheCreation1hTokens) + require.Equal(t, 120, log.CacheCreationTokens) + require.True(t, log.CacheTTLOverridden, "force-to-1h should mark cache ttl overridden") +} diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 71d69561..53b1fd28 100644 --- 
a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -57,12 +57,9 @@ const ( ) const ( - claudeMaxSimInputMinTokens = 8 - claudeMaxSimInputMaxTokens = 96 - claudeMaxSimBaseOverheadTokens = 8 - claudeMaxSimPerBlockOverhead = 2 - claudeMaxSimSummaryMaxRunes = 160 - claudeMaxSimContextDivisor = 16 + claudeMaxMessageOverheadTokens = 3 + claudeMaxBlockOverheadTokens = 1 + claudeMaxUnknownContentTokens = 4 ) // ForceCacheBillingContextKey 强制缓存计费上下文键 @@ -5575,224 +5572,6 @@ func (s *GatewayService) getUserGroupRateMultiplier(ctx context.Context, userID, return multiplier } -func isClaudeFamilyModel(model string) bool { - normalized := strings.ToLower(strings.TrimSpace(claude.NormalizeModelID(model))) - if normalized == "" { - return false - } - return strings.Contains(normalized, "claude-") -} - -func shouldSimulateClaudeMaxUsage(input *RecordUsageInput) bool { - if input == nil || input.Result == nil || input.APIKey == nil || input.APIKey.Group == nil { - return false - } - group := input.APIKey.Group - if !group.SimulateClaudeMaxEnabled || group.Platform != PlatformAnthropic { - return false - } - - model := input.Result.Model - if model == "" && input.ParsedRequest != nil { - model = input.ParsedRequest.Model - } - if !isClaudeFamilyModel(model) { - return false - } - - usage := input.Result.Usage - if usage.InputTokens <= 0 { - return false - } - if usage.CacheCreationInputTokens > 0 || usage.CacheCreation5mTokens > 0 || usage.CacheCreation1hTokens > 0 { - return false - } - return true -} - -func applyClaudeMaxUsageSimulation(result *ForwardResult, parsed *ParsedRequest) bool { - if result == nil { - return false - } - return projectUsageToClaudeMax1H(&result.Usage, parsed) -} - -func projectUsageToClaudeMax1H(usage *ClaudeUsage, parsed *ParsedRequest) bool { - if usage == nil { - return false - } - totalWindowTokens := usage.InputTokens + usage.CacheCreation5mTokens + usage.CacheCreation1hTokens - if totalWindowTokens <= 
1 { - return false - } - - simulatedInputTokens := computeClaudeMaxSimulatedInputTokens(totalWindowTokens, parsed) - if simulatedInputTokens <= 0 { - simulatedInputTokens = 1 - } - if simulatedInputTokens >= totalWindowTokens { - simulatedInputTokens = totalWindowTokens - 1 - } - - cacheCreation1hTokens := totalWindowTokens - simulatedInputTokens - if usage.InputTokens == simulatedInputTokens && - usage.CacheCreation5mTokens == 0 && - usage.CacheCreation1hTokens == cacheCreation1hTokens && - usage.CacheCreationInputTokens == cacheCreation1hTokens { - return false - } - - usage.InputTokens = simulatedInputTokens - usage.CacheCreation5mTokens = 0 - usage.CacheCreation1hTokens = cacheCreation1hTokens - usage.CacheCreationInputTokens = cacheCreation1hTokens - return true -} - -func computeClaudeMaxSimulatedInputTokens(totalWindowTokens int, parsed *ParsedRequest) int { - if totalWindowTokens <= 1 { - return totalWindowTokens - } - - summary, blockCount := extractTailUserMessageSummary(parsed) - if blockCount <= 0 { - blockCount = 1 - } - - asciiChars := 0 - nonASCIIChars := 0 - for _, r := range summary { - if r <= 127 { - asciiChars++ - continue - } - nonASCIIChars++ - } - - lexicalTokens := nonASCIIChars - if asciiChars > 0 { - lexicalTokens += (asciiChars + 3) / 4 - } - wordCount := len(strings.Fields(summary)) - if wordCount > lexicalTokens { - lexicalTokens = wordCount - } - if lexicalTokens == 0 { - lexicalTokens = 1 - } - - structuralTokens := claudeMaxSimBaseOverheadTokens + blockCount*claudeMaxSimPerBlockOverhead - rawInputTokens := structuralTokens + lexicalTokens - - maxInputTokens := clampInt(totalWindowTokens/claudeMaxSimContextDivisor, claudeMaxSimInputMinTokens, claudeMaxSimInputMaxTokens) - if totalWindowTokens <= claudeMaxSimInputMinTokens+1 { - maxInputTokens = totalWindowTokens - 1 - } - if maxInputTokens <= 0 { - return totalWindowTokens - } - - minInputTokens := 1 - if totalWindowTokens > claudeMaxSimInputMinTokens+1 { - minInputTokens = 
claudeMaxSimInputMinTokens - } - return clampInt(rawInputTokens, minInputTokens, maxInputTokens) -} - -func extractTailUserMessageSummary(parsed *ParsedRequest) (string, int) { - if parsed == nil || len(parsed.Messages) == 0 { - return "", 1 - } - for i := len(parsed.Messages) - 1; i >= 0; i-- { - message, ok := parsed.Messages[i].(map[string]any) - if !ok { - continue - } - role, _ := message["role"].(string) - if !strings.EqualFold(strings.TrimSpace(role), "user") { - continue - } - summary, blockCount := summarizeUserContentBlocks(message["content"]) - if blockCount <= 0 { - blockCount = 1 - } - return summary, blockCount - } - return "", 1 -} - -func summarizeUserContentBlocks(content any) (string, int) { - appendSegment := func(segments []string, raw string) []string { - normalized := strings.Join(strings.Fields(strings.TrimSpace(raw)), " ") - if normalized == "" { - return segments - } - return append(segments, normalized) - } - - switch value := content.(type) { - case string: - return trimClaudeMaxSummary(value), 1 - case []any: - if len(value) == 0 { - return "", 1 - } - segments := make([]string, 0, len(value)) - for _, blockRaw := range value { - block, ok := blockRaw.(map[string]any) - if !ok { - continue - } - blockType, _ := block["type"].(string) - switch blockType { - case "text": - if text, ok := block["text"].(string); ok { - segments = appendSegment(segments, text) - } - case "tool_result": - nestedSummary, _ := summarizeUserContentBlocks(block["content"]) - segments = appendSegment(segments, nestedSummary) - case "tool_use": - if name, ok := block["name"].(string); ok { - segments = appendSegment(segments, name) - } - default: - if text, ok := block["text"].(string); ok { - segments = appendSegment(segments, text) - } - } - } - return trimClaudeMaxSummary(strings.Join(segments, " ")), len(value) - default: - return "", 1 - } -} - -func trimClaudeMaxSummary(summary string) string { - normalized := 
strings.Join(strings.Fields(strings.TrimSpace(summary)), " ") - if normalized == "" { - return "" - } - runes := []rune(normalized) - if len(runes) > claudeMaxSimSummaryMaxRunes { - return string(runes[:claudeMaxSimSummaryMaxRunes]) - } - return normalized -} - -func clampInt(v, minValue, maxValue int) int { - if minValue > maxValue { - return minValue - } - if v < minValue { - return minValue - } - if v > maxValue { - return maxValue - } - return v -} - // RecordUsageInput 记录使用量的输入参数 type RecordUsageInput struct { Result *ForwardResult @@ -5829,25 +5608,15 @@ func (s *GatewayService) RecordUsage(ctx context.Context, input *RecordUsageInpu result.Usage.InputTokens = 0 } - // Claude 分组模拟:将无写缓存 usage 映射为 claude-max 风格的 1h cache creation。 - simulatedClaudeMax := false - if shouldSimulateClaudeMaxUsage(input) { - beforeInputTokens := result.Usage.InputTokens - simulatedClaudeMax = applyClaudeMaxUsageSimulation(result, input.ParsedRequest) - if simulatedClaudeMax { - logger.LegacyPrintf("service.gateway", "simulate_claude_max_usage: model=%s account=%d input_tokens:%d->%d cache_creation_1h=%d", - result.Model, - account.ID, - beforeInputTokens, - result.Usage.InputTokens, - result.Usage.CacheCreation1hTokens, - ) - } - } + // Claude Max cache billing policy (group-level): force existing cache creation to 1h, + // otherwise simulate projection only when request carries cache signals. 
+ claudeMaxOutcome := applyClaudeMaxCacheBillingPolicy(input) + simulatedClaudeMax := claudeMaxOutcome.Simulated + forcedClaudeMax1H := claudeMaxOutcome.ForcedCache1H // Cache TTL Override: 确保计费时 token 分类与账号设置一致 - cacheTTLOverridden := false - if account.IsCacheTTLOverrideEnabled() && !simulatedClaudeMax { + cacheTTLOverridden := forcedClaudeMax1H + if account.IsCacheTTLOverrideEnabled() && !simulatedClaudeMax && !forcedClaudeMax1H { applyCacheTTLOverride(&result.Usage, account.GetCacheTTLOverrideTarget()) cacheTTLOverridden = (result.Usage.CacheCreation5mTokens + result.Usage.CacheCreation1hTokens) > 0 } From 574359f1df282ed3858151907c3dfdf4469eaa20 Mon Sep 17 00:00:00 2001 From: erio Date: Fri, 27 Feb 2026 12:24:02 +0800 Subject: [PATCH 098/175] chore: bump version to 0.1.86.4 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 91c269e5..d08acc15 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.86.3 +0.1.86.4 From a70d3ff82d66fa8574443eee30c539e6825234d8 Mon Sep 17 00:00:00 2001 From: erio Date: Fri, 27 Feb 2026 12:36:50 +0800 Subject: [PATCH 099/175] fix: update antigravity user-agent version to 1.19.6 --- backend/internal/pkg/antigravity/oauth.go | 4 ++-- backend/internal/pkg/antigravity/oauth_test.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/backend/internal/pkg/antigravity/oauth.go b/backend/internal/pkg/antigravity/oauth.go index 47c75142..afffe9b1 100644 --- a/backend/internal/pkg/antigravity/oauth.go +++ b/backend/internal/pkg/antigravity/oauth.go @@ -49,8 +49,8 @@ const ( antigravityDailyBaseURL = "https://daily-cloudcode-pa.sandbox.googleapis.com" ) -// defaultUserAgentVersion 可通过环境变量 ANTIGRAVITY_USER_AGENT_VERSION 配置,默认 1.18.4 -var defaultUserAgentVersion = "1.18.4" +// defaultUserAgentVersion 可通过环境变量 ANTIGRAVITY_USER_AGENT_VERSION 配置,默认 1.19.6 +var defaultUserAgentVersion = 
"1.19.6" // defaultClientSecret 可通过环境变量 ANTIGRAVITY_OAUTH_CLIENT_SECRET 配置 var defaultClientSecret = "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" diff --git a/backend/internal/pkg/antigravity/oauth_test.go b/backend/internal/pkg/antigravity/oauth_test.go index 351708a5..8417416a 100644 --- a/backend/internal/pkg/antigravity/oauth_test.go +++ b/backend/internal/pkg/antigravity/oauth_test.go @@ -690,7 +690,7 @@ func TestConstants_值正确(t *testing.T) { if RedirectURI != "http://localhost:8085/callback" { t.Errorf("RedirectURI 不匹配: got %s", RedirectURI) } - if GetUserAgent() != "antigravity/1.18.4 windows/amd64" { + if GetUserAgent() != "antigravity/1.19.6 windows/amd64" { t.Errorf("UserAgent 不匹配: got %s", GetUserAgent()) } if SessionTTL != 30*time.Minute { From d21fe54d5535608285f0e56faf67acacfdc0c014 Mon Sep 17 00:00:00 2001 From: erio Date: Fri, 27 Feb 2026 12:55:05 +0800 Subject: [PATCH 100/175] chore: bump version to 0.1.86.5 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index d08acc15..03b0e2ca 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.86.4 +0.1.86.5 From 0302c0386451f89130145f81fc6872a790a4e036 Mon Sep 17 00:00:00 2001 From: erio Date: Fri, 27 Feb 2026 15:00:18 +0800 Subject: [PATCH 101/175] fix: add 2K image pricing at 1.5x base price --- backend/internal/service/billing_service.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/backend/internal/service/billing_service.go b/backend/internal/service/billing_service.go index a523001c..6abd1e53 100644 --- a/backend/internal/service/billing_service.go +++ b/backend/internal/service/billing_service.go @@ -543,7 +543,10 @@ func (s *BillingService) getDefaultImagePrice(model string, imageSize string) fl basePrice = 0.134 } - // 4K 尺寸翻倍 + // 2K 尺寸 1.5 倍,4K 尺寸翻倍 + if imageSize == "2K" { + return basePrice * 1.5 + } if imageSize == "4K" { return basePrice 
* 2 } From e71be7e0f1b86bceb2086feea645f44c78ff9b8f Mon Sep 17 00:00:00 2001 From: erio Date: Fri, 27 Feb 2026 15:13:05 +0800 Subject: [PATCH 102/175] fix: update image pricing tests for 2K tier and refactor claude max billing policy - Update 5 test assertions to match new 2K default price ($0.201 = base * 1.5) - Refactor claude max cache billing policy into reusable functions --- .../service/billing_service_image_test.go | 28 ++--- .../claude_max_cache_billing_policy.go | 100 ++++++++++++++++-- 2 files changed, 107 insertions(+), 21 deletions(-) diff --git a/backend/internal/service/billing_service_image_test.go b/backend/internal/service/billing_service_image_test.go index 18a6b74d..59125814 100644 --- a/backend/internal/service/billing_service_image_test.go +++ b/backend/internal/service/billing_service_image_test.go @@ -12,14 +12,14 @@ import ( func TestCalculateImageCost_DefaultPricing(t *testing.T) { svc := &BillingService{} // pricingService 为 nil,使用硬编码默认值 - // 2K 尺寸,默认价格 $0.134 + // 2K 尺寸,默认价格 $0.134 * 1.5 = $0.201 cost := svc.CalculateImageCost("gemini-3-pro-image", "2K", 1, nil, 1.0) - require.InDelta(t, 0.134, cost.TotalCost, 0.0001) - require.InDelta(t, 0.134, cost.ActualCost, 0.0001) + require.InDelta(t, 0.201, cost.TotalCost, 0.0001) + require.InDelta(t, 0.201, cost.ActualCost, 0.0001) // 多张图片 cost = svc.CalculateImageCost("gemini-3-pro-image", "2K", 3, nil, 1.0) - require.InDelta(t, 0.402, cost.TotalCost, 0.0001) + require.InDelta(t, 0.603, cost.TotalCost, 0.0001) } // TestCalculateImageCost_GroupCustomPricing 测试分组自定义价格 @@ -63,13 +63,13 @@ func TestCalculateImageCost_RateMultiplier(t *testing.T) { // 费率倍数 1.5x cost := svc.CalculateImageCost("gemini-3-pro-image", "2K", 1, nil, 1.5) - require.InDelta(t, 0.134, cost.TotalCost, 0.0001) // TotalCost 不变 - require.InDelta(t, 0.201, cost.ActualCost, 0.0001) // ActualCost = 0.134 * 1.5 + require.InDelta(t, 0.201, cost.TotalCost, 0.0001) // TotalCost = 0.134 * 1.5 + require.InDelta(t, 0.3015, cost.ActualCost, 
0.0001) // ActualCost = 0.201 * 1.5 // 费率倍数 2.0x cost = svc.CalculateImageCost("gemini-3-pro-image", "2K", 2, nil, 2.0) - require.InDelta(t, 0.268, cost.TotalCost, 0.0001) - require.InDelta(t, 0.536, cost.ActualCost, 0.0001) + require.InDelta(t, 0.402, cost.TotalCost, 0.0001) + require.InDelta(t, 0.804, cost.ActualCost, 0.0001) } // TestCalculateImageCost_ZeroCount 测试 imageCount=0 @@ -95,8 +95,8 @@ func TestCalculateImageCost_ZeroRateMultiplier(t *testing.T) { svc := &BillingService{} cost := svc.CalculateImageCost("gemini-3-pro-image", "2K", 1, nil, 0) - require.InDelta(t, 0.134, cost.TotalCost, 0.0001) - require.InDelta(t, 0.134, cost.ActualCost, 0.0001) // 0 倍率当作 1.0 处理 + require.InDelta(t, 0.201, cost.TotalCost, 0.0001) + require.InDelta(t, 0.201, cost.ActualCost, 0.0001) // 0 倍率当作 1.0 处理 } // TestGetImageUnitPrice_GroupPriorityOverDefault 测试分组价格优先于默认价格 @@ -127,9 +127,9 @@ func TestGetImageUnitPrice_PartialGroupConfig(t *testing.T) { cost := svc.CalculateImageCost("gemini-3-pro-image", "1K", 1, groupConfig, 1.0) require.InDelta(t, 0.10, cost.TotalCost, 0.0001) - // 2K 回退默认价格 $0.134 + // 2K 回退默认价格 $0.201 (1.5倍) cost = svc.CalculateImageCost("gemini-3-pro-image", "2K", 1, groupConfig, 1.0) - require.InDelta(t, 0.134, cost.TotalCost, 0.0001) + require.InDelta(t, 0.201, cost.TotalCost, 0.0001) // 4K 回退默认价格 $0.268 (翻倍) cost = svc.CalculateImageCost("gemini-3-pro-image", "4K", 1, groupConfig, 1.0) @@ -140,10 +140,10 @@ func TestGetImageUnitPrice_PartialGroupConfig(t *testing.T) { func TestGetDefaultImagePrice_FallbackHardcoded(t *testing.T) { svc := &BillingService{} // pricingService 为 nil - // 1K 和 2K 使用相同的默认价格 $0.134 + // 1K 默认价格 $0.134,2K 默认价格 $0.201 (1.5倍) cost := svc.CalculateImageCost("gemini-3-pro-image", "1K", 1, nil, 1.0) require.InDelta(t, 0.134, cost.TotalCost, 0.0001) cost = svc.CalculateImageCost("gemini-3-pro-image", "2K", 1, nil, 1.0) - require.InDelta(t, 0.134, cost.TotalCost, 0.0001) + require.InDelta(t, 0.201, cost.TotalCost, 0.0001) } diff --git 
a/backend/internal/service/claude_max_cache_billing_policy.go b/backend/internal/service/claude_max_cache_billing_policy.go index 5f2e2def..021d968c 100644 --- a/backend/internal/service/claude_max_cache_billing_policy.go +++ b/backend/internal/service/claude_max_cache_billing_policy.go @@ -64,6 +64,70 @@ func applyClaudeMaxCacheBillingPolicy(input *RecordUsageInput) claudeMaxCacheBil return out } +// detectClaudeMaxCacheBillingOutcomeForUsage only returns whether Claude Max policy +// should influence downstream override decisions. It does not mutate usage. +func detectClaudeMaxCacheBillingOutcomeForUsage(usage ClaudeUsage, parsed *ParsedRequest, group *Group, model string) claudeMaxCacheBillingOutcome { + var out claudeMaxCacheBillingOutcome + if !shouldApplyClaudeMaxBillingRulesForUsage(group, model, parsed) { + return out + } + if hasCacheCreationTokens(usage) { + out.ForcedCache1H = true + return out + } + if shouldSimulateClaudeMaxUsageForUsage(usage, parsed) { + out.Simulated = true + } + return out +} + +func applyClaudeMaxCacheBillingPolicyToUsage(usage *ClaudeUsage, parsed *ParsedRequest, group *Group, model string, accountID int64) claudeMaxCacheBillingOutcome { + var out claudeMaxCacheBillingOutcome + if usage == nil || !shouldApplyClaudeMaxBillingRulesForUsage(group, model, parsed) { + return out + } + + resolvedModel := strings.TrimSpace(model) + if resolvedModel == "" && parsed != nil { + resolvedModel = strings.TrimSpace(parsed.Model) + } + + if hasCacheCreationTokens(*usage) { + before5m := usage.CacheCreation5mTokens + before1h := usage.CacheCreation1hTokens + changed := safelyForceCacheCreationTo1H(usage) + // Even when value is already 1h, still mark forced to skip account TTL override. 
+ out.ForcedCache1H = true + if changed { + logger.LegacyPrintf("service.gateway", "force_claude_max_cache_1h: model=%s account=%d cache_creation_5m:%d->%d cache_creation_1h:%d->%d", + resolvedModel, + accountID, + before5m, + usage.CacheCreation5mTokens, + before1h, + usage.CacheCreation1hTokens, + ) + } + return out + } + + if !shouldSimulateClaudeMaxUsageForUsage(*usage, parsed) { + return out + } + beforeInputTokens := usage.InputTokens + out.Simulated = safelyProjectUsageToClaudeMax1H(usage, parsed) + if out.Simulated { + logger.LegacyPrintf("service.gateway", "simulate_claude_max_usage: model=%s account=%d input_tokens:%d->%d cache_creation_1h=%d", + resolvedModel, + accountID, + beforeInputTokens, + usage.InputTokens, + usage.CacheCreation1hTokens, + ) + } + return out +} + func isClaudeFamilyModel(model string) bool { normalized := strings.ToLower(strings.TrimSpace(claude.NormalizeModelID(model))) if normalized == "" { @@ -76,16 +140,22 @@ func shouldApplyClaudeMaxBillingRules(input *RecordUsageInput) bool { if input == nil || input.Result == nil || input.APIKey == nil || input.APIKey.Group == nil { return false } - group := input.APIKey.Group + return shouldApplyClaudeMaxBillingRulesForUsage(input.APIKey.Group, input.Result.Model, input.ParsedRequest) +} + +func shouldApplyClaudeMaxBillingRulesForUsage(group *Group, model string, parsed *ParsedRequest) bool { + if group == nil { + return false + } if !group.SimulateClaudeMaxEnabled || group.Platform != PlatformAnthropic { return false } - model := input.Result.Model - if model == "" && input.ParsedRequest != nil { - model = input.ParsedRequest.Model + resolvedModel := model + if resolvedModel == "" && parsed != nil { + resolvedModel = parsed.Model } - if !isClaudeFamilyModel(model) { + if !isClaudeFamilyModel(resolvedModel) { return false } return true @@ -96,13 +166,19 @@ func hasCacheCreationTokens(usage ClaudeUsage) bool { } func shouldSimulateClaudeMaxUsage(input *RecordUsageInput) bool { + if input == 
nil || input.Result == nil { + return false + } if !shouldApplyClaudeMaxBillingRules(input) { return false } - if !hasClaudeCacheSignals(input.ParsedRequest) { + return shouldSimulateClaudeMaxUsageForUsage(input.Result.Usage, input.ParsedRequest) +} + +func shouldSimulateClaudeMaxUsageForUsage(usage ClaudeUsage, parsed *ParsedRequest) bool { + if !hasClaudeCacheSignals(parsed) { return false } - usage := input.Result.Usage if usage.InputTokens <= 0 { return false } @@ -149,6 +225,16 @@ func safelyApplyClaudeMaxUsageSimulation(result *ForwardResult, parsed *ParsedRe return applyClaudeMaxUsageSimulation(result, parsed) } +func safelyProjectUsageToClaudeMax1H(usage *ClaudeUsage, parsed *ParsedRequest) (changed bool) { + defer func() { + if r := recover(); r != nil { + logger.LegacyPrintf("service.gateway", "simulate_claude_max_usage skipped: panic=%v", r) + changed = false + } + }() + return projectUsageToClaudeMax1H(usage, parsed) +} + func safelyForceCacheCreationTo1H(usage *ClaudeUsage) (changed bool) { defer func() { if r := recover(); r != nil { From 61ef73cb12dbbd997e6fc7ef8c50d8fa130476b4 Mon Sep 17 00:00:00 2001 From: erio Date: Fri, 27 Feb 2026 16:14:07 +0800 Subject: [PATCH 103/175] refactor: isolate claude max response usage simulation by group toggle --- .../claude_max_cache_billing_policy.go | 32 +--- .../gateway_claude_max_response_helpers.go | 147 +++++++++++++++ .../gateway_record_usage_claude_max_test.go | 27 ++- .../gateway_response_usage_sync_test.go | 170 ++++++++++++++++++ backend/internal/service/gateway_service.go | 28 ++- 5 files changed, 356 insertions(+), 48 deletions(-) create mode 100644 backend/internal/service/gateway_claude_max_response_helpers.go create mode 100644 backend/internal/service/gateway_response_usage_sync_test.go diff --git a/backend/internal/service/claude_max_cache_billing_policy.go b/backend/internal/service/claude_max_cache_billing_policy.go index 021d968c..398c9ec8 100644 --- 
a/backend/internal/service/claude_max_cache_billing_policy.go +++ b/backend/internal/service/claude_max_cache_billing_policy.go @@ -31,19 +31,7 @@ func applyClaudeMaxCacheBillingPolicy(input *RecordUsageInput) claudeMaxCacheBil } if hasCacheCreationTokens(*usage) { - before5m := usage.CacheCreation5mTokens - before1h := usage.CacheCreation1hTokens - out.ForcedCache1H = safelyForceCacheCreationTo1H(usage) - if out.ForcedCache1H { - logger.LegacyPrintf("service.gateway", "force_claude_max_cache_1h: model=%s account=%d cache_creation_5m:%d->%d cache_creation_1h:%d->%d", - result.Model, - accountID, - before5m, - usage.CacheCreation5mTokens, - before1h, - usage.CacheCreation1hTokens, - ) - } + // Upstream already returned cache creation usage; keep original usage. return out } @@ -72,7 +60,7 @@ func detectClaudeMaxCacheBillingOutcomeForUsage(usage ClaudeUsage, parsed *Parse return out } if hasCacheCreationTokens(usage) { - out.ForcedCache1H = true + // Upstream already returned cache creation usage; keep original usage. return out } if shouldSimulateClaudeMaxUsageForUsage(usage, parsed) { @@ -93,21 +81,7 @@ func applyClaudeMaxCacheBillingPolicyToUsage(usage *ClaudeUsage, parsed *ParsedR } if hasCacheCreationTokens(*usage) { - before5m := usage.CacheCreation5mTokens - before1h := usage.CacheCreation1hTokens - changed := safelyForceCacheCreationTo1H(usage) - // Even when value is already 1h, still mark forced to skip account TTL override. - out.ForcedCache1H = true - if changed { - logger.LegacyPrintf("service.gateway", "force_claude_max_cache_1h: model=%s account=%d cache_creation_5m:%d->%d cache_creation_1h:%d->%d", - resolvedModel, - accountID, - before5m, - usage.CacheCreation5mTokens, - before1h, - usage.CacheCreation1hTokens, - ) - } + // Upstream already returned cache creation usage; keep original usage. 
return out } diff --git a/backend/internal/service/gateway_claude_max_response_helpers.go b/backend/internal/service/gateway_claude_max_response_helpers.go new file mode 100644 index 00000000..b4c7e819 --- /dev/null +++ b/backend/internal/service/gateway_claude_max_response_helpers.go @@ -0,0 +1,147 @@ +package service + +import ( + "context" + "encoding/json" + + "github.com/gin-gonic/gin" + "github.com/tidwall/sjson" +) + +type claudeMaxResponseRewriteContext struct { + Parsed *ParsedRequest + Group *Group +} + +type claudeMaxResponseRewriteContextKeyType struct{} + +var claudeMaxResponseRewriteContextKey = claudeMaxResponseRewriteContextKeyType{} + +func withClaudeMaxResponseRewriteContext(ctx context.Context, c *gin.Context, parsed *ParsedRequest) context.Context { + if ctx == nil { + ctx = context.Background() + } + value := claudeMaxResponseRewriteContext{ + Parsed: parsed, + Group: claudeMaxGroupFromGinContext(c), + } + return context.WithValue(ctx, claudeMaxResponseRewriteContextKey, value) +} + +func claudeMaxResponseRewriteContextFromContext(ctx context.Context) claudeMaxResponseRewriteContext { + if ctx == nil { + return claudeMaxResponseRewriteContext{} + } + value, _ := ctx.Value(claudeMaxResponseRewriteContextKey).(claudeMaxResponseRewriteContext) + return value +} + +func claudeMaxGroupFromGinContext(c *gin.Context) *Group { + if c == nil { + return nil + } + raw, exists := c.Get("api_key") + if !exists { + return nil + } + apiKey, ok := raw.(*APIKey) + if !ok || apiKey == nil { + return nil + } + return apiKey.Group +} + +func applyClaudeMaxSimulationToUsage(ctx context.Context, usage *ClaudeUsage, model string, accountID int64) claudeMaxCacheBillingOutcome { + var out claudeMaxCacheBillingOutcome + if usage == nil { + return out + } + rewriteCtx := claudeMaxResponseRewriteContextFromContext(ctx) + return applyClaudeMaxCacheBillingPolicyToUsage(usage, rewriteCtx.Parsed, rewriteCtx.Group, model, accountID) +} + +func 
applyClaudeMaxSimulationToUsageJSONMap(ctx context.Context, usageObj map[string]any, model string, accountID int64) claudeMaxCacheBillingOutcome { + var out claudeMaxCacheBillingOutcome + if usageObj == nil { + return out + } + usage := claudeUsageFromJSONMap(usageObj) + out = applyClaudeMaxSimulationToUsage(ctx, &usage, model, accountID) + if out.Simulated { + rewriteClaudeUsageJSONMap(usageObj, usage) + } + return out +} + +func rewriteClaudeUsageJSONBytes(body []byte, usage ClaudeUsage) []byte { + updated := body + var err error + + updated, err = sjson.SetBytes(updated, "usage.input_tokens", usage.InputTokens) + if err != nil { + return body + } + updated, err = sjson.SetBytes(updated, "usage.cache_creation_input_tokens", usage.CacheCreationInputTokens) + if err != nil { + return body + } + updated, err = sjson.SetBytes(updated, "usage.cache_creation.ephemeral_5m_input_tokens", usage.CacheCreation5mTokens) + if err != nil { + return body + } + updated, err = sjson.SetBytes(updated, "usage.cache_creation.ephemeral_1h_input_tokens", usage.CacheCreation1hTokens) + if err != nil { + return body + } + return updated +} + +func claudeUsageFromJSONMap(usageObj map[string]any) ClaudeUsage { + var usage ClaudeUsage + if usageObj == nil { + return usage + } + + usage.InputTokens = usageIntFromAny(usageObj["input_tokens"]) + usage.OutputTokens = usageIntFromAny(usageObj["output_tokens"]) + usage.CacheCreationInputTokens = usageIntFromAny(usageObj["cache_creation_input_tokens"]) + usage.CacheReadInputTokens = usageIntFromAny(usageObj["cache_read_input_tokens"]) + + if ccObj, ok := usageObj["cache_creation"].(map[string]any); ok { + usage.CacheCreation5mTokens = usageIntFromAny(ccObj["ephemeral_5m_input_tokens"]) + usage.CacheCreation1hTokens = usageIntFromAny(ccObj["ephemeral_1h_input_tokens"]) + } + return usage +} + +func rewriteClaudeUsageJSONMap(usageObj map[string]any, usage ClaudeUsage) { + if usageObj == nil { + return + } + usageObj["input_tokens"] = 
usage.InputTokens + usageObj["cache_creation_input_tokens"] = usage.CacheCreationInputTokens + + ccObj, _ := usageObj["cache_creation"].(map[string]any) + if ccObj == nil { + ccObj = make(map[string]any, 2) + usageObj["cache_creation"] = ccObj + } + ccObj["ephemeral_5m_input_tokens"] = usage.CacheCreation5mTokens + ccObj["ephemeral_1h_input_tokens"] = usage.CacheCreation1hTokens +} + +func usageIntFromAny(v any) int { + switch value := v.(type) { + case int: + return value + case int64: + return int(value) + case float64: + return int(value) + case json.Number: + if n, err := value.Int64(); err == nil { + return int(n) + } + } + return 0 +} diff --git a/backend/internal/service/gateway_record_usage_claude_max_test.go b/backend/internal/service/gateway_record_usage_claude_max_test.go index 445519f8..7bee1b0f 100644 --- a/backend/internal/service/gateway_record_usage_claude_max_test.go +++ b/backend/internal/service/gateway_record_usage_claude_max_test.go @@ -32,7 +32,7 @@ func newGatewayServiceForRecordUsageTest(repo UsageLogRepository) *GatewayServic } } -func TestRecordUsage_SimulateClaudeMaxEnabled_ProjectsAndSkipsTTLOverride(t *testing.T) { +func TestRecordUsage_SimulateClaudeMaxEnabled_DoesNotProjectAndSkipsTTLOverride(t *testing.T) { repo := &usageLogRepoRecordUsageStub{inserted: true} svc := newGatewayServiceForRecordUsageTest(repo) @@ -92,12 +92,11 @@ func TestRecordUsage_SimulateClaudeMaxEnabled_ProjectsAndSkipsTTLOverride(t *tes require.NotNil(t, repo.last) log := repo.last - total := log.InputTokens + log.CacheCreation5mTokens + log.CacheCreation1hTokens - require.Equal(t, 160, total, "token 总量应保持不变") - require.Greater(t, log.CacheCreation1hTokens, 0, "应映射为 1h cache creation") - require.Equal(t, 0, log.CacheCreation5mTokens, "模拟成功后不应再被 TTL override 改写为 5m") - require.Equal(t, log.CacheCreation1hTokens, log.CacheCreationTokens, "聚合 cache_creation_tokens 应与 1h 一致") - require.False(t, log.CacheTTLOverridden, "模拟成功时应跳过 TTL override 标记") + require.Equal(t, 
160, log.InputTokens) + require.Equal(t, 0, log.CacheCreationTokens) + require.Equal(t, 0, log.CacheCreation5mTokens) + require.Equal(t, 0, log.CacheCreation1hTokens) + require.False(t, log.CacheTTLOverridden, "simulate outcome should skip account ttl override") } func TestRecordUsage_SimulateClaudeMaxDisabled_AppliesTTLOverride(t *testing.T) { @@ -144,12 +143,12 @@ func TestRecordUsage_SimulateClaudeMaxDisabled_AppliesTTLOverride(t *testing.T) log := repo.last require.Equal(t, 120, log.CacheCreationTokens) - require.Equal(t, 120, log.CacheCreation5mTokens, "关闭模拟时应执行 TTL override 到 5m") + require.Equal(t, 120, log.CacheCreation5mTokens) require.Equal(t, 0, log.CacheCreation1hTokens) - require.True(t, log.CacheTTLOverridden, "TTL override 生效时应打标") + require.True(t, log.CacheTTLOverridden) } -func TestRecordUsage_SimulateClaudeMaxEnabled_ExistingCacheCreationForce1H(t *testing.T) { +func TestRecordUsage_SimulateClaudeMaxEnabled_ExistingCacheCreationBypassesSimulation(t *testing.T) { repo := &usageLogRepoRecordUsageStub{inserted: true} svc := newGatewayServiceForRecordUsageTest(repo) @@ -192,9 +191,9 @@ func TestRecordUsage_SimulateClaudeMaxEnabled_ExistingCacheCreationForce1H(t *te require.NotNil(t, repo.last) log := repo.last - require.Equal(t, 20, log.InputTokens, "existing cache creation should not project input tokens") - require.Equal(t, 0, log.CacheCreation5mTokens, "existing cache creation should be forced to 1h") - require.Equal(t, 120, log.CacheCreation1hTokens) + require.Equal(t, 20, log.InputTokens) + require.Equal(t, 120, log.CacheCreation5mTokens) + require.Equal(t, 0, log.CacheCreation1hTokens) require.Equal(t, 120, log.CacheCreationTokens) - require.True(t, log.CacheTTLOverridden, "force-to-1h should mark cache ttl overridden") + require.True(t, log.CacheTTLOverridden, "existing cache_creation should remain under normal account ttl flow") } diff --git a/backend/internal/service/gateway_response_usage_sync_test.go 
b/backend/internal/service/gateway_response_usage_sync_test.go new file mode 100644 index 00000000..445ee8ad --- /dev/null +++ b/backend/internal/service/gateway_response_usage_sync_test.go @@ -0,0 +1,170 @@ +package service + +import ( + "bytes" + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/require" + "github.com/tidwall/gjson" +) + +func TestHandleNonStreamingResponse_UsageAlignedWithClaudeMaxSimulation(t *testing.T) { + gin.SetMode(gin.TestMode) + + svc := &GatewayService{ + cfg: &config.Config{}, + rateLimitService: &RateLimitService{}, + } + + account := &Account{ + ID: 11, + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Extra: map[string]any{ + "cache_ttl_override_enabled": true, + "cache_ttl_override_target": "5m", + }, + } + group := &Group{ + ID: 99, + Platform: PlatformAnthropic, + SimulateClaudeMaxEnabled: true, + } + parsed := &ParsedRequest{ + Model: "claude-sonnet-4", + Messages: []any{ + map[string]any{ + "role": "user", + "content": []any{ + map[string]any{ + "type": "text", + "text": "long cached context", + "cache_control": map[string]any{"type": "ephemeral"}, + }, + map[string]any{ + "type": "text", + "text": "new user question", + }, + }, + }, + }, + } + + upstreamBody := []byte(`{"id":"msg_1","model":"claude-sonnet-4","usage":{"input_tokens":120,"output_tokens":8}}`) + resp := &http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: ioNopCloserBytes(upstreamBody), + } + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/v1/messages", bytes.NewReader(nil)) + c.Set("api_key", &APIKey{Group: group}) + requestCtx := withClaudeMaxResponseRewriteContext(context.Background(), c, parsed) + + usage, err := svc.handleNonStreamingResponse(requestCtx, resp, c, account, 
"claude-sonnet-4", "claude-sonnet-4") + require.NoError(t, err) + require.NotNil(t, usage) + + var rendered struct { + Usage ClaudeUsage `json:"usage"` + } + require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &rendered)) + rendered.Usage.CacheCreation5mTokens = int(gjson.GetBytes(rec.Body.Bytes(), "usage.cache_creation.ephemeral_5m_input_tokens").Int()) + rendered.Usage.CacheCreation1hTokens = int(gjson.GetBytes(rec.Body.Bytes(), "usage.cache_creation.ephemeral_1h_input_tokens").Int()) + + require.Equal(t, rendered.Usage.InputTokens, usage.InputTokens) + require.Equal(t, rendered.Usage.OutputTokens, usage.OutputTokens) + require.Equal(t, rendered.Usage.CacheCreationInputTokens, usage.CacheCreationInputTokens) + require.Equal(t, rendered.Usage.CacheCreation5mTokens, usage.CacheCreation5mTokens) + require.Equal(t, rendered.Usage.CacheCreation1hTokens, usage.CacheCreation1hTokens) + require.Equal(t, rendered.Usage.CacheReadInputTokens, usage.CacheReadInputTokens) + + require.Greater(t, usage.CacheCreation1hTokens, 0) + require.Equal(t, 0, usage.CacheCreation5mTokens) + require.Less(t, usage.InputTokens, 120) +} + +func TestHandleNonStreamingResponse_ClaudeMaxDisabled_NoSimulationIntercept(t *testing.T) { + gin.SetMode(gin.TestMode) + + svc := &GatewayService{ + cfg: &config.Config{}, + rateLimitService: &RateLimitService{}, + } + + account := &Account{ + ID: 12, + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Extra: map[string]any{ + "cache_ttl_override_enabled": true, + "cache_ttl_override_target": "5m", + }, + } + group := &Group{ + ID: 100, + Platform: PlatformAnthropic, + SimulateClaudeMaxEnabled: false, + } + parsed := &ParsedRequest{ + Model: "claude-sonnet-4", + Messages: []any{ + map[string]any{ + "role": "user", + "content": []any{ + map[string]any{ + "type": "text", + "text": "long cached context", + "cache_control": map[string]any{"type": "ephemeral"}, + }, + map[string]any{ + "type": "text", + "text": "new user question", + }, + }, + }, + }, + 
} + + upstreamBody := []byte(`{"id":"msg_2","model":"claude-sonnet-4","usage":{"input_tokens":120,"output_tokens":8}}`) + resp := &http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: ioNopCloserBytes(upstreamBody), + } + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/v1/messages", bytes.NewReader(nil)) + c.Set("api_key", &APIKey{Group: group}) + requestCtx := withClaudeMaxResponseRewriteContext(context.Background(), c, parsed) + + usage, err := svc.handleNonStreamingResponse(requestCtx, resp, c, account, "claude-sonnet-4", "claude-sonnet-4") + require.NoError(t, err) + require.NotNil(t, usage) + + require.Equal(t, 120, usage.InputTokens) + require.Equal(t, 0, usage.CacheCreationInputTokens) + require.Equal(t, 0, usage.CacheCreation5mTokens) + require.Equal(t, 0, usage.CacheCreation1hTokens) +} + +func ioNopCloserBytes(b []byte) *readCloserFromBytes { + return &readCloserFromBytes{Reader: bytes.NewReader(b)} +} + +type readCloserFromBytes struct { + *bytes.Reader +} + +func (r *readCloserFromBytes) Close() error { + return nil +} diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 53b1fd28..e025c6d9 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -3709,6 +3709,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A } // 处理正常响应 + ctx = withClaudeMaxResponseRewriteContext(ctx, c, parsed) var usage *ClaudeUsage var firstTokenMs *int var clientDisconnect bool @@ -5105,6 +5106,7 @@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http needModelReplace := originalModel != mappedModel clientDisconnected := false // 客户端断开标志,断开后继续读取上游以获取完整usage + skipAccountTTLOverride := false pendingEventLines := make([]string, 0, 4) @@ -5164,17 +5166,25 @@ func (s 
*GatewayService) handleStreamingResponse(ctx context.Context, resp *http if msg, ok := event["message"].(map[string]any); ok { if u, ok := msg["usage"].(map[string]any); ok { reconcileCachedTokens(u) + claudeMaxOutcome := applyClaudeMaxSimulationToUsageJSONMap(ctx, u, originalModel, account.ID) + if claudeMaxOutcome.Simulated { + skipAccountTTLOverride = true + } } } } if eventType == "message_delta" { if u, ok := event["usage"].(map[string]any); ok { reconcileCachedTokens(u) + claudeMaxOutcome := applyClaudeMaxSimulationToUsageJSONMap(ctx, u, originalModel, account.ID) + if claudeMaxOutcome.Simulated { + skipAccountTTLOverride = true + } } } // Cache TTL Override: 重写 SSE 事件中的 cache_creation 分类 - if account.IsCacheTTLOverrideEnabled() { + if account.IsCacheTTLOverrideEnabled() && !skipAccountTTLOverride { overrideTarget := account.GetCacheTTLOverrideTarget() if eventType == "message_start" { if msg, ok := event["message"].(map[string]any); ok { @@ -5465,8 +5475,13 @@ func (s *GatewayService) handleNonStreamingResponse(ctx context.Context, resp *h } } + claudeMaxOutcome := applyClaudeMaxSimulationToUsage(ctx, &response.Usage, originalModel, account.ID) + if claudeMaxOutcome.Simulated { + body = rewriteClaudeUsageJSONBytes(body, response.Usage) + } + // Cache TTL Override: 重写 non-streaming 响应中的 cache_creation 分类 - if account.IsCacheTTLOverrideEnabled() { + if account.IsCacheTTLOverrideEnabled() && !claudeMaxOutcome.Simulated && !claudeMaxOutcome.ForcedCache1H { overrideTarget := account.GetCacheTTLOverrideTarget() if applyCacheTTLOverride(&response.Usage, overrideTarget) { // 同步更新 body JSON 中的嵌套 cache_creation 对象 @@ -5608,9 +5623,12 @@ func (s *GatewayService) RecordUsage(ctx context.Context, input *RecordUsageInpu result.Usage.InputTokens = 0 } - // Claude Max cache billing policy (group-level): force existing cache creation to 1h, - // otherwise simulate projection only when request carries cache signals. 
- claudeMaxOutcome := applyClaudeMaxCacheBillingPolicy(input) + // Claude Max cache billing policy (group-level): RecordUsage only checks outcome. + var apiKeyGroup *Group + if apiKey != nil { + apiKeyGroup = apiKey.Group + } + claudeMaxOutcome := detectClaudeMaxCacheBillingOutcomeForUsage(result.Usage, input.ParsedRequest, apiKeyGroup, result.Model) simulatedClaudeMax := claudeMaxOutcome.Simulated forcedClaudeMax1H := claudeMaxOutcome.ForcedCache1H From b789333b68acbb0bb326c22477295415809e2b78 Mon Sep 17 00:00:00 2001 From: erio Date: Fri, 27 Feb 2026 16:35:37 +0800 Subject: [PATCH 104/175] test: stabilize subscription progress day assertion --- .../service/subscription_calculate_progress_test.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/backend/internal/service/subscription_calculate_progress_test.go b/backend/internal/service/subscription_calculate_progress_test.go index 22018bcd..6a6a1c12 100644 --- a/backend/internal/service/subscription_calculate_progress_test.go +++ b/backend/internal/service/subscription_calculate_progress_test.go @@ -34,9 +34,10 @@ func TestCalculateProgress_BasicFields(t *testing.T) { assert.Equal(t, int64(100), progress.ID) assert.Equal(t, "Premium", progress.GroupName) assert.Equal(t, sub.ExpiresAt, progress.ExpiresAt) - assert.Equal(t, 29, progress.ExpiresInDays) // 约 30 天 - assert.Nil(t, progress.Daily, "无日限额时 Daily 应为 nil") - assert.Nil(t, progress.Weekly, "无周限额时 Weekly 应为 nil") + assert.GreaterOrEqual(t, progress.ExpiresInDays, 29) + assert.LessOrEqual(t, progress.ExpiresInDays, 30) + assert.Nil(t, progress.Daily) + assert.Nil(t, progress.Weekly) assert.Nil(t, progress.Monthly, "无月限额时 Monthly 应为 nil") } From c1033c12bdb21a907003d54d9befed5408db7a23 Mon Sep 17 00:00:00 2001 From: erio Date: Fri, 27 Feb 2026 16:38:35 +0800 Subject: [PATCH 105/175] chore: bump version to 0.1.86.6 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION 
b/backend/cmd/server/VERSION index 03b0e2ca..66d2dcfd 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.86.5 +0.1.86.6 From 505494b37874deadd5afcf5e4954f02f175efe98 Mon Sep 17 00:00:00 2001 From: erio Date: Fri, 27 Feb 2026 17:18:24 +0800 Subject: [PATCH 106/175] fix: update 2K image price placeholder from 0.134 to 0.201 --- backend/cmd/server/VERSION | 2 +- frontend/src/views/admin/GroupsView.vue | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 66d2dcfd..131dd491 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.86.6 +0.1.86.7 diff --git a/frontend/src/views/admin/GroupsView.vue b/frontend/src/views/admin/GroupsView.vue index 016762e4..c159a879 100644 --- a/frontend/src/views/admin/GroupsView.vue +++ b/frontend/src/views/admin/GroupsView.vue @@ -459,7 +459,7 @@ step="0.001" min="0" class="input" - placeholder="0.134" + placeholder="0.201" />
@@ -1191,7 +1191,7 @@ step="0.001" min="0" class="input" - placeholder="0.134" + placeholder="0.201" />
From 741eae59bbea3d972b39ae90d504d7dd9b1aa270 Mon Sep 17 00:00:00 2001 From: erio Date: Fri, 27 Feb 2026 19:59:36 +0800 Subject: [PATCH 107/175] refactor: decouple claude max cache simulation from RecordUsage Extract setupClaudeMaxStreamingHook and applyClaudeMaxNonStreamingRewrite facade functions to helpers file. RecordUsage now uses detect-only (no mutation), client response rewriting handled at Forward layer. --- backend/internal/handler/gateway_handler.go | 1 + .../pkg/antigravity/stream_transformer.go | 41 ++++++- .../service/antigravity_gateway_service.go | 13 ++- .../antigravity_gateway_service_test.go | 8 +- .../claude_max_cache_billing_policy.go | 101 +----------------- .../gateway_claude_max_response_helpers.go | 49 +++++++++ .../gateway_record_usage_claude_max_test.go | 4 +- backend/internal/service/gateway_service.go | 12 +-- 8 files changed, 114 insertions(+), 115 deletions(-) diff --git a/backend/internal/handler/gateway_handler.go b/backend/internal/handler/gateway_handler.go index 103bd086..127715dd 100644 --- a/backend/internal/handler/gateway_handler.go +++ b/backend/internal/handler/gateway_handler.go @@ -545,6 +545,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { accountReleaseFunc = wrapReleaseOnDone(c.Request.Context(), accountReleaseFunc) // 转发请求 - 根据账号平台分流 + c.Set("parsed_request", parsedReq) var result *service.ForwardResult requestCtx := c.Request.Context() if fs.SwitchCount > 0 { diff --git a/backend/internal/pkg/antigravity/stream_transformer.go b/backend/internal/pkg/antigravity/stream_transformer.go index 677435ad..54f7e282 100644 --- a/backend/internal/pkg/antigravity/stream_transformer.go +++ b/backend/internal/pkg/antigravity/stream_transformer.go @@ -18,6 +18,9 @@ const ( BlockTypeFunction ) +// UsageMapHook is a callback that can modify usage data before it's emitted in SSE events. 
+type UsageMapHook func(usageMap map[string]any) + // StreamingProcessor 流式响应处理器 type StreamingProcessor struct { blockType BlockType @@ -30,6 +33,7 @@ type StreamingProcessor struct { originalModel string webSearchQueries []string groundingChunks []GeminiGroundingChunk + usageMapHook UsageMapHook // 累计 usage inputTokens int @@ -45,6 +49,25 @@ func NewStreamingProcessor(originalModel string) *StreamingProcessor { } } +// SetUsageMapHook sets an optional hook that modifies usage maps before they are emitted. +func (p *StreamingProcessor) SetUsageMapHook(fn UsageMapHook) { + p.usageMapHook = fn +} + +func usageToMap(u ClaudeUsage) map[string]any { + m := map[string]any{ + "input_tokens": u.InputTokens, + "output_tokens": u.OutputTokens, + } + if u.CacheCreationInputTokens > 0 { + m["cache_creation_input_tokens"] = u.CacheCreationInputTokens + } + if u.CacheReadInputTokens > 0 { + m["cache_read_input_tokens"] = u.CacheReadInputTokens + } + return m +} + // ProcessLine 处理 SSE 行,返回 Claude SSE 事件 func (p *StreamingProcessor) ProcessLine(line string) []byte { line = strings.TrimSpace(line) @@ -158,6 +181,13 @@ func (p *StreamingProcessor) emitMessageStart(v1Resp *V1InternalResponse) []byte responseID = "msg_" + generateRandomID() } + var usageValue any = usage + if p.usageMapHook != nil { + usageMap := usageToMap(usage) + p.usageMapHook(usageMap) + usageValue = usageMap + } + message := map[string]any{ "id": responseID, "type": "message", @@ -166,7 +196,7 @@ func (p *StreamingProcessor) emitMessageStart(v1Resp *V1InternalResponse) []byte "model": p.originalModel, "stop_reason": nil, "stop_sequence": nil, - "usage": usage, + "usage": usageValue, } event := map[string]any{ @@ -477,13 +507,20 @@ func (p *StreamingProcessor) emitFinish(finishReason string) []byte { CacheReadInputTokens: p.cacheReadTokens, } + var usageValue any = usage + if p.usageMapHook != nil { + usageMap := usageToMap(usage) + p.usageMapHook(usageMap) + usageValue = usageMap + } + deltaEvent := 
map[string]any{ "type": "message_delta", "delta": map[string]any{ "stop_reason": stopReason, "stop_sequence": nil, }, - "usage": usage, + "usage": usageValue, } _, _ = result.Write(p.formatSSE("message_delta", deltaEvent)) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 2bd6195a..4922de3c 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -1600,7 +1600,7 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, var clientDisconnect bool if claudeReq.Stream { // 客户端要求流式,直接透传转换 - streamRes, err := s.handleClaudeStreamingResponse(c, resp, startTime, originalModel) + streamRes, err := s.handleClaudeStreamingResponse(c, resp, startTime, originalModel, account.ID) if err != nil { logger.LegacyPrintf("service.antigravity_gateway", "%s status=stream_error error=%v", prefix, err) return nil, err @@ -1610,7 +1610,7 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, clientDisconnect = streamRes.clientDisconnect } else { // 客户端要求非流式,收集流式响应后转换返回 - streamRes, err := s.handleClaudeStreamToNonStreaming(c, resp, startTime, originalModel) + streamRes, err := s.handleClaudeStreamToNonStreaming(c, resp, startTime, originalModel, account.ID) if err != nil { logger.LegacyPrintf("service.antigravity_gateway", "%s status=stream_collect_error error=%v", prefix, err) return nil, err @@ -3416,7 +3416,7 @@ func (s *AntigravityGatewayService) writeGoogleError(c *gin.Context, status int, // handleClaudeStreamToNonStreaming 收集上游流式响应,转换为 Claude 非流式格式返回 // 用于处理客户端非流式请求但上游只支持流式的情况 -func (s *AntigravityGatewayService) handleClaudeStreamToNonStreaming(c *gin.Context, resp *http.Response, startTime time.Time, originalModel string) (*antigravityStreamResult, error) { +func (s *AntigravityGatewayService) handleClaudeStreamToNonStreaming(c *gin.Context, resp *http.Response, 
startTime time.Time, originalModel string, accountID int64) (*antigravityStreamResult, error) { scanner := bufio.NewScanner(resp.Body) maxLineSize := defaultMaxLineSize if s.settingService.cfg != nil && s.settingService.cfg.Gateway.MaxLineSize > 0 { @@ -3574,6 +3574,9 @@ returnResponse: return nil, s.writeClaudeError(c, http.StatusBadGateway, "upstream_error", "Failed to parse upstream response") } + // Claude Max cache billing simulation (non-streaming) + claudeResp = applyClaudeMaxNonStreamingRewrite(c, claudeResp, agUsage, originalModel, accountID) + c.Data(http.StatusOK, "application/json", claudeResp) // 转换为 service.ClaudeUsage @@ -3588,7 +3591,7 @@ returnResponse: } // handleClaudeStreamingResponse 处理 Claude 流式响应(Gemini SSE → Claude SSE 转换) -func (s *AntigravityGatewayService) handleClaudeStreamingResponse(c *gin.Context, resp *http.Response, startTime time.Time, originalModel string) (*antigravityStreamResult, error) { +func (s *AntigravityGatewayService) handleClaudeStreamingResponse(c *gin.Context, resp *http.Response, startTime time.Time, originalModel string, accountID int64) (*antigravityStreamResult, error) { c.Header("Content-Type", "text/event-stream") c.Header("Cache-Control", "no-cache") c.Header("Connection", "keep-alive") @@ -3601,6 +3604,8 @@ func (s *AntigravityGatewayService) handleClaudeStreamingResponse(c *gin.Context } processor := antigravity.NewStreamingProcessor(originalModel) + setupClaudeMaxStreamingHook(c, processor, originalModel, accountID) + var firstTokenMs *int // 使用 Scanner 并限制单行大小,避免 ReadString 无上限导致 OOM scanner := bufio.NewScanner(resp.Body) diff --git a/backend/internal/service/antigravity_gateway_service_test.go b/backend/internal/service/antigravity_gateway_service_test.go index 84b65adc..cbecfee5 100644 --- a/backend/internal/service/antigravity_gateway_service_test.go +++ b/backend/internal/service/antigravity_gateway_service_test.go @@ -710,7 +710,7 @@ func TestHandleClaudeStreamingResponse_NormalComplete(t *testing.T) { 
fmt.Fprintln(pw, "") }() - result, err := svc.handleClaudeStreamingResponse(c, resp, time.Now(), "claude-sonnet-4-5") + result, err := svc.handleClaudeStreamingResponse(c, resp, time.Now(), "claude-sonnet-4-5", 0) _ = pr.Close() require.NoError(t, err) @@ -787,7 +787,7 @@ func TestHandleClaudeStreamingResponse_ThoughtsTokenCount(t *testing.T) { fmt.Fprintln(pw, "") }() - result, err := svc.handleClaudeStreamingResponse(c, resp, time.Now(), "gemini-2.5-pro") + result, err := svc.handleClaudeStreamingResponse(c, resp, time.Now(), "gemini-2.5-pro", 0) _ = pr.Close() require.NoError(t, err) @@ -990,7 +990,7 @@ func TestHandleClaudeStreamingResponse_ClientDisconnect(t *testing.T) { fmt.Fprintln(pw, "") }() - result, err := svc.handleClaudeStreamingResponse(c, resp, time.Now(), "claude-sonnet-4-5") + result, err := svc.handleClaudeStreamingResponse(c, resp, time.Now(), "claude-sonnet-4-5", 0) _ = pr.Close() require.NoError(t, err) @@ -1014,7 +1014,7 @@ func TestHandleClaudeStreamingResponse_ContextCanceled(t *testing.T) { resp := &http.Response{StatusCode: http.StatusOK, Body: cancelReadCloser{}, Header: http.Header{}} - result, err := svc.handleClaudeStreamingResponse(c, resp, time.Now(), "claude-sonnet-4-5") + result, err := svc.handleClaudeStreamingResponse(c, resp, time.Now(), "claude-sonnet-4-5", 0) require.NoError(t, err) require.NotNil(t, result) diff --git a/backend/internal/service/claude_max_cache_billing_policy.go b/backend/internal/service/claude_max_cache_billing_policy.go index 398c9ec8..64696d4d 100644 --- a/backend/internal/service/claude_max_cache_billing_policy.go +++ b/backend/internal/service/claude_max_cache_billing_policy.go @@ -10,46 +10,7 @@ import ( ) type claudeMaxCacheBillingOutcome struct { - Simulated bool - ForcedCache1H bool -} - -func applyClaudeMaxCacheBillingPolicy(input *RecordUsageInput) claudeMaxCacheBillingOutcome { - var out claudeMaxCacheBillingOutcome - if !shouldApplyClaudeMaxBillingRules(input) { - return out - } - - if input == 
nil || input.Result == nil { - return out - } - result := input.Result - usage := &result.Usage - accountID := int64(0) - if input.Account != nil { - accountID = input.Account.ID - } - - if hasCacheCreationTokens(*usage) { - // Upstream already returned cache creation usage; keep original usage. - return out - } - - if !shouldSimulateClaudeMaxUsage(input) { - return out - } - beforeInputTokens := usage.InputTokens - out.Simulated = safelyApplyClaudeMaxUsageSimulation(result, input.ParsedRequest) - if out.Simulated { - logger.LegacyPrintf("service.gateway", "simulate_claude_max_usage: model=%s account=%d input_tokens:%d->%d cache_creation_1h=%d", - result.Model, - accountID, - beforeInputTokens, - usage.InputTokens, - usage.CacheCreation1hTokens, - ) - } - return out + Simulated bool } // detectClaudeMaxCacheBillingOutcomeForUsage only returns whether Claude Max policy @@ -150,55 +111,18 @@ func shouldSimulateClaudeMaxUsage(input *RecordUsageInput) bool { } func shouldSimulateClaudeMaxUsageForUsage(usage ClaudeUsage, parsed *ParsedRequest) bool { - if !hasClaudeCacheSignals(parsed) { - return false - } if usage.InputTokens <= 0 { return false } if hasCacheCreationTokens(usage) { return false } + if !hasClaudeCacheSignals(parsed) { + return false + } return true } -func forceCacheCreationTo1H(usage *ClaudeUsage) bool { - if usage == nil || !hasCacheCreationTokens(*usage) { - return false - } - - before5m := usage.CacheCreation5mTokens - before1h := usage.CacheCreation1hTokens - beforeAgg := usage.CacheCreationInputTokens - - _ = applyCacheTTLOverride(usage, "1h") - total := usage.CacheCreation5mTokens + usage.CacheCreation1hTokens - if total <= 0 { - total = usage.CacheCreationInputTokens - } - if total <= 0 { - return false - } - - usage.CacheCreation5mTokens = 0 - usage.CacheCreation1hTokens = total - usage.CacheCreationInputTokens = total - - return before5m != usage.CacheCreation5mTokens || - before1h != usage.CacheCreation1hTokens || - beforeAgg != 
usage.CacheCreationInputTokens -} - -func safelyApplyClaudeMaxUsageSimulation(result *ForwardResult, parsed *ParsedRequest) (changed bool) { - defer func() { - if r := recover(); r != nil { - logger.LegacyPrintf("service.gateway", "simulate_claude_max_usage skipped: panic=%v", r) - changed = false - } - }() - return applyClaudeMaxUsageSimulation(result, parsed) -} - func safelyProjectUsageToClaudeMax1H(usage *ClaudeUsage, parsed *ParsedRequest) (changed bool) { defer func() { if r := recover(); r != nil { @@ -209,23 +133,6 @@ func safelyProjectUsageToClaudeMax1H(usage *ClaudeUsage, parsed *ParsedRequest) return projectUsageToClaudeMax1H(usage, parsed) } -func safelyForceCacheCreationTo1H(usage *ClaudeUsage) (changed bool) { - defer func() { - if r := recover(); r != nil { - logger.LegacyPrintf("service.gateway", "force_cache_creation_1h skipped: panic=%v", r) - changed = false - } - }() - return forceCacheCreationTo1H(usage) -} - -func applyClaudeMaxUsageSimulation(result *ForwardResult, parsed *ParsedRequest) bool { - if result == nil { - return false - } - return projectUsageToClaudeMax1H(&result.Usage, parsed) -} - func projectUsageToClaudeMax1H(usage *ClaudeUsage, parsed *ParsedRequest) bool { if usage == nil { return false diff --git a/backend/internal/service/gateway_claude_max_response_helpers.go b/backend/internal/service/gateway_claude_max_response_helpers.go index b4c7e819..a5f5f3d2 100644 --- a/backend/internal/service/gateway_claude_max_response_helpers.go +++ b/backend/internal/service/gateway_claude_max_response_helpers.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" + "github.com/Wei-Shaw/sub2api/internal/pkg/antigravity" "github.com/gin-gonic/gin" "github.com/tidwall/sjson" ) @@ -51,6 +52,18 @@ func claudeMaxGroupFromGinContext(c *gin.Context) *Group { return apiKey.Group } +func parsedRequestFromGinContext(c *gin.Context) *ParsedRequest { + if c == nil { + return nil + } + raw, exists := c.Get("parsed_request") + if !exists { + return nil + } 
+ parsed, _ := raw.(*ParsedRequest) + return parsed +} + func applyClaudeMaxSimulationToUsage(ctx context.Context, usage *ClaudeUsage, model string, accountID int64) claudeMaxCacheBillingOutcome { var out claudeMaxCacheBillingOutcome if usage == nil { @@ -145,3 +158,39 @@ func usageIntFromAny(v any) int { } return 0 } + +// setupClaudeMaxStreamingHook 为 Antigravity 流式路径设置 SSE usage 改写 hook。 +func setupClaudeMaxStreamingHook(c *gin.Context, processor *antigravity.StreamingProcessor, originalModel string, accountID int64) { + group := claudeMaxGroupFromGinContext(c) + parsed := parsedRequestFromGinContext(c) + if !shouldApplyClaudeMaxBillingRulesForUsage(group, originalModel, parsed) { + return + } + processor.SetUsageMapHook(func(usageMap map[string]any) { + svcUsage := claudeUsageFromJSONMap(usageMap) + outcome := applyClaudeMaxCacheBillingPolicyToUsage(&svcUsage, parsed, group, originalModel, accountID) + if outcome.Simulated { + rewriteClaudeUsageJSONMap(usageMap, svcUsage) + } + }) +} + +// applyClaudeMaxNonStreamingRewrite 为 Antigravity 非流式路径改写响应体中的 usage。 +func applyClaudeMaxNonStreamingRewrite(c *gin.Context, claudeResp []byte, agUsage *antigravity.ClaudeUsage, originalModel string, accountID int64) []byte { + group := claudeMaxGroupFromGinContext(c) + parsed := parsedRequestFromGinContext(c) + if !shouldApplyClaudeMaxBillingRulesForUsage(group, originalModel, parsed) { + return claudeResp + } + svcUsage := &ClaudeUsage{ + InputTokens: agUsage.InputTokens, + OutputTokens: agUsage.OutputTokens, + CacheCreationInputTokens: agUsage.CacheCreationInputTokens, + CacheReadInputTokens: agUsage.CacheReadInputTokens, + } + outcome := applyClaudeMaxCacheBillingPolicyToUsage(svcUsage, parsed, group, originalModel, accountID) + if outcome.Simulated { + return rewriteClaudeUsageJSONBytes(claudeResp, *svcUsage) + } + return claudeResp +} diff --git a/backend/internal/service/gateway_record_usage_claude_max_test.go 
b/backend/internal/service/gateway_record_usage_claude_max_test.go index 7bee1b0f..2e1b5ae7 100644 --- a/backend/internal/service/gateway_record_usage_claude_max_test.go +++ b/backend/internal/service/gateway_record_usage_claude_max_test.go @@ -32,7 +32,7 @@ func newGatewayServiceForRecordUsageTest(repo UsageLogRepository) *GatewayServic } } -func TestRecordUsage_SimulateClaudeMaxEnabled_DoesNotProjectAndSkipsTTLOverride(t *testing.T) { +func TestRecordUsage_SimulateClaudeMaxEnabled_ProjectsUsageAndSkipsTTLOverride(t *testing.T) { repo := &usageLogRepoRecordUsageStub{inserted: true} svc := newGatewayServiceForRecordUsageTest(repo) @@ -195,5 +195,5 @@ func TestRecordUsage_SimulateClaudeMaxEnabled_ExistingCacheCreationBypassesSimul require.Equal(t, 120, log.CacheCreation5mTokens) require.Equal(t, 0, log.CacheCreation1hTokens) require.Equal(t, 120, log.CacheCreationTokens) - require.True(t, log.CacheTTLOverridden, "existing cache_creation should remain under normal account ttl flow") + require.True(t, log.CacheTTLOverridden, "existing cache_creation with SimulateClaudeMax enabled should apply account ttl override") } diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index e025c6d9..2b47509e 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -5481,7 +5481,7 @@ func (s *GatewayService) handleNonStreamingResponse(ctx context.Context, resp *h } // Cache TTL Override: 重写 non-streaming 响应中的 cache_creation 分类 - if account.IsCacheTTLOverrideEnabled() && !claudeMaxOutcome.Simulated && !claudeMaxOutcome.ForcedCache1H { + if account.IsCacheTTLOverrideEnabled() && !claudeMaxOutcome.Simulated { overrideTarget := account.GetCacheTTLOverrideTarget() if applyCacheTTLOverride(&response.Usage, overrideTarget) { // 同步更新 body JSON 中的嵌套 cache_creation 对象 @@ -5623,18 +5623,18 @@ func (s *GatewayService) RecordUsage(ctx context.Context, input *RecordUsageInpu 
result.Usage.InputTokens = 0 } - // Claude Max cache billing policy (group-level): RecordUsage only checks outcome. + // Claude Max cache billing policy (group-level): + // - GatewayService 路径: Forward 已改写 usage(含 cache tokens)→ apply 见到 cache tokens 跳过 → simulatedClaudeMax=true(通过第二条件) + // - Antigravity 路径: Forward 中 hook 改写了客户端 SSE,但 ForwardResult.Usage 是原始值 → apply 实际执行模拟 → simulatedClaudeMax=true var apiKeyGroup *Group if apiKey != nil { apiKeyGroup = apiKey.Group } claudeMaxOutcome := detectClaudeMaxCacheBillingOutcomeForUsage(result.Usage, input.ParsedRequest, apiKeyGroup, result.Model) simulatedClaudeMax := claudeMaxOutcome.Simulated - forcedClaudeMax1H := claudeMaxOutcome.ForcedCache1H - // Cache TTL Override: 确保计费时 token 分类与账号设置一致 - cacheTTLOverridden := forcedClaudeMax1H - if account.IsCacheTTLOverrideEnabled() && !simulatedClaudeMax && !forcedClaudeMax1H { + cacheTTLOverridden := false + if account.IsCacheTTLOverrideEnabled() && !simulatedClaudeMax { applyCacheTTLOverride(&result.Usage, account.GetCacheTTLOverrideTarget()) cacheTTLOverridden = (result.Usage.CacheCreation5mTokens + result.Usage.CacheCreation1hTokens) > 0 } From ec576fdbde9a70b0735efc0804dee4fb7af17e47 Mon Sep 17 00:00:00 2001 From: erio Date: Fri, 27 Feb 2026 19:59:51 +0800 Subject: [PATCH 108/175] chore: bump version to 0.1.86.8 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 131dd491..90233132 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.86.7 +0.1.86.8 From 81d896bf78da72fa46f8af68958a30dd0e1e99c7 Mon Sep 17 00:00:00 2001 From: erio Date: Fri, 27 Feb 2026 20:42:53 +0800 Subject: [PATCH 109/175] fix: sync Antigravity ForwardResult.Usage with client response simulation Apply Claude Max cache billing to usage before returning ForwardResult in Antigravity Forward, ensuring RecordUsage gets the same simulated usage that clients see. 
Restore apply+fallback in RecordUsage for consistency across GatewayService and Antigravity paths. --- backend/internal/service/antigravity_gateway_service.go | 3 +++ .../service/gateway_record_usage_claude_max_test.go | 8 ++++---- backend/internal/service/gateway_service.go | 5 +++-- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 4922de3c..fa5b477b 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -1619,6 +1619,9 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, firstTokenMs = streamRes.firstTokenMs } + // Claude Max cache billing: 同步 ForwardResult.Usage 与客户端响应体一致 + applyClaudeMaxCacheBillingPolicyToUsage(usage, parsedRequestFromGinContext(c), claudeMaxGroupFromGinContext(c), originalModel, account.ID) + return &ForwardResult{ RequestID: requestID, Usage: *usage, diff --git a/backend/internal/service/gateway_record_usage_claude_max_test.go b/backend/internal/service/gateway_record_usage_claude_max_test.go index 2e1b5ae7..3cd86938 100644 --- a/backend/internal/service/gateway_record_usage_claude_max_test.go +++ b/backend/internal/service/gateway_record_usage_claude_max_test.go @@ -92,10 +92,10 @@ func TestRecordUsage_SimulateClaudeMaxEnabled_ProjectsUsageAndSkipsTTLOverride(t require.NotNil(t, repo.last) log := repo.last - require.Equal(t, 160, log.InputTokens) - require.Equal(t, 0, log.CacheCreationTokens) + require.Equal(t, 80, log.InputTokens) + require.Equal(t, 80, log.CacheCreationTokens) require.Equal(t, 0, log.CacheCreation5mTokens) - require.Equal(t, 0, log.CacheCreation1hTokens) + require.Equal(t, 80, log.CacheCreation1hTokens) require.False(t, log.CacheTTLOverridden, "simulate outcome should skip account ttl override") } @@ -195,5 +195,5 @@ func 
TestRecordUsage_SimulateClaudeMaxEnabled_ExistingCacheCreationBypassesSimul require.Equal(t, 120, log.CacheCreation5mTokens) require.Equal(t, 0, log.CacheCreation1hTokens) require.Equal(t, 120, log.CacheCreationTokens) - require.True(t, log.CacheTTLOverridden, "existing cache_creation with SimulateClaudeMax enabled should apply account ttl override") + require.False(t, log.CacheTTLOverridden, "existing cache_creation with SimulateClaudeMax enabled should skip account ttl override") } diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 2b47509e..af34d472 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -5630,8 +5630,9 @@ func (s *GatewayService) RecordUsage(ctx context.Context, input *RecordUsageInpu if apiKey != nil { apiKeyGroup = apiKey.Group } - claudeMaxOutcome := detectClaudeMaxCacheBillingOutcomeForUsage(result.Usage, input.ParsedRequest, apiKeyGroup, result.Model) - simulatedClaudeMax := claudeMaxOutcome.Simulated + claudeMaxOutcome := applyClaudeMaxCacheBillingPolicyToUsage(&result.Usage, input.ParsedRequest, apiKeyGroup, result.Model, account.ID) + simulatedClaudeMax := claudeMaxOutcome.Simulated || + (shouldApplyClaudeMaxBillingRulesForUsage(apiKeyGroup, result.Model, input.ParsedRequest) && hasCacheCreationTokens(result.Usage)) // Cache TTL Override: 确保计费时 token 分类与账号设置一致 cacheTTLOverridden := false if account.IsCacheTTLOverrideEnabled() && !simulatedClaudeMax { From e0b4b00dc1007db49c6913d4fb05f1cf60b40096 Mon Sep 17 00:00:00 2001 From: erio Date: Fri, 27 Feb 2026 20:45:52 +0800 Subject: [PATCH 110/175] chore: bump version to 0.1.86.9 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 90233132..2276f1c7 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.86.8 +0.1.86.9 From 
3382d496e3986162a00333aa239752b3c132264f Mon Sep 17 00:00:00 2001 From: erio Date: Fri, 27 Feb 2026 20:56:33 +0800 Subject: [PATCH 111/175] refactor: remove unused detectClaudeMaxCacheBillingOutcomeForUsage function --- .../service/claude_max_cache_billing_policy.go | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/backend/internal/service/claude_max_cache_billing_policy.go b/backend/internal/service/claude_max_cache_billing_policy.go index 64696d4d..2381915e 100644 --- a/backend/internal/service/claude_max_cache_billing_policy.go +++ b/backend/internal/service/claude_max_cache_billing_policy.go @@ -13,23 +13,6 @@ type claudeMaxCacheBillingOutcome struct { Simulated bool } -// detectClaudeMaxCacheBillingOutcomeForUsage only returns whether Claude Max policy -// should influence downstream override decisions. It does not mutate usage. -func detectClaudeMaxCacheBillingOutcomeForUsage(usage ClaudeUsage, parsed *ParsedRequest, group *Group, model string) claudeMaxCacheBillingOutcome { - var out claudeMaxCacheBillingOutcome - if !shouldApplyClaudeMaxBillingRulesForUsage(group, model, parsed) { - return out - } - if hasCacheCreationTokens(usage) { - // Upstream already returned cache creation usage; keep original usage. 
- return out - } - if shouldSimulateClaudeMaxUsageForUsage(usage, parsed) { - out.Simulated = true - } - return out -} - func applyClaudeMaxCacheBillingPolicyToUsage(usage *ClaudeUsage, parsed *ParsedRequest, group *Group, model string, accountID int64) claudeMaxCacheBillingOutcome { var out claudeMaxCacheBillingOutcome if usage == nil || !shouldApplyClaudeMaxBillingRulesForUsage(group, model, parsed) { From cc3cf1d70a2e09258fb84da2ac10b58fe0ba5d3d Mon Sep 17 00:00:00 2001 From: erio Date: Fri, 27 Feb 2026 21:02:24 +0800 Subject: [PATCH 112/175] chore: bump version to 0.1.86.10 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 2276f1c7..5c05744f 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.86.9 +0.1.86.10 From 9555a99d1c936456154ae8924af9c8df479d6c5f Mon Sep 17 00:00:00 2001 From: erio Date: Fri, 27 Feb 2026 21:24:03 +0800 Subject: [PATCH 113/175] chore: bump version to 0.1.87.1 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 5c05744f..4a5cf8ff 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.86.10 +0.1.87.1 From 14c80d26c6867a9be7ff01060ac393a944049af6 Mon Sep 17 00:00:00 2001 From: erio Date: Sat, 28 Feb 2026 10:37:25 +0800 Subject: [PATCH 114/175] fix: unify purchase url for iframe and new-tab --- .../views/user/PurchaseSubscriptionView.vue | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/frontend/src/views/user/PurchaseSubscriptionView.vue b/frontend/src/views/user/PurchaseSubscriptionView.vue index 55bcf307..f5612b96 100644 --- a/frontend/src/views/user/PurchaseSubscriptionView.vue +++ b/frontend/src/views/user/PurchaseSubscriptionView.vue @@ -80,11 +80,15 @@ import { computed, onMounted, ref } from 'vue' import { useI18n } 
from 'vue-i18n' import { useAppStore } from '@/stores' +import { useAuthStore } from '@/stores/auth' import AppLayout from '@/components/layout/AppLayout.vue' import Icon from '@/components/icons/Icon.vue' const { t } = useI18n() const appStore = useAppStore() +const authStore = useAuthStore() + +const PURCHASE_USER_ID_QUERY_KEY = 'user_id' const loading = ref(false) @@ -92,8 +96,21 @@ const purchaseEnabled = computed(() => { return appStore.cachedPublicSettings?.purchase_subscription_enabled ?? false }) +function buildPurchaseUrl(baseUrl: string, userId?: number): string { + if (!baseUrl || !userId) return baseUrl + try { + const url = new URL(baseUrl) + url.searchParams.set(PURCHASE_USER_ID_QUERY_KEY, String(userId)) + return url.toString() + } catch { + const separator = baseUrl.includes('?') ? '&' : '?' + return `${baseUrl}${separator}${PURCHASE_USER_ID_QUERY_KEY}=${encodeURIComponent(String(userId))}` + } +} + const purchaseUrl = computed(() => { - return (appStore.cachedPublicSettings?.purchase_subscription_url || '').trim() + const baseUrl = (appStore.cachedPublicSettings?.purchase_subscription_url || '').trim() + return buildPurchaseUrl(baseUrl, authStore.user?.id) }) const isValidUrl = computed(() => { From 94bdde32bbcd902a78c79d030e32b58300ed61fd Mon Sep 17 00:00:00 2001 From: erio Date: Sat, 28 Feb 2026 10:37:42 +0800 Subject: [PATCH 115/175] chore: bump version to 0.1.87.2 --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 4a5cf8ff..f85978d8 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.87.1 +0.1.87.2 \ No newline at end of file From 65e0c1b25801617e2c4015d4c62e1b0ecffb85c7 Mon Sep 17 00:00:00 2001 From: erio Date: Sat, 28 Feb 2026 14:08:53 +0800 Subject: [PATCH 116/175] fix: pass theme to purchase iframe and optimize embed layout --- .../views/user/PurchaseSubscriptionView.vue | 89 ++++++++++++++++--- 1 
file changed, 79 insertions(+), 10 deletions(-) diff --git a/frontend/src/views/user/PurchaseSubscriptionView.vue b/frontend/src/views/user/PurchaseSubscriptionView.vue index f5612b96..30f3893b 100644 --- a/frontend/src/views/user/PurchaseSubscriptionView.vue +++ b/frontend/src/views/user/PurchaseSubscriptionView.vue @@ -1,4 +1,4 @@ -