Merge tag 'v0.1.90' into merge/upstream-v0.1.90

注册邮箱域名白名单策略上线,后台大数据场景性能大幅优化。

- 注册邮箱域名白名单:支持管理员配置允许注册的邮箱域名策略
- Keys 页面表单筛选:用户 /keys 页面支持按条件筛选 API Key
- Settings 页面分 Tab 拆分:管理后台设置页面按功能模块分 Tab 展示

- 后台大数据场景加载性能优化:仪表盘/用户/账号/Ops 页面大数据集加载显著提速
- Usage 大表分页优化:默认避免全量 COUNT(*),大幅降低分页查询耗时
- 消除重复的 normalizeAccountIDList,补充新增组件的单元测试
- 清理无用文件和过时文档,精简项目结构
- EmailVerifyView 硬编码英文字符串替换为 i18n 调用

- 修复 Anthropic 平台无限流重置时间的 429 误标记账号限流问题
- 修复自定义菜单页面管理员视角菜单不生效问题
- 修复 Ops 错误详情弹窗未展示真实上游 payload 的问题
- 修复充值/订阅菜单 icon 显示问题

# Conflicts:
#	.gitignore
#	backend/cmd/server/VERSION
#	backend/ent/group.go
#	backend/ent/runtime/runtime.go
#	backend/ent/schema/group.go
#	backend/go.sum
#	backend/internal/handler/admin/account_handler.go
#	backend/internal/handler/admin/dashboard_handler.go
#	backend/internal/pkg/usagestats/usage_log_types.go
#	backend/internal/repository/group_repo.go
#	backend/internal/repository/usage_log_repo.go
#	backend/internal/server/middleware/security_headers.go
#	backend/internal/server/router.go
#	backend/internal/service/account_usage_service.go
#	backend/internal/service/admin_service_bulk_update_test.go
#	backend/internal/service/dashboard_service.go
#	backend/internal/service/gateway_service.go
#	frontend/src/api/admin/dashboard.ts
#	frontend/src/components/account/BulkEditAccountModal.vue
#	frontend/src/components/charts/GroupDistributionChart.vue
#	frontend/src/components/layout/AppSidebar.vue
#	frontend/src/i18n/locales/en.ts
#	frontend/src/i18n/locales/zh.ts
#	frontend/src/views/admin/GroupsView.vue
#	frontend/src/views/admin/SettingsView.vue
#	frontend/src/views/admin/UsageView.vue
#	frontend/src/views/user/PurchaseSubscriptionView.vue
This commit is contained in:
erio
2026-03-04 19:58:38 +08:00
461 changed files with 63392 additions and 6617 deletions

View File

@@ -43,4 +43,4 @@ SET credentials = jsonb_set(
)
WHERE platform = 'antigravity'
AND deleted_at IS NULL
AND credentials->'model_mapping' IS NOT NULL;
AND credentials->'model_mapping' IS NOT NULL;

View File

@@ -0,0 +1,2 @@
-- Persist the exact OpenAI WebSocket transport type on each usage_logs row
-- (legacy rows default to FALSE).
ALTER TABLE usage_logs
    ADD COLUMN IF NOT EXISTS openai_ws_mode BOOLEAN NOT NULL DEFAULT FALSE;

View File

@@ -0,0 +1,65 @@
-- Introduce a request_type discriminator on usage_logs while keeping the
-- legacy stream/openai_ws_mode flags readable for compatibility.
-- 0 = unknown/legacy (resolved via fallback logic in repository filters).
ALTER TABLE usage_logs
    ADD COLUMN IF NOT EXISTS request_type SMALLINT NOT NULL DEFAULT 0;
-- Idempotently enforce the known request_type values (0..3).
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1
        FROM pg_constraint
        WHERE conname = 'usage_logs_request_type_check'
          -- Scope the lookup to usage_logs: matching on conname alone
          -- searches the whole database, so an unrelated constraint with
          -- the same name on another table would silently skip the ADD.
          AND conrelid = 'usage_logs'::regclass
    ) THEN
        ALTER TABLE usage_logs
            ADD CONSTRAINT usage_logs_request_type_check
            CHECK (request_type IN (0, 1, 2, 3));
    END IF;
END
$$;
-- Composite index backing request_type + time-range filters on usage_logs.
CREATE INDEX IF NOT EXISTS idx_usage_logs_request_type_created_at
    ON usage_logs (request_type, created_at);
-- Backfill request_type from the legacy flags in bounded batches.
-- Why bounded:
--   1) An unbatched UPDATE over a large usage_logs table could block
--      startup for a long time.
--   2) Rows left at request_type=0 remain query-compatible through the
--      legacy stream/openai_ws_mode fallback in repository filters.
--   3) New writes carry an explicit request_type, so the share of
--      historical unknown rows shrinks over time.
--
-- Priority: openai_ws_mode wins over stream.
DO $$
DECLARE
    batch_rows  INTEGER     := 0;
    total_rows  INTEGER     := 0;
    batch_limit INTEGER     := 5000;
    started_at  TIMESTAMPTZ := clock_timestamp();
    time_budget INTERVAL    := INTERVAL '8 seconds';
BEGIN
    LOOP
        WITH pending AS (
            SELECT id
            FROM usage_logs
            WHERE request_type = 0
            ORDER BY id
            LIMIT batch_limit
        )
        UPDATE usage_logs ul
        SET request_type = CASE
            WHEN ul.openai_ws_mode = TRUE THEN 3 -- OpenAI WS transport
            WHEN ul.stream = TRUE THEN 2         -- streaming request
            ELSE 1                               -- plain non-streaming request
        END
        FROM pending
        WHERE ul.id = pending.id;

        GET DIAGNOSTICS batch_rows = ROW_COUNT;
        EXIT WHEN batch_rows = 0;
        total_rows := total_rows + batch_rows;
        -- Stop once the time budget is spent; untouched rows stay at 0 and
        -- are still served correctly by the legacy fallback.
        EXIT WHEN clock_timestamp() - started_at >= time_budget;
    END LOOP;
    RAISE NOTICE 'usage_logs.request_type startup backfill rows=%', total_rows;
END
$$;

View File

@@ -0,0 +1,15 @@
-- NOTE(review): CONCURRENTLY builds cannot run inside a transaction, so this
-- migration must follow the *_notx.sql convention described in the README.

-- Hot path: schedulable, active accounts looked up by platform.
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_accounts_schedulable_hot
    ON accounts (platform, priority)
    WHERE deleted_at IS NULL AND status = 'active' AND schedulable = true;

-- Broader scans over schedulable accounts ordered by priority/status.
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_accounts_active_schedulable
    ON accounts (priority, status)
    WHERE deleted_at IS NULL AND schedulable = true;

-- Per-user subscription lookups filtered by status and expiry.
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_user_subscriptions_user_status_expires_active
    ON user_subscriptions (user_id, status, expires_at)
    WHERE deleted_at IS NULL;

-- Group-scoped usage queries; partial index skips ungrouped rows.
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_usage_logs_group_created_at_not_null
    ON usage_logs (group_id, created_at)
    WHERE group_id IS NOT NULL;

View File

@@ -0,0 +1,56 @@
-- Migration: 063_add_sora_client_tables
-- Database changes required by the Sora client feature:
-- 1. New sora_generations table: records generation history for the Sora client UI
-- 2. New storage-quota columns on users
-- 3. New storage-quota column on groups
-- ============================================================
-- 1. sora_generations table (generation records)
-- ============================================================
CREATE TABLE IF NOT EXISTS sora_generations (
id BIGSERIAL PRIMARY KEY,
user_id BIGINT NOT NULL REFERENCES users(id) ON DELETE CASCADE,
-- API key used for the generation; nullable, no FK by design (review note:
-- presumably kept even after the key is deleted — confirm with callers)
api_key_id BIGINT,
-- Generation parameters
model VARCHAR(64) NOT NULL,
prompt TEXT NOT NULL DEFAULT '',
media_type VARCHAR(16) NOT NULL DEFAULT 'video', -- video / image
-- Result
status VARCHAR(16) NOT NULL DEFAULT 'pending', -- pending / generating / completed / failed / cancelled
media_url TEXT NOT NULL DEFAULT '',
media_urls JSONB, -- URL array when a generation produces multiple images
file_size_bytes BIGINT NOT NULL DEFAULT 0,
storage_type VARCHAR(16) NOT NULL DEFAULT 'none', -- s3 / local / upstream / none
s3_object_keys JSONB, -- array of S3 object keys
-- Upstream info
upstream_task_id VARCHAR(128) NOT NULL DEFAULT '',
error_message TEXT NOT NULL DEFAULT '',
-- Timestamps
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
completed_at TIMESTAMPTZ
);
-- Query by user + time (media library listing, generation history)
CREATE INDEX IF NOT EXISTS idx_sora_gen_user_created
ON sora_generations(user_id, created_at DESC);
-- Query by user + status (resuming in-progress tasks)
CREATE INDEX IF NOT EXISTS idx_sora_gen_user_status
ON sora_generations(user_id, status);
-- ============================================================
-- 2. Sora storage-quota columns on users (quota and usage in bytes;
--    0 = default/unset)
-- ============================================================
ALTER TABLE users
ADD COLUMN IF NOT EXISTS sora_storage_quota_bytes BIGINT NOT NULL DEFAULT 0,
ADD COLUMN IF NOT EXISTS sora_storage_used_bytes BIGINT NOT NULL DEFAULT 0;
-- ============================================================
-- 3. Sora storage-quota column on groups
-- ============================================================
ALTER TABLE groups
ADD COLUMN IF NOT EXISTS sora_storage_quota_bytes BIGINT NOT NULL DEFAULT 0;

View File

@@ -0,0 +1,15 @@
-- Add per-key rate-limit fields to api_keys (0 = unlimited).
-- A single ALTER TABLE with multiple ADD COLUMN clauses is equivalent to
-- the per-column form and takes the table lock once.
ALTER TABLE api_keys
    -- Configured limits per rolling window
    ADD COLUMN IF NOT EXISTS rate_limit_5h DECIMAL(20,8) NOT NULL DEFAULT 0,
    ADD COLUMN IF NOT EXISTS rate_limit_1d DECIMAL(20,8) NOT NULL DEFAULT 0,
    ADD COLUMN IF NOT EXISTS rate_limit_7d DECIMAL(20,8) NOT NULL DEFAULT 0,
    -- Accumulated usage inside the current window
    ADD COLUMN IF NOT EXISTS usage_5h DECIMAL(20,8) NOT NULL DEFAULT 0,
    ADD COLUMN IF NOT EXISTS usage_1d DECIMAL(20,8) NOT NULL DEFAULT 0,
    ADD COLUMN IF NOT EXISTS usage_7d DECIMAL(20,8) NOT NULL DEFAULT 0,
    -- Window start times (NULL until the window first opens)
    ADD COLUMN IF NOT EXISTS window_5h_start TIMESTAMPTZ,
    ADD COLUMN IF NOT EXISTS window_1d_start TIMESTAMPTZ,
    ADD COLUMN IF NOT EXISTS window_7d_start TIMESTAMPTZ;

View File

@@ -0,0 +1,33 @@
-- Improve admin fuzzy-search performance on large datasets.
-- Best effort:
-- 1) try enabling pg_trgm
-- 2) only create trigram indexes when extension is available
DO $$
BEGIN
BEGIN
CREATE EXTENSION IF NOT EXISTS pg_trgm;
EXCEPTION
-- CREATE EXTENSION can fail when the role lacks privileges or the
-- extension is not installed on the host; log and continue rather
-- than failing the whole migration.
WHEN OTHERS THEN
RAISE NOTICE 'pg_trgm extension not created: %', SQLERRM;
END;
IF EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'pg_trgm') THEN
-- GIN trigram indexes; the gin_trgm_ops operator class only parses
-- once pg_trgm exists, hence the EXECUTE of dynamic SQL here.
EXECUTE 'CREATE INDEX IF NOT EXISTS idx_users_email_trgm
ON users USING gin (email gin_trgm_ops)';
EXECUTE 'CREATE INDEX IF NOT EXISTS idx_users_username_trgm
ON users USING gin (username gin_trgm_ops)';
EXECUTE 'CREATE INDEX IF NOT EXISTS idx_users_notes_trgm
ON users USING gin (notes gin_trgm_ops)';
EXECUTE 'CREATE INDEX IF NOT EXISTS idx_accounts_name_trgm
ON accounts USING gin (name gin_trgm_ops)';
-- "key" is quoted because it collides with the KEY keyword.
EXECUTE 'CREATE INDEX IF NOT EXISTS idx_api_keys_key_trgm
ON api_keys USING gin ("key" gin_trgm_ops)';
EXECUTE 'CREATE INDEX IF NOT EXISTS idx_api_keys_name_trgm
ON api_keys USING gin (name gin_trgm_ops)';
ELSE
RAISE NOTICE 'skip trigram indexes because pg_trgm is unavailable';
END IF;
END
$$;

View File

@@ -12,6 +12,26 @@ Format: `NNN_description.sql`
Example: `017_add_gemini_tier_id.sql`
### `_notx.sql` 命名与执行语义(并发索引专用)
当迁移包含 `CREATE INDEX CONCURRENTLY` 或 `DROP INDEX CONCURRENTLY` 时,必须使用 `_notx.sql` 后缀,例如:
- `062_add_accounts_priority_indexes_notx.sql`
- `063_drop_legacy_indexes_notx.sql`
运行规则:
1. `*.sql`(不带 `_notx`)按事务执行。
2. `*_notx.sql` 按非事务执行,不会包裹在 `BEGIN/COMMIT` 中。
3. `*_notx.sql` 仅允许并发索引语句,不允许混入事务控制语句或其他 DDL/DML。
幂等要求(必须):
- 创建索引:`CREATE INDEX CONCURRENTLY IF NOT EXISTS ...`
- 删除索引:`DROP INDEX CONCURRENTLY IF EXISTS ...`
这样可以保证灾备重放、重复执行时不会因对象已存在/不存在而失败。
## Migration File Structure
```sql