// sub2api/backend/internal/service/concurrency_service.go
package service
import (
"context"
"crypto/rand"
"encoding/hex"
"fmt"
"log"
"time"
)
// ConcurrencyCache defines the cache interface for concurrency control.
// Slots are stored in a sorted set; expired entries are cleaned up by timestamp.
type ConcurrencyCache interface {
// Account slot management
// Key format: concurrency:account:{accountID} (sorted set, members are request IDs)
AcquireAccountSlot(ctx context.Context, accountID int64, maxConcurrency int, requestID string) (bool, error)
ReleaseAccountSlot(ctx context.Context, accountID int64, requestID string) error
GetAccountConcurrency(ctx context.Context, accountID int64) (int, error)
// Account-level wait queue
IncrementAccountWaitCount(ctx context.Context, accountID int64, maxWait int) (bool, error)
DecrementAccountWaitCount(ctx context.Context, accountID int64) error
GetAccountWaitingCount(ctx context.Context, accountID int64) (int, error)
// User slot management
// Key format: concurrency:user:{userID} (sorted set, members are request IDs)
AcquireUserSlot(ctx context.Context, userID int64, maxConcurrency int, requestID string) (bool, error)
ReleaseUserSlot(ctx context.Context, userID int64, requestID string) error
GetUserConcurrency(ctx context.Context, userID int64) (int, error)
// Wait queue counters (TTL is set only when the counter is first created)
IncrementWaitCount(ctx context.Context, userID int64, maxWait int) (bool, error)
DecrementWaitCount(ctx context.Context, userID int64) error
GetTotalWaitCount(ctx context.Context) (int, error)
// Batch load query (read-only)
GetAccountsLoadBatch(ctx context.Context, accounts []AccountWithConcurrency) (map[int64]*AccountLoadInfo, error)
// Cleanup of expired slots (background task)
CleanupExpiredAccountSlots(ctx context.Context, accountID int64) error
}
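// The concrete implementation lives in the cache/repository layer. As a rough sketch (an
// assumption, not the actual code), acquiring an account slot with a Redis sorted set would
// run roughly the following commands atomically (e.g. inside a Lua script):
//
//	ZREMRANGEBYSCORE concurrency:account:{accountID} 0 <now-slotTTL>    // drop expired slots
//	ZCARD            concurrency:account:{accountID}                    // current concurrency
//	ZADD             concurrency:account:{accountID} <now> <requestID>  // only if below the limit
//	EXPIRE           concurrency:account:{accountID} <slotTTL>
//
// Releasing a slot is then a ZREM of the requestID, and CleanupExpiredAccountSlots re-runs the
// ZREMRANGEBYSCORE step to drop entries left behind by crashed or timed-out requests.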
// generateRequestID generates a unique request ID for concurrency slot tracking
// Uses 8 random bytes (16 hex chars) for uniqueness
func generateRequestID() string {
b := make([]byte, 8)
if _, err := rand.Read(b); err != nil {
// Fallback to nanosecond timestamp (extremely rare case)
return fmt.Sprintf("%x", time.Now().UnixNano())
}
return hex.EncodeToString(b)
}
const (
// Default extra wait slots beyond concurrency limit
defaultExtraWaitSlots = 20
)
// ConcurrencyService manages concurrent request limiting for accounts and users
type ConcurrencyService struct {
cache ConcurrencyCache
}
// NewConcurrencyService creates a new ConcurrencyService
func NewConcurrencyService(cache ConcurrencyCache) *ConcurrencyService {
return &ConcurrencyService{cache: cache}
}
// AcquireResult represents the result of acquiring a concurrency slot
type AcquireResult struct {
Acquired bool
ReleaseFunc func() // Must be called when done (typically via defer); nil when Acquired is false
}
type AccountWithConcurrency struct {
ID int64
MaxConcurrency int
}
type AccountLoadInfo struct {
AccountID int64
CurrentConcurrency int
WaitingCount int
LoadRate int // 0-100+ (percent)
}
// AcquireAccountSlot attempts to acquire a concurrency slot for an account.
// If the account is already at max concurrency, no slot is acquired (Acquired is false);
// the caller decides whether to queue, retry, or reject.
// Returns a release function that MUST be called when the request completes.
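//
// Typical usage (a sketch; svc, account, and errAccountBusy are illustrative names):
//
//	res, err := svc.AcquireAccountSlot(ctx, account.ID, account.MaxConcurrency)
//	if err != nil {
//		return err
//	}
//	if !res.Acquired {
//		// Account saturated: enqueue via IncrementAccountWaitCount and retry, or reject.
//		return errAccountBusy
//	}
//	defer res.ReleaseFunc()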
func (s *ConcurrencyService) AcquireAccountSlot(ctx context.Context, accountID int64, maxConcurrency int) (*AcquireResult, error) {
// If maxConcurrency is 0 or negative, no limit
if maxConcurrency <= 0 {
return &AcquireResult{
Acquired: true,
ReleaseFunc: func() {}, // no-op
}, nil
}
// Generate unique request ID for this slot
requestID := generateRequestID()
acquired, err := s.cache.AcquireAccountSlot(ctx, accountID, maxConcurrency, requestID)
if err != nil {
return nil, err
}
if acquired {
return &AcquireResult{
Acquired: true,
ReleaseFunc: func() {
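// Use a fresh background context so the slot is released even if the
// request's context has already been cancelled.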
bgCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if err := s.cache.ReleaseAccountSlot(bgCtx, accountID, requestID); err != nil {
log.Printf("Warning: failed to release account slot for %d (req=%s): %v", accountID, requestID, err)
}
},
}, nil
}
return &AcquireResult{
Acquired: false,
ReleaseFunc: nil,
}, nil
}
// AcquireUserSlot attempts to acquire a concurrency slot for a user.
// If the user is already at max concurrency, no slot is acquired (Acquired is false);
// the caller decides whether to queue, retry, or reject.
// Returns a release function that MUST be called when the request completes.
func (s *ConcurrencyService) AcquireUserSlot(ctx context.Context, userID int64, maxConcurrency int) (*AcquireResult, error) {
// If maxConcurrency is 0 or negative, no limit
if maxConcurrency <= 0 {
return &AcquireResult{
Acquired: true,
ReleaseFunc: func() {}, // no-op
}, nil
}
// Generate unique request ID for this slot
requestID := generateRequestID()
acquired, err := s.cache.AcquireUserSlot(ctx, userID, maxConcurrency, requestID)
if err != nil {
return nil, err
}
if acquired {
return &AcquireResult{
Acquired: true,
ReleaseFunc: func() {
bgCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if err := s.cache.ReleaseUserSlot(bgCtx, userID, requestID); err != nil {
log.Printf("Warning: failed to release user slot for %d (req=%s): %v", userID, requestID, err)
}
},
}, nil
}
return &AcquireResult{
Acquired: false,
ReleaseFunc: nil,
}, nil
}
// ============================================
// Wait Queue Count Methods
// ============================================
// IncrementWaitCount attempts to increment the wait queue counter for a user.
// Returns true if successful, false if the wait queue is full.
// maxWait should be user.Concurrency + defaultExtraWaitSlots
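//
// Typical usage (a sketch; user and errTooManyWaiting are illustrative names):
//
//	ok, err := svc.IncrementWaitCount(ctx, user.ID, CalculateMaxWait(user.Concurrency))
//	if err != nil || !ok {
//		return errTooManyWaiting // wait queue full, e.g. respond with HTTP 429
//	}
//	defer svc.DecrementWaitCount(ctx, user.ID)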
func (s *ConcurrencyService) IncrementWaitCount(ctx context.Context, userID int64, maxWait int) (bool, error) {
if s.cache == nil {
// Redis not available, allow request
return true, nil
}
result, err := s.cache.IncrementWaitCount(ctx, userID, maxWait)
if err != nil {
// On error, allow the request to proceed (fail open)
log.Printf("Warning: increment wait count failed for user %d: %v", userID, err)
return true, nil
}
return result, nil
}
// DecrementWaitCount decrements the wait queue counter for a user.
// Should be called when a request completes or exits the wait queue.
func (s *ConcurrencyService) DecrementWaitCount(ctx context.Context, userID int64) {
if s.cache == nil {
return
}
// Use background context to ensure decrement even if original context is cancelled
bgCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if err := s.cache.DecrementWaitCount(bgCtx, userID); err != nil {
log.Printf("Warning: decrement wait count failed for user %d: %v", userID, err)
}
}
// GetTotalWaitCount returns the total wait queue depth across users.
func (s *ConcurrencyService) GetTotalWaitCount(ctx context.Context) (int, error) {
if s.cache == nil {
return 0, nil
}
return s.cache.GetTotalWaitCount(ctx)
}
// IncrementAccountWaitCount increments the wait queue counter for an account.
func (s *ConcurrencyService) IncrementAccountWaitCount(ctx context.Context, accountID int64, maxWait int) (bool, error) {
if s.cache == nil {
return true, nil
}
result, err := s.cache.IncrementAccountWaitCount(ctx, accountID, maxWait)
if err != nil {
log.Printf("Warning: increment wait count failed for account %d: %v", accountID, err)
return true, nil
}
return result, nil
}
// DecrementAccountWaitCount decrements the wait queue counter for an account.
func (s *ConcurrencyService) DecrementAccountWaitCount(ctx context.Context, accountID int64) {
if s.cache == nil {
return
}
bgCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if err := s.cache.DecrementAccountWaitCount(bgCtx, accountID); err != nil {
log.Printf("Warning: decrement wait count failed for account %d: %v", accountID, err)
}
}
// GetAccountWaitingCount gets current wait queue count for an account.
func (s *ConcurrencyService) GetAccountWaitingCount(ctx context.Context, accountID int64) (int, error) {
if s.cache == nil {
return 0, nil
}
return s.cache.GetAccountWaitingCount(ctx, accountID)
}
// CalculateMaxWait calculates the maximum wait queue size for a user
// maxWait = userConcurrency + defaultExtraWaitSlots
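// Example: with defaultExtraWaitSlots = 20, CalculateMaxWait(5) returns 25 and
// CalculateMaxWait(0) returns 21 (the concurrency floor is 1).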
func CalculateMaxWait(userConcurrency int) int {
if userConcurrency <= 0 {
userConcurrency = 1
}
return userConcurrency + defaultExtraWaitSlots
}
// GetAccountsLoadBatch returns load info for multiple accounts.
func (s *ConcurrencyService) GetAccountsLoadBatch(ctx context.Context, accounts []AccountWithConcurrency) (map[int64]*AccountLoadInfo, error) {
if s.cache == nil {
return map[int64]*AccountLoadInfo{}, nil
}
return s.cache.GetAccountsLoadBatch(ctx, accounts)
}
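// pickLeastLoadedAccount is an illustrative sketch (not part of the original service) of how a
// caller such as an account scheduler might consume GetAccountsLoadBatch: pick the candidate
// with the lowest LoadRate, treating accounts without load info as idle. The selection policy
// here is an assumption, not the project's actual scheduling logic.
func pickLeastLoadedAccount(loads map[int64]*AccountLoadInfo, candidates []AccountWithConcurrency) (int64, bool) {
	var bestID int64
	bestRate := -1
	for _, acc := range candidates {
		info, ok := loads[acc.ID]
		if !ok || info == nil {
			// No recorded slots or waiters: treat the account as idle and take it immediately.
			return acc.ID, true
		}
		if bestRate < 0 || info.LoadRate < bestRate {
			bestID, bestRate = acc.ID, info.LoadRate
		}
	}
	return bestID, bestRate >= 0
}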
// CleanupExpiredAccountSlots removes expired slots for one account (background task).
func (s *ConcurrencyService) CleanupExpiredAccountSlots(ctx context.Context, accountID int64) error {
if s.cache == nil {
return nil
}
return s.cache.CleanupExpiredAccountSlots(ctx, accountID)
}
// StartSlotCleanupWorker starts a background cleanup worker for expired account slots.
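//
// Typical wiring at startup (a sketch; the 1-minute interval is an illustrative choice):
//
//	concurrencySvc.StartSlotCleanupWorker(accountRepo, time.Minute)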
func (s *ConcurrencyService) StartSlotCleanupWorker(accountRepo AccountRepository, interval time.Duration) {
if s == nil || s.cache == nil || accountRepo == nil || interval <= 0 {
return
}
runCleanup := func() {
listCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
accounts, err := accountRepo.ListSchedulable(listCtx)
cancel()
if err != nil {
log.Printf("Warning: list schedulable accounts failed: %v", err)
return
}
for _, account := range accounts {
accountCtx, accountCancel := context.WithTimeout(context.Background(), 2*time.Second)
err := s.cache.CleanupExpiredAccountSlots(accountCtx, account.ID)
accountCancel()
if err != nil {
log.Printf("Warning: cleanup expired slots failed for account %d: %v", account.ID, err)
}
}
}
go func() {
ticker := time.NewTicker(interval)
defer ticker.Stop()
runCleanup()
for range ticker.C {
runCleanup()
}
}()
}
// GetAccountConcurrencyBatch gets current concurrency counts for multiple accounts
// Returns a map of accountID -> current concurrency count
func (s *ConcurrencyService) GetAccountConcurrencyBatch(ctx context.Context, accountIDs []int64) (map[int64]int, error) {
result := make(map[int64]int)
if s.cache == nil {
return result, nil
}
for _, accountID := range accountIDs {
count, err := s.cache.GetAccountConcurrency(ctx, accountID)
if err != nil {
// On cache error (e.g. missing key or Redis unavailable), treat the account as having 0 active slots
count = 0
}
result[accountID] = count
}
return result, nil
}