2025-12-18 13:50:39 +08:00
package repository
import (
"context"
2025-12-29 10:03:27 +08:00
"database/sql"
2026-01-03 06:36:35 -08:00
"errors"
2025-12-29 10:03:27 +08:00
"fmt"
2025-12-31 10:17:38 +08:00
"os"
2025-12-29 10:03:27 +08:00
"strings"
2025-12-25 20:52:47 +08:00
"time"
2025-12-29 10:03:27 +08:00
dbent "github.com/Wei-Shaw/sub2api/ent"
dbaccount "github.com/Wei-Shaw/sub2api/ent/account"
dbapikey "github.com/Wei-Shaw/sub2api/ent/apikey"
dbgroup "github.com/Wei-Shaw/sub2api/ent/group"
dbuser "github.com/Wei-Shaw/sub2api/ent/user"
dbusersub "github.com/Wei-Shaw/sub2api/ent/usersubscription"
2025-12-24 21:07:21 +08:00
"github.com/Wei-Shaw/sub2api/internal/pkg/pagination"
"github.com/Wei-Shaw/sub2api/internal/pkg/timezone"
"github.com/Wei-Shaw/sub2api/internal/pkg/usagestats"
2025-12-29 10:03:27 +08:00
"github.com/Wei-Shaw/sub2api/internal/service"
"github.com/lib/pq"
2025-12-18 13:50:39 +08:00
)
2026-03-08 23:22:28 +08:00
// usageLogSelectColumns is the canonical column list for SELECTs against
// usage_logs. Keep its order in sync with scanUsageLog's Scan targets.
const usageLogSelectColumns = "id, user_id, api_key_id, account_id, request_id, model, group_id, subscription_id, input_tokens, output_tokens, cache_creation_tokens, cache_read_tokens, cache_creation_5m_tokens, cache_creation_1h_tokens, input_cost, output_cost, cache_creation_cost, cache_read_cost, total_cost, actual_cost, rate_multiplier, account_rate_multiplier, billing_type, request_type, stream, openai_ws_mode, duration_ms, first_token_ms, user_agent, ip_address, image_count, image_size, media_type, service_tier, reasoning_effort, cache_ttl_overridden, created_at"
2025-12-29 10:03:27 +08:00
2026-02-07 19:46:42 +08:00
// dateFormatWhitelist maps a granularity parameter to a PostgreSQL TO_CHAR
// format string, so external input is never interpolated into SQL directly.
var dateFormatWhitelist = map[string]string{
	"hour":  "YYYY-MM-DD HH24:00",
	"day":   "YYYY-MM-DD",
	"week":  "IYYY-IW",
	"month": "YYYY-MM",
}

// safeDateFormat resolves a granularity to a whitelisted date format,
// falling back to daily granularity for anything unrecognized.
func safeDateFormat(granularity string) string {
	format, known := dateFormatWhitelist[granularity]
	if !known {
		return "YYYY-MM-DD"
	}
	return format
}
2025-12-25 20:52:47 +08:00
// usageLogRepository persists and queries usage_logs rows. It combines the
// typed ent client with a raw SQL executor for the aggregation-heavy queries.
type usageLogRepository struct {
	client *dbent.Client // typed ent client
	sql    sqlExecutor   // raw SQL executor (*sql.DB, or an ent.Tx in-transaction)
}
2025-12-29 10:03:27 +08:00
// NewUsageLogRepository builds a service.UsageLogRepository backed by the
// given ent client and raw *sql.DB.
func NewUsageLogRepository(client *dbent.Client, sqlDB *sql.DB) service.UsageLogRepository {
	return newUsageLogRepositoryWithSQL(client, sqlDB)
}

// newUsageLogRepositoryWithSQL is the internal constructor that accepts any
// sqlExecutor (test seam / transaction support).
func newUsageLogRepositoryWithSQL(client *dbent.Client, sqlq sqlExecutor) *usageLogRepository {
	// scanSingleRow is used instead of QueryRowContext so that an ent.Tx
	// remains usable as a sqlExecutor.
	return &usageLogRepository{client: client, sql: sqlq}
}
2025-12-24 19:58:33 +08:00
// getPerformanceStats 获取 RPM 和 TPM( 近5分钟平均值, 可选按用户过滤)
2025-12-29 10:03:27 +08:00
func ( r * usageLogRepository ) getPerformanceStats ( ctx context . Context , userID int64 ) ( rpm , tpm int64 , err error ) {
2025-12-24 19:58:33 +08:00
fiveMinutesAgo := time . Now ( ) . Add ( - 5 * time . Minute )
2025-12-29 10:03:27 +08:00
query := `
SELECT
2025-12-24 10:24:02 +08:00
COUNT ( * ) as request_count ,
COALESCE ( SUM ( input_tokens + output_tokens ) , 0 ) as token_count
2025-12-29 10:03:27 +08:00
FROM usage_logs
WHERE created_at >= $ 1 `
args := [ ] any { fiveMinutesAgo }
2025-12-24 10:24:02 +08:00
if userID > 0 {
2025-12-29 10:03:27 +08:00
query += " AND user_id = $2"
args = append ( args , userID )
2025-12-24 10:24:02 +08:00
}
2025-12-29 10:03:27 +08:00
var requestCount int64
var tokenCount int64
2025-12-29 19:23:49 +08:00
if err := scanSingleRow ( ctx , r . sql , query , args , & requestCount , & tokenCount ) ; err != nil {
2025-12-29 10:03:27 +08:00
return 0 , 0 , err
}
return requestCount / 5 , tokenCount / 5 , nil
2025-12-24 10:24:02 +08:00
}
2026-01-03 17:10:32 -08:00
// Create inserts a usage log row, deduplicating on (request_id, api_key_id).
// It returns true when a new row was inserted, and false when the log was nil
// or an existing row for the same request was reused. On success log.ID and
// log.CreatedAt are populated from the database.
func (r *usageLogRepository) Create(ctx context.Context, log *service.UsageLog) (bool, error) {
	if log == nil {
		return false, nil
	}
	// Inside a transaction context, run the raw SQL through the tx-bound
	// ExecQuerier so it shares the transaction with other updates.
	// Without a transaction, fall back to the default *sql.DB executor.
	sqlq := r.sql
	if tx := dbent.TxFromContext(ctx); tx != nil {
		sqlq = tx.Client()
	}

	createdAt := log.CreatedAt
	if createdAt.IsZero() {
		createdAt = time.Now()
	}
	requestID := strings.TrimSpace(log.RequestID)
	log.RequestID = requestID
	// Capture the multiplier before SyncRequestTypeAndLegacyFields —
	// presumably that call may normalize it; the captured value is restored
	// on log before returning (TODO confirm against the service type).
	rateMultiplier := log.RateMultiplier
	log.SyncRequestTypeAndLegacyFields()
	requestType := int16(log.RequestType)

	// Column order must match the args slice below, position for position.
	query := `
		INSERT INTO usage_logs (
			user_id,
			api_key_id,
			account_id,
			request_id,
			model,
			group_id,
			subscription_id,
			input_tokens,
			output_tokens,
			cache_creation_tokens,
			cache_read_tokens,
			cache_creation_5m_tokens,
			cache_creation_1h_tokens,
			input_cost,
			output_cost,
			cache_creation_cost,
			cache_read_cost,
			total_cost,
			actual_cost,
			rate_multiplier,
			account_rate_multiplier,
			billing_type,
			request_type,
			stream,
			openai_ws_mode,
			duration_ms,
			first_token_ms,
			user_agent,
			ip_address,
			image_count,
			image_size,
			media_type,
			service_tier,
			reasoning_effort,
			cache_ttl_overridden,
			created_at
		) VALUES (
			$1, $2, $3, $4, $5,
			$6, $7,
			$8, $9, $10, $11,
			$12, $13,
			$14, $15, $16, $17, $18, $19,
			$20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30, $31, $32, $33, $34, $35, $36
		)
		ON CONFLICT (request_id, api_key_id) DO NOTHING
		RETURNING id, created_at
	`

	// Optional columns are converted to SQL NULL via the null* helpers.
	groupID := nullInt64(log.GroupID)
	subscriptionID := nullInt64(log.SubscriptionID)
	duration := nullInt(log.DurationMs)
	firstToken := nullInt(log.FirstTokenMs)
	userAgent := nullString(log.UserAgent)
	ipAddress := nullString(log.IPAddress)
	imageSize := nullString(log.ImageSize)
	mediaType := nullString(log.MediaType)
	serviceTier := nullString(log.ServiceTier)
	reasoningEffort := nullString(log.ReasoningEffort)

	// Empty request IDs are stored as NULL so they never collide in the
	// (request_id, api_key_id) uniqueness check.
	var requestIDArg any
	if requestID != "" {
		requestIDArg = requestID
	}
	args := []any{
		log.UserID,
		log.APIKeyID,
		log.AccountID,
		requestIDArg,
		log.Model,
		groupID,
		subscriptionID,
		log.InputTokens,
		log.OutputTokens,
		log.CacheCreationTokens,
		log.CacheReadTokens,
		log.CacheCreation5mTokens,
		log.CacheCreation1hTokens,
		log.InputCost,
		log.OutputCost,
		log.CacheCreationCost,
		log.CacheReadCost,
		log.TotalCost,
		log.ActualCost,
		rateMultiplier,
		log.AccountRateMultiplier,
		log.BillingType,
		requestType,
		log.Stream,
		log.OpenAIWSMode,
		duration,
		firstToken,
		userAgent,
		ipAddress,
		log.ImageCount,
		imageSize,
		mediaType,
		serviceTier,
		reasoningEffort,
		log.CacheTTLOverridden,
		createdAt,
	}
	if err := scanSingleRow(ctx, sqlq, query, args, &log.ID, &log.CreatedAt); err != nil {
		// DO NOTHING + RETURNING yields sql.ErrNoRows on a duplicate: load
		// the winning row's identity instead and report "not inserted".
		if errors.Is(err, sql.ErrNoRows) && requestID != "" {
			selectQuery := "SELECT id, created_at FROM usage_logs WHERE request_id = $1 AND api_key_id = $2"
			if err := scanSingleRow(ctx, sqlq, selectQuery, []any{requestID, log.APIKeyID}, &log.ID, &log.CreatedAt); err != nil {
				return false, err
			}
			log.RateMultiplier = rateMultiplier
			return false, nil
		} else {
			return false, err
		}
	}
	log.RateMultiplier = rateMultiplier
	return true, nil
}
2025-12-30 17:13:32 +08:00
// GetByID loads a single usage log by primary key. It returns
// service.ErrUsageLogNotFound when no row matches.
func (r *usageLogRepository) GetByID(ctx context.Context, id int64) (log *service.UsageLog, err error) {
	query := "SELECT " + usageLogSelectColumns + " FROM usage_logs WHERE id = $1"
	rows, err := r.sql.QueryContext(ctx, query, id)
	if err != nil {
		return nil, err
	}
	defer func() {
		// Keep the primary error; only propagate a Close failure when there
		// was no earlier error. Also clear the result so a caller never sees
		// a possibly incomplete row alongside a non-nil error.
		if closeErr := rows.Close(); closeErr != nil && err == nil {
			err = closeErr
			log = nil
		}
	}()
	if !rows.Next() {
		// Distinguish "iteration failed" from "genuinely no row".
		if err = rows.Err(); err != nil {
			return nil, err
		}
		return nil, service.ErrUsageLogNotFound
	}
	log, err = scanUsageLog(rows)
	if err != nil {
		return nil, err
	}
	if err = rows.Err(); err != nil {
		return nil, err
	}
	return log, nil
}
2025-12-18 13:50:39 +08:00
2025-12-29 10:03:27 +08:00
// ListByUser pages through one user's usage logs.
func (r *usageLogRepository) ListByUser(ctx context.Context, userID int64, params pagination.PaginationParams) ([]service.UsageLog, *pagination.PaginationResult, error) {
	const filter = "WHERE user_id = $1"
	return r.listUsageLogsWithPagination(ctx, filter, []any{userID}, params)
}
2025-12-18 13:50:39 +08:00
2026-01-04 19:27:53 +08:00
func ( r * usageLogRepository ) ListByAPIKey ( ctx context . Context , apiKeyID int64 , params pagination . PaginationParams ) ( [ ] service . UsageLog , * pagination . PaginationResult , error ) {
2025-12-29 10:03:27 +08:00
return r . listUsageLogsWithPagination ( ctx , "WHERE api_key_id = $1" , [ ] any { apiKeyID } , params )
2025-12-18 13:50:39 +08:00
}
// UserStats aggregates one user's usage over a time window.
type UserStats struct {
	TotalRequests   int64   `json:"total_requests"`
	TotalTokens     int64   `json:"total_tokens"` // input + output + cache creation + cache read
	TotalCost       float64 `json:"total_cost"`   // sum of actual_cost
	InputTokens     int64   `json:"input_tokens"`
	OutputTokens    int64   `json:"output_tokens"`
	CacheReadTokens int64   `json:"cache_read_tokens"`
}
2025-12-25 20:52:47 +08:00
func ( r * usageLogRepository ) GetUserStats ( ctx context . Context , userID int64 , startTime , endTime time . Time ) ( * UserStats , error ) {
2025-12-29 10:03:27 +08:00
query := `
SELECT
2025-12-18 13:50:39 +08:00
COUNT ( * ) as total_requests ,
COALESCE ( SUM ( input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens ) , 0 ) as total_tokens ,
COALESCE ( SUM ( actual_cost ) , 0 ) as total_cost ,
COALESCE ( SUM ( input_tokens ) , 0 ) as input_tokens ,
COALESCE ( SUM ( output_tokens ) , 0 ) as output_tokens ,
COALESCE ( SUM ( cache_read_tokens ) , 0 ) as cache_read_tokens
2025-12-29 10:03:27 +08:00
FROM usage_logs
WHERE user_id = $ 1 AND created_at >= $ 2 AND created_at < $ 3
`
stats := & UserStats { }
2025-12-29 19:23:49 +08:00
if err := scanSingleRow (
ctx ,
r . sql ,
query ,
[ ] any { userID , startTime , endTime } ,
& stats . TotalRequests ,
& stats . TotalTokens ,
& stats . TotalCost ,
& stats . InputTokens ,
& stats . OutputTokens ,
& stats . CacheReadTokens ,
) ; err != nil {
2025-12-29 10:03:27 +08:00
return nil , err
}
return stats , nil
2025-12-18 13:50:39 +08:00
}
// DashboardStats is the admin dashboard aggregate; the concrete shape lives
// in the shared usagestats package.
type DashboardStats = usagestats.DashboardStats
2025-12-25 20:52:47 +08:00
func ( r * usageLogRepository ) GetDashboardStats ( ctx context . Context ) ( * DashboardStats , error ) {
2026-01-11 18:20:15 +08:00
stats := & DashboardStats { }
2026-01-15 09:12:16 +08:00
now := timezone . Now ( )
todayStart := timezone . Today ( )
2025-12-18 13:50:39 +08:00
2026-01-15 09:12:16 +08:00
if err := r . fillDashboardEntityStats ( ctx , stats , todayStart , now ) ; err != nil {
2026-01-11 18:20:15 +08:00
return nil , err
}
2026-01-15 09:12:16 +08:00
if err := r . fillDashboardUsageStatsAggregated ( ctx , stats , todayStart , now ) ; err != nil {
2026-01-11 18:20:15 +08:00
return nil , err
}
rpm , tpm , err := r . getPerformanceStats ( ctx , 0 )
if err != nil {
return nil , err
}
stats . Rpm = rpm
stats . Tpm = tpm
return stats , nil
}
func ( r * usageLogRepository ) GetDashboardStatsWithRange ( ctx context . Context , start , end time . Time ) ( * DashboardStats , error ) {
startUTC := start . UTC ( )
endUTC := end . UTC ( )
if ! endUTC . After ( startUTC ) {
return nil , errors . New ( "统计时间范围无效" )
}
stats := & DashboardStats { }
2026-01-15 09:12:16 +08:00
now := timezone . Now ( )
todayStart := timezone . Today ( )
2026-01-11 18:20:15 +08:00
2026-01-15 09:12:16 +08:00
if err := r . fillDashboardEntityStats ( ctx , stats , todayStart , now ) ; err != nil {
2026-01-11 18:20:15 +08:00
return nil , err
}
2026-01-15 09:12:16 +08:00
if err := r . fillDashboardUsageStatsFromUsageLogs ( ctx , stats , startUTC , endUTC , todayStart , now ) ; err != nil {
2026-01-11 18:20:15 +08:00
return nil , err
}
rpm , tpm , err := r . getPerformanceStats ( ctx , 0 )
if err != nil {
return nil , err
}
stats . Rpm = rpm
stats . Tpm = tpm
return stats , nil
}
func ( r * usageLogRepository ) fillDashboardEntityStats ( ctx context . Context , stats * DashboardStats , todayUTC , now time . Time ) error {
2025-12-29 10:03:27 +08:00
userStatsQuery := `
2025-12-27 16:03:37 +08:00
SELECT
COUNT ( * ) as total_users ,
2026-01-11 16:01:35 +08:00
COUNT ( CASE WHEN created_at >= $ 1 THEN 1 END ) as today_new_users
2025-12-27 16:03:37 +08:00
FROM users
2025-12-29 10:03:27 +08:00
WHERE deleted_at IS NULL
`
2025-12-29 19:23:49 +08:00
if err := scanSingleRow (
ctx ,
r . sql ,
userStatsQuery ,
2026-01-11 16:01:35 +08:00
[ ] any { todayUTC } ,
2025-12-29 19:23:49 +08:00
& stats . TotalUsers ,
& stats . TodayNewUsers ,
) ; err != nil {
2026-01-11 18:20:15 +08:00
return err
2025-12-27 16:03:37 +08:00
}
2025-12-29 10:03:27 +08:00
apiKeyStatsQuery := `
2025-12-27 16:03:37 +08:00
SELECT
COUNT ( * ) as total_api_keys ,
2025-12-29 10:03:27 +08:00
COUNT ( CASE WHEN status = $ 1 THEN 1 END ) as active_api_keys
2025-12-27 16:03:37 +08:00
FROM api_keys
2025-12-29 10:03:27 +08:00
WHERE deleted_at IS NULL
`
2025-12-29 19:23:49 +08:00
if err := scanSingleRow (
ctx ,
r . sql ,
apiKeyStatsQuery ,
[ ] any { service . StatusActive } ,
2026-01-04 19:27:53 +08:00
& stats . TotalAPIKeys ,
& stats . ActiveAPIKeys ,
2025-12-29 19:23:49 +08:00
) ; err != nil {
2026-01-11 18:20:15 +08:00
return err
2025-12-27 16:03:37 +08:00
}
2025-12-29 10:03:27 +08:00
accountStatsQuery := `
2025-12-27 16:03:37 +08:00
SELECT
COUNT ( * ) as total_accounts ,
2025-12-29 10:03:27 +08:00
COUNT ( CASE WHEN status = $ 1 AND schedulable = true THEN 1 END ) as normal_accounts ,
COUNT ( CASE WHEN status = $ 2 THEN 1 END ) as error_accounts ,
COUNT ( CASE WHEN rate_limited_at IS NOT NULL AND rate_limit_reset_at > $ 3 THEN 1 END ) as ratelimit_accounts ,
COUNT ( CASE WHEN overload_until IS NOT NULL AND overload_until > $ 4 THEN 1 END ) as overload_accounts
2025-12-27 16:03:37 +08:00
FROM accounts
2025-12-29 10:03:27 +08:00
WHERE deleted_at IS NULL
`
2025-12-29 19:23:49 +08:00
if err := scanSingleRow (
ctx ,
r . sql ,
accountStatsQuery ,
[ ] any { service . StatusActive , service . StatusError , now , now } ,
& stats . TotalAccounts ,
& stats . NormalAccounts ,
& stats . ErrorAccounts ,
& stats . RateLimitAccounts ,
& stats . OverloadAccounts ,
) ; err != nil {
2026-01-11 18:20:15 +08:00
return err
2025-12-27 16:03:37 +08:00
}
2025-12-18 13:50:39 +08:00
2026-01-11 18:20:15 +08:00
return nil
}
func ( r * usageLogRepository ) fillDashboardUsageStatsAggregated ( ctx context . Context , stats * DashboardStats , todayUTC , now time . Time ) error {
2025-12-29 10:03:27 +08:00
totalStatsQuery := `
SELECT
2026-01-11 16:01:35 +08:00
COALESCE ( SUM ( total_requests ) , 0 ) as total_requests ,
2025-12-18 13:50:39 +08:00
COALESCE ( SUM ( input_tokens ) , 0 ) as total_input_tokens ,
COALESCE ( SUM ( output_tokens ) , 0 ) as total_output_tokens ,
COALESCE ( SUM ( cache_creation_tokens ) , 0 ) as total_cache_creation_tokens ,
COALESCE ( SUM ( cache_read_tokens ) , 0 ) as total_cache_read_tokens ,
COALESCE ( SUM ( total_cost ) , 0 ) as total_cost ,
COALESCE ( SUM ( actual_cost ) , 0 ) as total_actual_cost ,
2026-01-11 16:01:35 +08:00
COALESCE ( SUM ( total_duration_ms ) , 0 ) as total_duration_ms
FROM usage_dashboard_daily
2025-12-29 10:03:27 +08:00
`
2026-01-11 16:01:35 +08:00
var totalDurationMs int64
2025-12-29 19:23:49 +08:00
if err := scanSingleRow (
ctx ,
r . sql ,
totalStatsQuery ,
nil ,
& stats . TotalRequests ,
& stats . TotalInputTokens ,
& stats . TotalOutputTokens ,
& stats . TotalCacheCreationTokens ,
& stats . TotalCacheReadTokens ,
& stats . TotalCost ,
& stats . TotalActualCost ,
2026-01-11 16:01:35 +08:00
& totalDurationMs ,
2025-12-29 19:23:49 +08:00
) ; err != nil {
2026-01-11 18:20:15 +08:00
return err
2025-12-29 10:03:27 +08:00
}
2025-12-18 13:50:39 +08:00
stats . TotalTokens = stats . TotalInputTokens + stats . TotalOutputTokens + stats . TotalCacheCreationTokens + stats . TotalCacheReadTokens
2026-01-11 16:01:35 +08:00
if stats . TotalRequests > 0 {
stats . AverageDurationMs = float64 ( totalDurationMs ) / float64 ( stats . TotalRequests )
}
2025-12-18 13:50:39 +08:00
2025-12-29 10:03:27 +08:00
todayStatsQuery := `
SELECT
2026-01-11 16:01:35 +08:00
total_requests as today_requests ,
input_tokens as today_input_tokens ,
output_tokens as today_output_tokens ,
cache_creation_tokens as today_cache_creation_tokens ,
cache_read_tokens as today_cache_read_tokens ,
total_cost as today_cost ,
actual_cost as today_actual_cost ,
active_users as active_users
FROM usage_dashboard_daily
WHERE bucket_date = $ 1 : : date
2025-12-29 10:03:27 +08:00
`
2025-12-29 19:23:49 +08:00
if err := scanSingleRow (
ctx ,
r . sql ,
todayStatsQuery ,
2026-01-11 16:01:35 +08:00
[ ] any { todayUTC } ,
2025-12-29 19:23:49 +08:00
& stats . TodayRequests ,
& stats . TodayInputTokens ,
& stats . TodayOutputTokens ,
& stats . TodayCacheCreationTokens ,
& stats . TodayCacheReadTokens ,
& stats . TodayCost ,
& stats . TodayActualCost ,
2026-01-11 16:01:35 +08:00
& stats . ActiveUsers ,
2025-12-29 19:23:49 +08:00
) ; err != nil {
2026-01-11 16:01:35 +08:00
if err != sql . ErrNoRows {
2026-01-11 18:20:15 +08:00
return err
2026-01-11 16:01:35 +08:00
}
2025-12-29 10:03:27 +08:00
}
2025-12-18 13:50:39 +08:00
stats . TodayTokens = stats . TodayInputTokens + stats . TodayOutputTokens + stats . TodayCacheCreationTokens + stats . TodayCacheReadTokens
2026-01-11 16:01:35 +08:00
hourlyActiveQuery := `
SELECT active_users
FROM usage_dashboard_hourly
WHERE bucket_start = $ 1
`
2026-01-15 11:22:13 +08:00
hourStart := now . In ( timezone . Location ( ) ) . Truncate ( time . Hour )
2026-01-11 16:01:35 +08:00
if err := scanSingleRow ( ctx , r . sql , hourlyActiveQuery , [ ] any { hourStart } , & stats . HourlyActiveUsers ) ; err != nil {
if err != sql . ErrNoRows {
2026-01-11 18:20:15 +08:00
return err
2026-01-11 16:01:35 +08:00
}
}
2026-01-11 18:20:15 +08:00
return nil
}
// fillDashboardUsageStatsFromUsageLogs computes dashboard usage numbers
// directly from usage_logs: totals over [startUTC, endUTC), today's figures
// over [todayUTC, todayUTC+24h), and distinct active users for today and for
// the current UTC hour.
func (r *usageLogRepository) fillDashboardUsageStatsFromUsageLogs(ctx context.Context, stats *DashboardStats, startUTC, endUTC, todayUTC, now time.Time) error {
	todayEnd := todayUTC.Add(24 * time.Hour)
	// One scan over the union of both windows: LEAST/GREATEST widen the CTE
	// to cover the requested range and today together, then FILTER clauses
	// split each aggregate by window, avoiding two full table passes.
	combinedStatsQuery := `
		WITH scoped AS (
			SELECT
				created_at,
				input_tokens,
				output_tokens,
				cache_creation_tokens,
				cache_read_tokens,
				total_cost,
				actual_cost,
				COALESCE(duration_ms, 0) AS duration_ms
			FROM usage_logs
			WHERE created_at >= LEAST($1::timestamptz, $3::timestamptz)
			AND created_at < GREATEST($2::timestamptz, $4::timestamptz)
		)
		SELECT
			COUNT(*) FILTER (WHERE created_at >= $1::timestamptz AND created_at < $2::timestamptz) AS total_requests,
			COALESCE(SUM(input_tokens) FILTER (WHERE created_at >= $1::timestamptz AND created_at < $2::timestamptz), 0) AS total_input_tokens,
			COALESCE(SUM(output_tokens) FILTER (WHERE created_at >= $1::timestamptz AND created_at < $2::timestamptz), 0) AS total_output_tokens,
			COALESCE(SUM(cache_creation_tokens) FILTER (WHERE created_at >= $1::timestamptz AND created_at < $2::timestamptz), 0) AS total_cache_creation_tokens,
			COALESCE(SUM(cache_read_tokens) FILTER (WHERE created_at >= $1::timestamptz AND created_at < $2::timestamptz), 0) AS total_cache_read_tokens,
			COALESCE(SUM(total_cost) FILTER (WHERE created_at >= $1::timestamptz AND created_at < $2::timestamptz), 0) AS total_cost,
			COALESCE(SUM(actual_cost) FILTER (WHERE created_at >= $1::timestamptz AND created_at < $2::timestamptz), 0) AS total_actual_cost,
			COALESCE(SUM(duration_ms) FILTER (WHERE created_at >= $1::timestamptz AND created_at < $2::timestamptz), 0) AS total_duration_ms,
			COUNT(*) FILTER (WHERE created_at >= $3::timestamptz AND created_at < $4::timestamptz) AS today_requests,
			COALESCE(SUM(input_tokens) FILTER (WHERE created_at >= $3::timestamptz AND created_at < $4::timestamptz), 0) AS today_input_tokens,
			COALESCE(SUM(output_tokens) FILTER (WHERE created_at >= $3::timestamptz AND created_at < $4::timestamptz), 0) AS today_output_tokens,
			COALESCE(SUM(cache_creation_tokens) FILTER (WHERE created_at >= $3::timestamptz AND created_at < $4::timestamptz), 0) AS today_cache_creation_tokens,
			COALESCE(SUM(cache_read_tokens) FILTER (WHERE created_at >= $3::timestamptz AND created_at < $4::timestamptz), 0) AS today_cache_read_tokens,
			COALESCE(SUM(total_cost) FILTER (WHERE created_at >= $3::timestamptz AND created_at < $4::timestamptz), 0) AS today_cost,
			COALESCE(SUM(actual_cost) FILTER (WHERE created_at >= $3::timestamptz AND created_at < $4::timestamptz), 0) AS today_actual_cost
		FROM scoped
	`
	var totalDurationMs int64
	if err := scanSingleRow(
		ctx,
		r.sql,
		combinedStatsQuery,
		[]any{startUTC, endUTC, todayUTC, todayEnd},
		&stats.TotalRequests,
		&stats.TotalInputTokens,
		&stats.TotalOutputTokens,
		&stats.TotalCacheCreationTokens,
		&stats.TotalCacheReadTokens,
		&stats.TotalCost,
		&stats.TotalActualCost,
		&totalDurationMs,
		&stats.TodayRequests,
		&stats.TodayInputTokens,
		&stats.TodayOutputTokens,
		&stats.TodayCacheCreationTokens,
		&stats.TodayCacheReadTokens,
		&stats.TodayCost,
		&stats.TodayActualCost,
	); err != nil {
		return err
	}
	stats.TotalTokens = stats.TotalInputTokens + stats.TotalOutputTokens + stats.TotalCacheCreationTokens + stats.TotalCacheReadTokens
	if stats.TotalRequests > 0 {
		stats.AverageDurationMs = float64(totalDurationMs) / float64(stats.TotalRequests)
	}
	stats.TodayTokens = stats.TodayInputTokens + stats.TodayOutputTokens + stats.TodayCacheCreationTokens + stats.TodayCacheReadTokens

	hourStart := now.UTC().Truncate(time.Hour)
	hourEnd := hourStart.Add(time.Hour)
	// Same union-then-FILTER shape for the two distinct-user counts.
	activeUsersQuery := `
		WITH scoped AS (
			SELECT user_id, created_at
			FROM usage_logs
			WHERE created_at >= LEAST($1::timestamptz, $3::timestamptz)
			AND created_at < GREATEST($2::timestamptz, $4::timestamptz)
		)
		SELECT
			COUNT(DISTINCT CASE WHEN created_at >= $1::timestamptz AND created_at < $2::timestamptz THEN user_id END) AS active_users,
			COUNT(DISTINCT CASE WHEN created_at >= $3::timestamptz AND created_at < $4::timestamptz THEN user_id END) AS hourly_active_users
		FROM scoped
	`
	if err := scanSingleRow(ctx, r.sql, activeUsersQuery, []any{todayUTC, todayEnd, hourStart, hourEnd}, &stats.ActiveUsers, &stats.HourlyActiveUsers); err != nil {
		return err
	}

	return nil
}
2025-12-26 15:40:24 +08:00
func ( r * usageLogRepository ) ListByAccount ( ctx context . Context , accountID int64 , params pagination . PaginationParams ) ( [ ] service . UsageLog , * pagination . PaginationResult , error ) {
2025-12-29 10:03:27 +08:00
return r . listUsageLogsWithPagination ( ctx , "WHERE account_id = $1" , [ ] any { accountID } , params )
2025-12-18 13:50:39 +08:00
}
2025-12-26 15:40:24 +08:00
func ( r * usageLogRepository ) ListByUserAndTimeRange ( ctx context . Context , userID int64 , startTime , endTime time . Time ) ( [ ] service . UsageLog , * pagination . PaginationResult , error ) {
2026-02-07 19:46:42 +08:00
query := "SELECT " + usageLogSelectColumns + " FROM usage_logs WHERE user_id = $1 AND created_at >= $2 AND created_at < $3 ORDER BY id DESC LIMIT 10000"
2025-12-29 10:03:27 +08:00
logs , err := r . queryUsageLogs ( ctx , query , userID , startTime , endTime )
return logs , nil , err
2025-12-18 13:50:39 +08:00
}
2025-12-27 16:03:37 +08:00
// GetUserStatsAggregated returns aggregated usage statistics for a user using database-level aggregation
func ( r * usageLogRepository ) GetUserStatsAggregated ( ctx context . Context , userID int64 , startTime , endTime time . Time ) ( * usagestats . UsageStats , error ) {
2025-12-29 10:03:27 +08:00
query := `
SELECT
2025-12-27 16:03:37 +08:00
COUNT ( * ) as total_requests ,
COALESCE ( SUM ( input_tokens ) , 0 ) as total_input_tokens ,
COALESCE ( SUM ( output_tokens ) , 0 ) as total_output_tokens ,
COALESCE ( SUM ( cache_creation_tokens + cache_read_tokens ) , 0 ) as total_cache_tokens ,
COALESCE ( SUM ( total_cost ) , 0 ) as total_cost ,
COALESCE ( SUM ( actual_cost ) , 0 ) as total_actual_cost ,
COALESCE ( AVG ( COALESCE ( duration_ms , 0 ) ) , 0 ) as avg_duration_ms
2025-12-29 10:03:27 +08:00
FROM usage_logs
WHERE user_id = $ 1 AND created_at >= $ 2 AND created_at < $ 3
`
2025-12-27 16:03:37 +08:00
2025-12-29 10:03:27 +08:00
var stats usagestats . UsageStats
2025-12-29 19:23:49 +08:00
if err := scanSingleRow (
ctx ,
r . sql ,
query ,
[ ] any { userID , startTime , endTime } ,
& stats . TotalRequests ,
& stats . TotalInputTokens ,
& stats . TotalOutputTokens ,
& stats . TotalCacheTokens ,
& stats . TotalCost ,
& stats . TotalActualCost ,
& stats . AverageDurationMs ,
) ; err != nil {
2025-12-27 16:03:37 +08:00
return nil , err
}
2025-12-29 10:03:27 +08:00
stats . TotalTokens = stats . TotalInputTokens + stats . TotalOutputTokens + stats . TotalCacheTokens
return & stats , nil
2025-12-27 16:03:37 +08:00
}
2026-01-04 19:27:53 +08:00
// GetAPIKeyStatsAggregated returns aggregated usage statistics for an API key using database-level aggregation
func ( r * usageLogRepository ) GetAPIKeyStatsAggregated ( ctx context . Context , apiKeyID int64 , startTime , endTime time . Time ) ( * usagestats . UsageStats , error ) {
2025-12-29 10:03:27 +08:00
query := `
SELECT
2025-12-27 16:03:37 +08:00
COUNT ( * ) as total_requests ,
COALESCE ( SUM ( input_tokens ) , 0 ) as total_input_tokens ,
COALESCE ( SUM ( output_tokens ) , 0 ) as total_output_tokens ,
COALESCE ( SUM ( cache_creation_tokens + cache_read_tokens ) , 0 ) as total_cache_tokens ,
COALESCE ( SUM ( total_cost ) , 0 ) as total_cost ,
COALESCE ( SUM ( actual_cost ) , 0 ) as total_actual_cost ,
COALESCE ( AVG ( COALESCE ( duration_ms , 0 ) ) , 0 ) as avg_duration_ms
2025-12-29 10:03:27 +08:00
FROM usage_logs
WHERE api_key_id = $ 1 AND created_at >= $ 2 AND created_at < $ 3
`
2025-12-27 16:03:37 +08:00
2025-12-29 10:03:27 +08:00
var stats usagestats . UsageStats
2025-12-29 19:23:49 +08:00
if err := scanSingleRow (
ctx ,
r . sql ,
query ,
[ ] any { apiKeyID , startTime , endTime } ,
& stats . TotalRequests ,
& stats . TotalInputTokens ,
& stats . TotalOutputTokens ,
& stats . TotalCacheTokens ,
& stats . TotalCost ,
& stats . TotalActualCost ,
& stats . AverageDurationMs ,
) ; err != nil {
2025-12-27 16:03:37 +08:00
return nil , err
}
2025-12-29 10:03:27 +08:00
stats . TotalTokens = stats . TotalInputTokens + stats . TotalOutputTokens + stats . TotalCacheTokens
return & stats , nil
2025-12-27 16:03:37 +08:00
}
2025-12-31 08:50:12 +08:00
// GetAccountStatsAggregated 使用 SQL 聚合统计账号使用数据
//
// 性能优化说明:
// 原实现先查询所有日志记录,再在应用层循环计算统计值:
// 1. 需要传输大量数据到应用层
// 2. 应用层循环计算增加 CPU 和内存开销
//
// 新实现使用 SQL 聚合函数:
// 1. 在数据库层完成 COUNT/SUM/AVG 计算
// 2. 只返回单行聚合结果,大幅减少数据传输量
// 3. 利用数据库索引优化聚合查询性能
func ( r * usageLogRepository ) GetAccountStatsAggregated ( ctx context . Context , accountID int64 , startTime , endTime time . Time ) ( * usagestats . UsageStats , error ) {
query := `
SELECT
COUNT ( * ) as total_requests ,
COALESCE ( SUM ( input_tokens ) , 0 ) as total_input_tokens ,
COALESCE ( SUM ( output_tokens ) , 0 ) as total_output_tokens ,
COALESCE ( SUM ( cache_creation_tokens + cache_read_tokens ) , 0 ) as total_cache_tokens ,
COALESCE ( SUM ( total_cost ) , 0 ) as total_cost ,
COALESCE ( SUM ( actual_cost ) , 0 ) as total_actual_cost ,
COALESCE ( AVG ( COALESCE ( duration_ms , 0 ) ) , 0 ) as avg_duration_ms
FROM usage_logs
WHERE account_id = $ 1 AND created_at >= $ 2 AND created_at < $ 3
`
var stats usagestats . UsageStats
if err := scanSingleRow (
ctx ,
r . sql ,
query ,
[ ] any { accountID , startTime , endTime } ,
& stats . TotalRequests ,
& stats . TotalInputTokens ,
& stats . TotalOutputTokens ,
& stats . TotalCacheTokens ,
& stats . TotalCost ,
& stats . TotalActualCost ,
& stats . AverageDurationMs ,
) ; err != nil {
return nil , err
}
stats . TotalTokens = stats . TotalInputTokens + stats . TotalOutputTokens + stats . TotalCacheTokens
return & stats , nil
}
// GetModelStatsAggregated 使用 SQL 聚合统计模型使用数据
// 性能优化:数据库层聚合计算,避免应用层循环统计
func ( r * usageLogRepository ) GetModelStatsAggregated ( ctx context . Context , modelName string , startTime , endTime time . Time ) ( * usagestats . UsageStats , error ) {
query := `
SELECT
COUNT ( * ) as total_requests ,
COALESCE ( SUM ( input_tokens ) , 0 ) as total_input_tokens ,
COALESCE ( SUM ( output_tokens ) , 0 ) as total_output_tokens ,
COALESCE ( SUM ( cache_creation_tokens + cache_read_tokens ) , 0 ) as total_cache_tokens ,
COALESCE ( SUM ( total_cost ) , 0 ) as total_cost ,
COALESCE ( SUM ( actual_cost ) , 0 ) as total_actual_cost ,
COALESCE ( AVG ( COALESCE ( duration_ms , 0 ) ) , 0 ) as avg_duration_ms
FROM usage_logs
WHERE model = $ 1 AND created_at >= $ 2 AND created_at < $ 3
`
var stats usagestats . UsageStats
if err := scanSingleRow (
ctx ,
r . sql ,
query ,
[ ] any { modelName , startTime , endTime } ,
& stats . TotalRequests ,
& stats . TotalInputTokens ,
& stats . TotalOutputTokens ,
& stats . TotalCacheTokens ,
& stats . TotalCost ,
& stats . TotalActualCost ,
& stats . AverageDurationMs ,
) ; err != nil {
return nil , err
}
stats . TotalTokens = stats . TotalInputTokens + stats . TotalOutputTokens + stats . TotalCacheTokens
return & stats , nil
}
// GetDailyStatsAggregated 使用 SQL 聚合统计用户的每日使用数据
// 性能优化:使用 GROUP BY 在数据库层按日期分组聚合,避免应用层循环分组统计
func ( r * usageLogRepository ) GetDailyStatsAggregated ( ctx context . Context , userID int64 , startTime , endTime time . Time ) ( result [ ] map [ string ] any , err error ) {
2025-12-31 10:17:38 +08:00
tzName := resolveUsageStatsTimezone ( )
2025-12-31 08:50:12 +08:00
query := `
SELECT
2025-12-31 10:17:38 +08:00
-- 使用应用时区分组 , 避免数据库会话时区导致日边界偏移 。
TO_CHAR ( created_at AT TIME ZONE $ 4 , ' YYYY - MM - DD ' ) as date ,
2025-12-31 08:50:12 +08:00
COUNT ( * ) as total_requests ,
COALESCE ( SUM ( input_tokens ) , 0 ) as total_input_tokens ,
COALESCE ( SUM ( output_tokens ) , 0 ) as total_output_tokens ,
COALESCE ( SUM ( cache_creation_tokens + cache_read_tokens ) , 0 ) as total_cache_tokens ,
COALESCE ( SUM ( total_cost ) , 0 ) as total_cost ,
COALESCE ( SUM ( actual_cost ) , 0 ) as total_actual_cost ,
COALESCE ( AVG ( COALESCE ( duration_ms , 0 ) ) , 0 ) as avg_duration_ms
FROM usage_logs
WHERE user_id = $ 1 AND created_at >= $ 2 AND created_at < $ 3
GROUP BY 1
ORDER BY 1
`
2025-12-31 10:17:38 +08:00
rows , err := r . sql . QueryContext ( ctx , query , userID , startTime , endTime , tzName )
2025-12-31 08:50:12 +08:00
if err != nil {
return nil , err
}
defer func ( ) {
if closeErr := rows . Close ( ) ; closeErr != nil && err == nil {
err = closeErr
result = nil
}
} ( )
result = make ( [ ] map [ string ] any , 0 )
for rows . Next ( ) {
var (
date string
totalRequests int64
totalInputTokens int64
totalOutputTokens int64
totalCacheTokens int64
totalCost float64
totalActualCost float64
avgDurationMs float64
)
if err = rows . Scan (
& date ,
& totalRequests ,
& totalInputTokens ,
& totalOutputTokens ,
& totalCacheTokens ,
& totalCost ,
& totalActualCost ,
& avgDurationMs ,
) ; err != nil {
return nil , err
}
result = append ( result , map [ string ] any {
"date" : date ,
"total_requests" : totalRequests ,
"total_input_tokens" : totalInputTokens ,
"total_output_tokens" : totalOutputTokens ,
"total_cache_tokens" : totalCacheTokens ,
"total_tokens" : totalInputTokens + totalOutputTokens + totalCacheTokens ,
"total_cost" : totalCost ,
"total_actual_cost" : totalActualCost ,
"average_duration_ms" : avgDurationMs ,
} )
}
if err = rows . Err ( ) ; err != nil {
return nil , err
}
return result , nil
}
2025-12-31 10:17:38 +08:00
// resolveUsageStatsTimezone 获取用于 SQL 分组的时区名称。
// 优先使用应用初始化的时区;其次读取 TZ 环境变量;两者都不可用时回落为 UTC。
func resolveUsageStatsTimezone() string {
	if name := timezone.Name(); name != "" && name != "Local" {
		return name
	}
	// "Local" 对数据库无意义,此时尝试环境变量。
	if envTZ := strings.TrimSpace(os.Getenv("TZ")); envTZ != "" {
		return envTZ
	}
	return "UTC"
}
2026-01-04 19:27:53 +08:00
func ( r * usageLogRepository ) ListByAPIKeyAndTimeRange ( ctx context . Context , apiKeyID int64 , startTime , endTime time . Time ) ( [ ] service . UsageLog , * pagination . PaginationResult , error ) {
2026-02-07 19:46:42 +08:00
query := "SELECT " + usageLogSelectColumns + " FROM usage_logs WHERE api_key_id = $1 AND created_at >= $2 AND created_at < $3 ORDER BY id DESC LIMIT 10000"
2025-12-29 10:03:27 +08:00
logs , err := r . queryUsageLogs ( ctx , query , apiKeyID , startTime , endTime )
return logs , nil , err
2025-12-18 13:50:39 +08:00
}
2025-12-26 15:40:24 +08:00
func ( r * usageLogRepository ) ListByAccountAndTimeRange ( ctx context . Context , accountID int64 , startTime , endTime time . Time ) ( [ ] service . UsageLog , * pagination . PaginationResult , error ) {
2026-02-07 19:46:42 +08:00
query := "SELECT " + usageLogSelectColumns + " FROM usage_logs WHERE account_id = $1 AND created_at >= $2 AND created_at < $3 ORDER BY id DESC LIMIT 10000"
2025-12-29 10:03:27 +08:00
logs , err := r . queryUsageLogs ( ctx , query , accountID , startTime , endTime )
return logs , nil , err
2025-12-18 13:50:39 +08:00
}
2025-12-26 15:40:24 +08:00
func ( r * usageLogRepository ) ListByModelAndTimeRange ( ctx context . Context , modelName string , startTime , endTime time . Time ) ( [ ] service . UsageLog , * pagination . PaginationResult , error ) {
2026-02-07 19:46:42 +08:00
query := "SELECT " + usageLogSelectColumns + " FROM usage_logs WHERE model = $1 AND created_at >= $2 AND created_at < $3 ORDER BY id DESC LIMIT 10000"
2025-12-29 10:03:27 +08:00
logs , err := r . queryUsageLogs ( ctx , query , modelName , startTime , endTime )
return logs , nil , err
2025-12-18 13:50:39 +08:00
}
2025-12-25 20:52:47 +08:00
func ( r * usageLogRepository ) Delete ( ctx context . Context , id int64 ) error {
2025-12-29 10:03:27 +08:00
_ , err := r . sql . ExecContext ( ctx , "DELETE FROM usage_logs WHERE id = $1" , id )
return err
2025-12-18 13:50:39 +08:00
}
// GetAccountTodayStats 获取账号今日统计
2025-12-25 20:52:47 +08:00
func ( r * usageLogRepository ) GetAccountTodayStats ( ctx context . Context , accountID int64 ) ( * usagestats . AccountStats , error ) {
2025-12-18 13:50:39 +08:00
today := timezone . Today ( )
2025-12-29 10:03:27 +08:00
query := `
SELECT
2025-12-18 13:50:39 +08:00
COUNT ( * ) as requests ,
COALESCE ( SUM ( input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens ) , 0 ) as tokens ,
2026-01-14 16:12:08 +08:00
COALESCE ( SUM ( total_cost * COALESCE ( account_rate_multiplier , 1 ) ) , 0 ) as cost ,
COALESCE ( SUM ( total_cost ) , 0 ) as standard_cost ,
COALESCE ( SUM ( actual_cost ) , 0 ) as user_cost
2025-12-29 10:03:27 +08:00
FROM usage_logs
WHERE account_id = $ 1 AND created_at >= $ 2
`
2025-12-18 13:50:39 +08:00
2025-12-29 10:03:27 +08:00
stats := & usagestats . AccountStats { }
2025-12-29 19:23:49 +08:00
if err := scanSingleRow (
ctx ,
r . sql ,
query ,
[ ] any { accountID , today } ,
& stats . Requests ,
& stats . Tokens ,
& stats . Cost ,
2026-01-14 16:12:08 +08:00
& stats . StandardCost ,
& stats . UserCost ,
2025-12-29 19:23:49 +08:00
) ; err != nil {
2025-12-18 13:50:39 +08:00
return nil , err
}
2025-12-29 10:03:27 +08:00
return stats , nil
2025-12-18 13:50:39 +08:00
}
// GetAccountWindowStats 获取账号时间窗口内的统计
2025-12-25 20:52:47 +08:00
func ( r * usageLogRepository ) GetAccountWindowStats ( ctx context . Context , accountID int64 , startTime time . Time ) ( * usagestats . AccountStats , error ) {
2025-12-29 10:03:27 +08:00
query := `
SELECT
2025-12-18 13:50:39 +08:00
COUNT ( * ) as requests ,
COALESCE ( SUM ( input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens ) , 0 ) as tokens ,
2026-01-14 16:12:08 +08:00
COALESCE ( SUM ( total_cost * COALESCE ( account_rate_multiplier , 1 ) ) , 0 ) as cost ,
COALESCE ( SUM ( total_cost ) , 0 ) as standard_cost ,
COALESCE ( SUM ( actual_cost ) , 0 ) as user_cost
2025-12-29 10:03:27 +08:00
FROM usage_logs
WHERE account_id = $ 1 AND created_at >= $ 2
`
2025-12-18 13:50:39 +08:00
2025-12-29 10:03:27 +08:00
stats := & usagestats . AccountStats { }
2025-12-29 19:23:49 +08:00
if err := scanSingleRow (
ctx ,
r . sql ,
query ,
[ ] any { accountID , startTime } ,
& stats . Requests ,
& stats . Tokens ,
& stats . Cost ,
2026-01-14 16:12:08 +08:00
& stats . StandardCost ,
& stats . UserCost ,
2025-12-29 19:23:49 +08:00
) ; err != nil {
2025-12-18 13:50:39 +08:00
return nil , err
}
2025-12-29 10:03:27 +08:00
return stats , nil
2025-12-18 13:50:39 +08:00
}
2026-02-22 13:31:30 +08:00
// GetAccountWindowStatsBatch 批量获取同一窗口起点下多个账号的统计数据。
// 返回 map[accountID]*AccountStats;窗口内无记录的账号填充零值统计,便于上层直接复用。
func (r *usageLogRepository) GetAccountWindowStatsBatch(ctx context.Context, accountIDs []int64, startTime time.Time) (map[int64]*usagestats.AccountStats, error) {
	statsByAccount := make(map[int64]*usagestats.AccountStats, len(accountIDs))
	if len(accountIDs) == 0 {
		return statsByAccount, nil
	}
	const query = `
		SELECT
			account_id,
			COUNT(*) as requests,
			COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as tokens,
			COALESCE(SUM(total_cost * COALESCE(account_rate_multiplier, 1)), 0) as cost,
			COALESCE(SUM(total_cost), 0) as standard_cost,
			COALESCE(SUM(actual_cost), 0) as user_cost
		FROM usage_logs
		WHERE account_id = ANY($1) AND created_at >= $2
		GROUP BY account_id
	`
	rows, err := r.sql.QueryContext(ctx, query, pq.Array(accountIDs), startTime)
	if err != nil {
		return nil, err
	}
	defer func() { _ = rows.Close() }()
	for rows.Next() {
		var id int64
		entry := &usagestats.AccountStats{}
		if err := rows.Scan(&id, &entry.Requests, &entry.Tokens, &entry.Cost, &entry.StandardCost, &entry.UserCost); err != nil {
			return nil, err
		}
		statsByAccount[id] = entry
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	// 补齐结果集中缺失的账号,调用方无需判空。
	for _, id := range accountIDs {
		if _, ok := statsByAccount[id]; !ok {
			statsByAccount[id] = &usagestats.AccountStats{}
		}
	}
	return statsByAccount, nil
}
2026-02-28 15:01:20 +08:00
// GetGeminiUsageTotalsBatch 批量聚合 Gemini 账号在窗口内的 Pro/Flash 请求与用量。
// 模型分类规则与 service.geminiModelClassFromName 一致:model 包含 flash/lite 视为 flash,其余视为 pro。
// 返回 map[accountID]GeminiUsageTotals;窗口内无记录的账号填充零值,便于上层直接使用。
// 时间范围为半开区间 [startTime, endTime)。
func (r *usageLogRepository) GetGeminiUsageTotalsBatch(ctx context.Context, accountIDs []int64, startTime, endTime time.Time) (map[int64]service.GeminiUsageTotals, error) {
	result := make(map[int64]service.GeminiUsageTotals, len(accountIDs))
	if len(accountIDs) == 0 {
		return result, nil
	}
	// 在内层子查询中一次性计算 is_flash,消除原先 6 处重复 LIKE 判断带来的维护漂移风险;
	// FILTER 聚合与原 CASE WHEN 写法语义等价(model 经 COALESCE 处理,is_flash 不会为 NULL)。
	const query = `
		SELECT
			account_id,
			COUNT(*) FILTER (WHERE is_flash) AS flash_requests,
			COUNT(*) FILTER (WHERE NOT is_flash) AS pro_requests,
			COALESCE(SUM(total_tokens) FILTER (WHERE is_flash), 0) AS flash_tokens,
			COALESCE(SUM(total_tokens) FILTER (WHERE NOT is_flash), 0) AS pro_tokens,
			COALESCE(SUM(actual_cost) FILTER (WHERE is_flash), 0) AS flash_cost,
			COALESCE(SUM(actual_cost) FILTER (WHERE NOT is_flash), 0) AS pro_cost
		FROM (
			SELECT
				account_id,
				actual_cost,
				(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens) AS total_tokens,
				(LOWER(COALESCE(model, '')) LIKE '%flash%' OR LOWER(COALESCE(model, '')) LIKE '%lite%') AS is_flash
			FROM usage_logs
			WHERE account_id = ANY($1) AND created_at >= $2 AND created_at < $3
		) classified
		GROUP BY account_id
	`
	rows, err := r.sql.QueryContext(ctx, query, pq.Array(accountIDs), startTime, endTime)
	if err != nil {
		return nil, err
	}
	defer func() { _ = rows.Close() }()
	for rows.Next() {
		var accountID int64
		var totals service.GeminiUsageTotals
		if err := rows.Scan(
			&accountID,
			&totals.FlashRequests,
			&totals.ProRequests,
			&totals.FlashTokens,
			&totals.ProTokens,
			&totals.FlashCost,
			&totals.ProCost,
		); err != nil {
			return nil, err
		}
		result[accountID] = totals
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	// 补齐结果集中缺失的账号,调用方无需判空。
	for _, accountID := range accountIDs {
		if _, ok := result[accountID]; !ok {
			result[accountID] = service.GeminiUsageTotals{}
		}
	}
	return result, nil
}
2025-12-18 13:50:39 +08:00
// TrendDataPoint represents a single point in trend data.
// Alias of usagestats.TrendDataPoint, re-exported so repository-layer callers
// need not import the usagestats package directly.
type TrendDataPoint = usagestats.TrendDataPoint

// ModelStat represents usage statistics for a single model.
// Alias of usagestats.ModelStat, re-exported for repository-layer callers.
type ModelStat = usagestats.ModelStat

// UserUsageTrendPoint represents user usage trend data point.
// Alias of usagestats.UserUsageTrendPoint, re-exported for repository-layer callers.
type UserUsageTrendPoint = usagestats.UserUsageTrendPoint

// APIKeyUsageTrendPoint represents API key usage trend data point.
// Alias of usagestats.APIKeyUsageTrendPoint, re-exported for repository-layer callers.
type APIKeyUsageTrendPoint = usagestats.APIKeyUsageTrendPoint
2025-12-18 13:50:39 +08:00
2026-01-04 19:27:53 +08:00
// GetAPIKeyUsageTrend returns usage trend data grouped by API key and date
func ( r * usageLogRepository ) GetAPIKeyUsageTrend ( ctx context . Context , startTime , endTime time . Time , granularity string , limit int ) ( results [ ] APIKeyUsageTrendPoint , err error ) {
2026-02-07 19:46:42 +08:00
dateFormat := safeDateFormat ( granularity )
2025-12-18 13:50:39 +08:00
2025-12-29 10:03:27 +08:00
query := fmt . Sprintf ( `
2025-12-18 13:50:39 +08:00
WITH top_keys AS (
SELECT api_key_id
FROM usage_logs
2025-12-29 10:03:27 +08:00
WHERE created_at >= $ 1 AND created_at < $ 2
2025-12-18 13:50:39 +08:00
GROUP BY api_key_id
ORDER BY SUM ( input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens ) DESC
2025-12-29 10:03:27 +08:00
LIMIT $ 3
2025-12-18 13:50:39 +08:00
)
SELECT
2025-12-29 10:03:27 +08:00
TO_CHAR ( u . created_at , ' % s ' ) as date ,
2025-12-18 13:50:39 +08:00
u . api_key_id ,
COALESCE ( k . name , ' ' ) as key_name ,
COUNT ( * ) as requests ,
COALESCE ( SUM ( u . input_tokens + u . output_tokens + u . cache_creation_tokens + u . cache_read_tokens ) , 0 ) as tokens
FROM usage_logs u
LEFT JOIN api_keys k ON u . api_key_id = k . id
WHERE u . api_key_id IN ( SELECT api_key_id FROM top_keys )
2025-12-29 10:03:27 +08:00
AND u . created_at >= $ 4 AND u . created_at < $ 5
2025-12-18 13:50:39 +08:00
GROUP BY date , u . api_key_id , k . name
ORDER BY date ASC , tokens DESC
2025-12-29 10:03:27 +08:00
` , dateFormat )
2025-12-18 13:50:39 +08:00
2025-12-29 10:03:27 +08:00
rows , err := r . sql . QueryContext ( ctx , query , startTime , endTime , limit , startTime , endTime )
2025-12-18 13:50:39 +08:00
if err != nil {
return nil , err
}
2025-12-30 17:13:32 +08:00
defer func ( ) {
// 保持主错误优先;仅在无错误时回传 Close 失败。
// 同时清空返回值,避免误用不完整结果。
if closeErr := rows . Close ( ) ; closeErr != nil && err == nil {
err = closeErr
results = nil
}
} ( )
2025-12-29 10:03:27 +08:00
2026-01-04 19:27:53 +08:00
results = make ( [ ] APIKeyUsageTrendPoint , 0 )
2025-12-29 10:03:27 +08:00
for rows . Next ( ) {
2026-01-04 19:27:53 +08:00
var row APIKeyUsageTrendPoint
if err = rows . Scan ( & row . Date , & row . APIKeyID , & row . KeyName , & row . Requests , & row . Tokens ) ; err != nil {
2025-12-29 10:03:27 +08:00
return nil , err
}
results = append ( results , row )
}
2025-12-30 17:13:32 +08:00
if err = rows . Err ( ) ; err != nil {
2025-12-29 10:03:27 +08:00
return nil , err
}
2025-12-18 13:50:39 +08:00
return results , nil
}
// GetUserUsageTrend returns usage trend data grouped by user and date
2025-12-30 17:13:32 +08:00
func ( r * usageLogRepository ) GetUserUsageTrend ( ctx context . Context , startTime , endTime time . Time , granularity string , limit int ) ( results [ ] UserUsageTrendPoint , err error ) {
2026-02-07 19:46:42 +08:00
dateFormat := safeDateFormat ( granularity )
2025-12-18 13:50:39 +08:00
2025-12-29 10:03:27 +08:00
query := fmt . Sprintf ( `
2025-12-18 13:50:39 +08:00
WITH top_users AS (
SELECT user_id
FROM usage_logs
2025-12-29 10:03:27 +08:00
WHERE created_at >= $ 1 AND created_at < $ 2
2025-12-18 13:50:39 +08:00
GROUP BY user_id
ORDER BY SUM ( input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens ) DESC
2025-12-29 10:03:27 +08:00
LIMIT $ 3
2025-12-18 13:50:39 +08:00
)
SELECT
2025-12-29 10:03:27 +08:00
TO_CHAR ( u . created_at , ' % s ' ) as date ,
2025-12-18 13:50:39 +08:00
u . user_id ,
COALESCE ( us . email , ' ' ) as email ,
COUNT ( * ) as requests ,
COALESCE ( SUM ( u . input_tokens + u . output_tokens + u . cache_creation_tokens + u . cache_read_tokens ) , 0 ) as tokens ,
COALESCE ( SUM ( u . total_cost ) , 0 ) as cost ,
COALESCE ( SUM ( u . actual_cost ) , 0 ) as actual_cost
FROM usage_logs u
LEFT JOIN users us ON u . user_id = us . id
WHERE u . user_id IN ( SELECT user_id FROM top_users )
2025-12-29 10:03:27 +08:00
AND u . created_at >= $ 4 AND u . created_at < $ 5
2025-12-18 13:50:39 +08:00
GROUP BY date , u . user_id , us . email
ORDER BY date ASC , tokens DESC
2025-12-29 10:03:27 +08:00
` , dateFormat )
2025-12-18 13:50:39 +08:00
2025-12-29 10:03:27 +08:00
rows , err := r . sql . QueryContext ( ctx , query , startTime , endTime , limit , startTime , endTime )
2025-12-18 13:50:39 +08:00
if err != nil {
return nil , err
}
2025-12-30 17:13:32 +08:00
defer func ( ) {
// 保持主错误优先;仅在无错误时回传 Close 失败。
// 同时清空返回值,避免误用不完整结果。
if closeErr := rows . Close ( ) ; closeErr != nil && err == nil {
err = closeErr
results = nil
}
} ( )
2025-12-29 10:03:27 +08:00
2025-12-30 17:13:32 +08:00
results = make ( [ ] UserUsageTrendPoint , 0 )
2025-12-29 10:03:27 +08:00
for rows . Next ( ) {
var row UserUsageTrendPoint
2025-12-30 17:13:32 +08:00
if err = rows . Scan ( & row . Date , & row . UserID , & row . Email , & row . Requests , & row . Tokens , & row . Cost , & row . ActualCost ) ; err != nil {
2025-12-29 10:03:27 +08:00
return nil , err
}
results = append ( results , row )
}
2025-12-30 17:13:32 +08:00
if err = rows . Err ( ) ; err != nil {
2025-12-29 10:03:27 +08:00
return nil , err
}
2025-12-18 13:50:39 +08:00
return results , nil
}
// UserDashboardStats 用户仪表盘统计。
// Alias of usagestats.UserDashboardStats, re-exported for repository-layer callers.
type UserDashboardStats = usagestats.UserDashboardStats
2025-12-18 13:50:39 +08:00
// GetUserDashboardStats 获取用户专属的仪表盘统计
2025-12-25 20:52:47 +08:00
func ( r * usageLogRepository ) GetUserDashboardStats ( ctx context . Context , userID int64 ) ( * UserDashboardStats , error ) {
2025-12-29 10:03:27 +08:00
stats := & UserDashboardStats { }
2025-12-18 13:50:39 +08:00
today := timezone . Today ( )
// API Key 统计
2025-12-29 19:23:49 +08:00
if err := scanSingleRow (
ctx ,
r . sql ,
"SELECT COUNT(*) FROM api_keys WHERE user_id = $1 AND deleted_at IS NULL" ,
[ ] any { userID } ,
2026-01-04 19:27:53 +08:00
& stats . TotalAPIKeys ,
2025-12-29 19:23:49 +08:00
) ; err != nil {
2025-12-29 10:03:27 +08:00
return nil , err
}
2025-12-29 19:23:49 +08:00
if err := scanSingleRow (
ctx ,
r . sql ,
"SELECT COUNT(*) FROM api_keys WHERE user_id = $1 AND status = $2 AND deleted_at IS NULL" ,
[ ] any { userID , service . StatusActive } ,
2026-01-04 19:27:53 +08:00
& stats . ActiveAPIKeys ,
2025-12-29 19:23:49 +08:00
) ; err != nil {
2025-12-29 10:03:27 +08:00
return nil , err
}
2025-12-18 13:50:39 +08:00
// 累计 Token 统计
2025-12-29 10:03:27 +08:00
totalStatsQuery := `
SELECT
2025-12-18 13:50:39 +08:00
COUNT ( * ) as total_requests ,
COALESCE ( SUM ( input_tokens ) , 0 ) as total_input_tokens ,
COALESCE ( SUM ( output_tokens ) , 0 ) as total_output_tokens ,
COALESCE ( SUM ( cache_creation_tokens ) , 0 ) as total_cache_creation_tokens ,
COALESCE ( SUM ( cache_read_tokens ) , 0 ) as total_cache_read_tokens ,
COALESCE ( SUM ( total_cost ) , 0 ) as total_cost ,
COALESCE ( SUM ( actual_cost ) , 0 ) as total_actual_cost ,
COALESCE ( AVG ( duration_ms ) , 0 ) as avg_duration_ms
2025-12-29 10:03:27 +08:00
FROM usage_logs
WHERE user_id = $ 1
`
2025-12-29 19:23:49 +08:00
if err := scanSingleRow (
ctx ,
r . sql ,
totalStatsQuery ,
[ ] any { userID } ,
& stats . TotalRequests ,
& stats . TotalInputTokens ,
& stats . TotalOutputTokens ,
& stats . TotalCacheCreationTokens ,
& stats . TotalCacheReadTokens ,
& stats . TotalCost ,
& stats . TotalActualCost ,
& stats . AverageDurationMs ,
) ; err != nil {
2025-12-29 10:03:27 +08:00
return nil , err
}
2025-12-18 13:50:39 +08:00
stats . TotalTokens = stats . TotalInputTokens + stats . TotalOutputTokens + stats . TotalCacheCreationTokens + stats . TotalCacheReadTokens
// 今日 Token 统计
2025-12-29 10:03:27 +08:00
todayStatsQuery := `
SELECT
2025-12-18 13:50:39 +08:00
COUNT ( * ) as today_requests ,
COALESCE ( SUM ( input_tokens ) , 0 ) as today_input_tokens ,
COALESCE ( SUM ( output_tokens ) , 0 ) as today_output_tokens ,
COALESCE ( SUM ( cache_creation_tokens ) , 0 ) as today_cache_creation_tokens ,
COALESCE ( SUM ( cache_read_tokens ) , 0 ) as today_cache_read_tokens ,
COALESCE ( SUM ( total_cost ) , 0 ) as today_cost ,
COALESCE ( SUM ( actual_cost ) , 0 ) as today_actual_cost
2025-12-29 10:03:27 +08:00
FROM usage_logs
WHERE user_id = $ 1 AND created_at >= $ 2
`
2025-12-29 19:23:49 +08:00
if err := scanSingleRow (
ctx ,
r . sql ,
todayStatsQuery ,
[ ] any { userID , today } ,
& stats . TodayRequests ,
& stats . TodayInputTokens ,
& stats . TodayOutputTokens ,
& stats . TodayCacheCreationTokens ,
& stats . TodayCacheReadTokens ,
& stats . TodayCost ,
& stats . TodayActualCost ,
) ; err != nil {
2025-12-29 10:03:27 +08:00
return nil , err
}
2025-12-18 13:50:39 +08:00
stats . TodayTokens = stats . TodayInputTokens + stats . TodayOutputTokens + stats . TodayCacheCreationTokens + stats . TodayCacheReadTokens
2025-12-24 10:24:02 +08:00
// 性能指标: RPM 和 TPM( 最近1分钟, 仅统计该用户的请求)
2025-12-29 10:03:27 +08:00
rpm , tpm , err := r . getPerformanceStats ( ctx , userID )
if err != nil {
return nil , err
}
stats . Rpm = rpm
stats . Tpm = tpm
2025-12-24 10:24:02 +08:00
2025-12-29 10:03:27 +08:00
return stats , nil
2025-12-18 13:50:39 +08:00
}
2026-02-05 11:41:25 +08:00
// getPerformanceStatsByAPIKey 获取指定 API Key 的 RPM 和 TPM(近5分钟平均值)。
func (r *usageLogRepository) getPerformanceStatsByAPIKey(ctx context.Context, apiKeyID int64) (rpm, tpm int64, err error) {
	const query = `
		SELECT
			COUNT(*) as request_count,
			COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as token_count
		FROM usage_logs
		WHERE created_at >= $1 AND api_key_id = $2`
	windowStart := time.Now().Add(-5 * time.Minute)
	var requestCount, tokenCount int64
	if err := scanSingleRow(ctx, r.sql, query, []any{windowStart, apiKeyID}, &requestCount, &tokenCount); err != nil {
		return 0, 0, err
	}
	// 以 5 分钟窗口总量折算为每分钟平均值(整数除法)。
	return requestCount / 5, tokenCount / 5, nil
}
// GetAPIKeyDashboardStats 获取指定 API Key 的仪表盘统计(按 api_key_id 过滤)。
// 结构与 GetUserDashboardStats 平行:累计用量、今日用量、近 5 分钟 RPM/TPM。
func (r *usageLogRepository) GetAPIKeyDashboardStats(ctx context.Context, apiKeyID int64) (*UserDashboardStats, error) {
	stats := &UserDashboardStats{
		// API Key 维度不需要统计 key 数量,固定为 1。
		TotalAPIKeys:  1,
		ActiveAPIKeys: 1,
	}
	today := timezone.Today()

	// 累计 Token 统计。
	const lifetimeQuery = `
		SELECT
			COUNT(*) as total_requests,
			COALESCE(SUM(input_tokens), 0) as total_input_tokens,
			COALESCE(SUM(output_tokens), 0) as total_output_tokens,
			COALESCE(SUM(cache_creation_tokens), 0) as total_cache_creation_tokens,
			COALESCE(SUM(cache_read_tokens), 0) as total_cache_read_tokens,
			COALESCE(SUM(total_cost), 0) as total_cost,
			COALESCE(SUM(actual_cost), 0) as total_actual_cost,
			COALESCE(AVG(duration_ms), 0) as avg_duration_ms
		FROM usage_logs
		WHERE api_key_id = $1
	`
	if err := scanSingleRow(
		ctx,
		r.sql,
		lifetimeQuery,
		[]any{apiKeyID},
		&stats.TotalRequests,
		&stats.TotalInputTokens,
		&stats.TotalOutputTokens,
		&stats.TotalCacheCreationTokens,
		&stats.TotalCacheReadTokens,
		&stats.TotalCost,
		&stats.TotalActualCost,
		&stats.AverageDurationMs,
	); err != nil {
		return nil, err
	}
	stats.TotalTokens = stats.TotalInputTokens + stats.TotalOutputTokens + stats.TotalCacheCreationTokens + stats.TotalCacheReadTokens

	// 今日 Token 统计。
	const todayQuery = `
		SELECT
			COUNT(*) as today_requests,
			COALESCE(SUM(input_tokens), 0) as today_input_tokens,
			COALESCE(SUM(output_tokens), 0) as today_output_tokens,
			COALESCE(SUM(cache_creation_tokens), 0) as today_cache_creation_tokens,
			COALESCE(SUM(cache_read_tokens), 0) as today_cache_read_tokens,
			COALESCE(SUM(total_cost), 0) as today_cost,
			COALESCE(SUM(actual_cost), 0) as today_actual_cost
		FROM usage_logs
		WHERE api_key_id = $1 AND created_at >= $2
	`
	if err := scanSingleRow(
		ctx,
		r.sql,
		todayQuery,
		[]any{apiKeyID, today},
		&stats.TodayRequests,
		&stats.TodayInputTokens,
		&stats.TodayOutputTokens,
		&stats.TodayCacheCreationTokens,
		&stats.TodayCacheReadTokens,
		&stats.TodayCost,
		&stats.TodayActualCost,
	); err != nil {
		return nil, err
	}
	stats.TodayTokens = stats.TodayInputTokens + stats.TodayOutputTokens + stats.TodayCacheCreationTokens + stats.TodayCacheReadTokens

	// 性能指标:RPM 和 TPM(最近5分钟,按 API Key 过滤)。
	rpm, tpm, err := r.getPerformanceStatsByAPIKey(ctx, apiKeyID)
	if err != nil {
		return nil, err
	}
	stats.Rpm = rpm
	stats.Tpm = tpm
	return stats, nil
}
2025-12-18 13:50:39 +08:00
// GetUserUsageTrendByUserID 获取指定用户的使用趋势
2025-12-30 17:13:32 +08:00
func ( r * usageLogRepository ) GetUserUsageTrendByUserID ( ctx context . Context , userID int64 , startTime , endTime time . Time , granularity string ) ( results [ ] TrendDataPoint , err error ) {
2026-02-07 19:46:42 +08:00
dateFormat := safeDateFormat ( granularity )
2025-12-18 13:50:39 +08:00
2025-12-29 10:03:27 +08:00
query := fmt . Sprintf ( `
SELECT
TO_CHAR ( created_at , ' % s ' ) as date ,
2025-12-18 13:50:39 +08:00
COUNT ( * ) as requests ,
COALESCE ( SUM ( input_tokens ) , 0 ) as input_tokens ,
COALESCE ( SUM ( output_tokens ) , 0 ) as output_tokens ,
2026-03-05 18:32:17 +08:00
COALESCE ( SUM ( cache_creation_tokens ) , 0 ) as cache_creation_tokens ,
COALESCE ( SUM ( cache_read_tokens ) , 0 ) as cache_read_tokens ,
2025-12-18 13:50:39 +08:00
COALESCE ( SUM ( input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens ) , 0 ) as total_tokens ,
COALESCE ( SUM ( total_cost ) , 0 ) as cost ,
COALESCE ( SUM ( actual_cost ) , 0 ) as actual_cost
2025-12-29 10:03:27 +08:00
FROM usage_logs
WHERE user_id = $ 1 AND created_at >= $ 2 AND created_at < $ 3
GROUP BY date
ORDER BY date ASC
` , dateFormat )
2025-12-18 13:50:39 +08:00
2025-12-29 10:03:27 +08:00
rows , err := r . sql . QueryContext ( ctx , query , userID , startTime , endTime )
2025-12-18 13:50:39 +08:00
if err != nil {
return nil , err
}
2025-12-30 17:13:32 +08:00
defer func ( ) {
// 保持主错误优先;仅在无错误时回传 Close 失败。
// 同时清空返回值,避免误用不完整结果。
if closeErr := rows . Close ( ) ; closeErr != nil && err == nil {
err = closeErr
results = nil
}
} ( )
2025-12-18 13:50:39 +08:00
2025-12-30 17:13:32 +08:00
results , err = scanTrendRows ( rows )
if err != nil {
return nil , err
}
return results , nil
2025-12-18 13:50:39 +08:00
}
// GetUserModelStats 获取指定用户的模型统计
2025-12-30 17:13:32 +08:00
func ( r * usageLogRepository ) GetUserModelStats ( ctx context . Context , userID int64 , startTime , endTime time . Time ) ( results [ ] ModelStat , err error ) {
2025-12-29 10:03:27 +08:00
query := `
SELECT
2025-12-18 13:50:39 +08:00
model ,
COUNT ( * ) as requests ,
COALESCE ( SUM ( input_tokens ) , 0 ) as input_tokens ,
COALESCE ( SUM ( output_tokens ) , 0 ) as output_tokens ,
2026-03-05 18:32:17 +08:00
COALESCE ( SUM ( cache_creation_tokens ) , 0 ) as cache_creation_tokens ,
COALESCE ( SUM ( cache_read_tokens ) , 0 ) as cache_read_tokens ,
2025-12-18 13:50:39 +08:00
COALESCE ( SUM ( input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens ) , 0 ) as total_tokens ,
COALESCE ( SUM ( total_cost ) , 0 ) as cost ,
COALESCE ( SUM ( actual_cost ) , 0 ) as actual_cost
2025-12-29 10:03:27 +08:00
FROM usage_logs
WHERE user_id = $ 1 AND created_at >= $ 2 AND created_at < $ 3
GROUP BY model
ORDER BY total_tokens DESC
`
2025-12-18 13:50:39 +08:00
2025-12-29 10:03:27 +08:00
rows , err := r . sql . QueryContext ( ctx , query , userID , startTime , endTime )
2025-12-18 13:50:39 +08:00
if err != nil {
return nil , err
}
2025-12-30 17:13:32 +08:00
defer func ( ) {
// 保持主错误优先;仅在无错误时回传 Close 失败。
// 同时清空返回值,避免误用不完整结果。
if closeErr := rows . Close ( ) ; closeErr != nil && err == nil {
err = closeErr
results = nil
}
} ( )
2025-12-18 13:50:39 +08:00
2025-12-30 17:13:32 +08:00
results , err = scanModelStatsRows ( rows )
if err != nil {
return nil , err
}
return results , nil
2025-12-18 13:50:39 +08:00
}
// UsageLogFilters represents filters for usage log queries.
// Alias of usagestats.UsageLogFilters, re-exported for repository-layer callers.
type UsageLogFilters = usagestats.UsageLogFilters
2025-12-18 13:50:39 +08:00
// ListWithFilters lists usage logs with optional filters (for admin).
//
// 按过滤条件动态拼装 WHERE 子句后分页查询 usage_logs,并在返回前补齐关联信息。
// 占位符序号由 len(args)+1 计算:条件与参数必须成对、按序追加,新增过滤项时不可打乱顺序。
func (r *usageLogRepository) ListWithFilters(ctx context.Context, params pagination.PaginationParams, filters UsageLogFilters) ([]service.UsageLog, *pagination.PaginationResult, error) {
	conditions := make([]string, 0, 8)
	args := make([]any, 0, 8)
	if filters.UserID > 0 {
		conditions = append(conditions, fmt.Sprintf("user_id = $%d", len(args)+1))
		args = append(args, filters.UserID)
	}
	if filters.APIKeyID > 0 {
		conditions = append(conditions, fmt.Sprintf("api_key_id = $%d", len(args)+1))
		args = append(args, filters.APIKeyID)
	}
	if filters.AccountID > 0 {
		conditions = append(conditions, fmt.Sprintf("account_id = $%d", len(args)+1))
		args = append(args, filters.AccountID)
	}
	if filters.GroupID > 0 {
		conditions = append(conditions, fmt.Sprintf("group_id = $%d", len(args)+1))
		args = append(args, filters.GroupID)
	}
	if filters.Model != "" {
		conditions = append(conditions, fmt.Sprintf("model = $%d", len(args)+1))
		args = append(args, filters.Model)
	}
	// 请求类型 / 流式标记的组合条件由共享助手追加,保持与其他列表接口一致。
	conditions, args = appendRequestTypeOrStreamWhereCondition(conditions, args, filters.RequestType, filters.Stream)
	if filters.BillingType != nil {
		conditions = append(conditions, fmt.Sprintf("billing_type = $%d", len(args)+1))
		args = append(args, int16(*filters.BillingType))
	}
	if filters.StartTime != nil {
		conditions = append(conditions, fmt.Sprintf("created_at >= $%d", len(args)+1))
		args = append(args, *filters.StartTime)
	}
	if filters.EndTime != nil {
		// NOTE(review): 此处结束时间为闭区间(<=),与本文件其他方法的半开区间(<)不同 — 确认是否有意为之。
		conditions = append(conditions, fmt.Sprintf("created_at <= $%d", len(args)+1))
		args = append(args, *filters.EndTime)
	}
	whereClause := buildWhere(conditions)
	var (
		logs []service.UsageLog
		page *pagination.PaginationResult
		err  error
	)
	// 无强选择性过滤时走近似总数的快速分页,降低大表 COUNT(*) 代价。
	if shouldUseFastUsageLogTotal(filters) {
		logs, page, err = r.listUsageLogsWithFastPagination(ctx, whereClause, args, params)
	} else {
		logs, page, err = r.listUsageLogsWithPagination(ctx, whereClause, args, params)
	}
	if err != nil {
		return nil, nil, err
	}
	if err := r.hydrateUsageLogAssociations(ctx, logs); err != nil {
		return nil, nil, err
	}
	return logs, page, nil
}
2026-03-04 13:46:08 +08:00
// shouldUseFastUsageLogTotal 判断是否可以使用近似总数的快速分页。
// 显式要求精确总数时直接否;带有强选择性过滤(用户/Key/账号)时记录集通常较小,保留精确总数。
func shouldUseFastUsageLogTotal(filters UsageLogFilters) bool {
	if filters.ExactTotal {
		return false
	}
	hasSelectiveFilter := filters.UserID != 0 || filters.APIKeyID != 0 || filters.AccountID != 0
	return !hasSelectiveFilter
}
2025-12-18 13:50:39 +08:00
// UsageStats represents usage statistics.
// Alias of usagestats.UsageStats, re-exported for repository-layer callers.
type UsageStats = usagestats.UsageStats

// BatchUserUsageStats represents usage stats for a single user.
// Alias of usagestats.BatchUserUsageStats, re-exported for repository-layer callers.
type BatchUserUsageStats = usagestats.BatchUserUsageStats
2025-12-18 13:50:39 +08:00
2026-03-04 13:46:08 +08:00
// normalizePositiveInt64IDs returns ids with non-positive values dropped and
// duplicates removed, preserving first-occurrence order. A nil input (or an
// empty one) yields nil so callers can rely on plain len() checks.
func normalizePositiveInt64IDs(ids []int64) []int64 {
	if len(ids) == 0 {
		return nil
	}
	var (
		dedup  = make(map[int64]struct{}, len(ids))
		result = make([]int64, 0, len(ids))
	)
	for _, candidate := range ids {
		if candidate <= 0 {
			// Zero/negative IDs can never match a row; skip them.
			continue
		}
		if _, dup := dedup[candidate]; dup {
			continue
		}
		dedup[candidate] = struct{}{}
		result = append(result, candidate)
	}
	return result
}
2026-02-07 19:46:42 +08:00
// GetBatchUserUsageStats gets today and total actual_cost for multiple users within a time range.
// If startTime is zero, defaults to 30 days ago.
// Every requested (positive, deduplicated) user ID gets an entry in the result,
// even when it has no usage rows.
func (r *usageLogRepository) GetBatchUserUsageStats(ctx context.Context, userIDs []int64, startTime, endTime time.Time) (map[int64]*BatchUserUsageStats, error) {
	result := make(map[int64]*BatchUserUsageStats)
	normalizedUserIDs := normalizePositiveInt64IDs(userIDs)
	if len(normalizedUserIDs) == 0 {
		return result, nil
	}
	// Default to the last 30 days when no explicit window is given.
	if startTime.IsZero() {
		startTime = time.Now().AddDate(0, 0, -30)
	}
	if endTime.IsZero() {
		endTime = time.Now()
	}
	// Pre-seed so absent users still show zeroed stats.
	for _, id := range normalizedUserIDs {
		result[id] = &BatchUserUsageStats{UserID: id}
	}
	// Single grouped scan computing both aggregates via FILTER clauses:
	//   total_cost: rows in [startTime, endTime)
	//   today_cost: rows since local midnight ($4), no upper bound
	// WHERE uses LEAST($2, $4) so both filters see every row they need.
	query := `
		SELECT
			user_id,
			COALESCE(SUM(actual_cost) FILTER (WHERE created_at >= $2 AND created_at < $3), 0) as total_cost,
			COALESCE(SUM(actual_cost) FILTER (WHERE created_at >= $4), 0) as today_cost
		FROM usage_logs
		WHERE user_id = ANY($1)
			AND created_at >= LEAST($2, $4)
		GROUP BY user_id
	`
	today := timezone.Today()
	rows, err := r.sql.QueryContext(ctx, query, pq.Array(normalizedUserIDs), startTime, endTime, today)
	if err != nil {
		return nil, err
	}
	for rows.Next() {
		var userID int64
		var total float64
		var todayTotal float64
		if err := rows.Scan(&userID, &total, &todayTotal); err != nil {
			_ = rows.Close()
			return nil, err
		}
		if stats, ok := result[userID]; ok {
			stats.TotalActualCost = total
			stats.TodayActualCost = todayTotal
		}
	}
	// Close explicitly (not deferred) so its error can be reported to the caller.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return result, nil
}
2026-01-04 19:27:53 +08:00
// BatchAPIKeyUsageStats represents usage stats for a single API key.
// Alias into the shared usagestats package so layers share one definition.
type BatchAPIKeyUsageStats = usagestats.BatchAPIKeyUsageStats
2025-12-18 13:50:39 +08:00
2026-02-07 19:46:42 +08:00
// GetBatchAPIKeyUsageStats gets today and total actual_cost for multiple API keys within a time range.
// If startTime is zero, defaults to 30 days ago.
// Mirrors GetBatchUserUsageStats, keyed by api_key_id instead of user_id.
func (r *usageLogRepository) GetBatchAPIKeyUsageStats(ctx context.Context, apiKeyIDs []int64, startTime, endTime time.Time) (map[int64]*BatchAPIKeyUsageStats, error) {
	result := make(map[int64]*BatchAPIKeyUsageStats)
	normalizedAPIKeyIDs := normalizePositiveInt64IDs(apiKeyIDs)
	if len(normalizedAPIKeyIDs) == 0 {
		return result, nil
	}
	// Default to the last 30 days when no explicit window is given.
	if startTime.IsZero() {
		startTime = time.Now().AddDate(0, 0, -30)
	}
	if endTime.IsZero() {
		endTime = time.Now()
	}
	// Pre-seed so keys without usage still show zeroed stats.
	for _, id := range normalizedAPIKeyIDs {
		result[id] = &BatchAPIKeyUsageStats{APIKeyID: id}
	}
	// Single grouped scan computing both aggregates via FILTER clauses:
	//   total_cost: rows in [startTime, endTime)
	//   today_cost: rows since local midnight ($4), no upper bound
	query := `
		SELECT
			api_key_id,
			COALESCE(SUM(actual_cost) FILTER (WHERE created_at >= $2 AND created_at < $3), 0) as total_cost,
			COALESCE(SUM(actual_cost) FILTER (WHERE created_at >= $4), 0) as today_cost
		FROM usage_logs
		WHERE api_key_id = ANY($1)
			AND created_at >= LEAST($2, $4)
		GROUP BY api_key_id
	`
	today := timezone.Today()
	rows, err := r.sql.QueryContext(ctx, query, pq.Array(normalizedAPIKeyIDs), startTime, endTime, today)
	if err != nil {
		return nil, err
	}
	for rows.Next() {
		var apiKeyID int64
		var total float64
		var todayTotal float64
		if err := rows.Scan(&apiKeyID, &total, &todayTotal); err != nil {
			_ = rows.Close()
			return nil, err
		}
		if stats, ok := result[apiKeyID]; ok {
			stats.TotalActualCost = total
			stats.TodayActualCost = todayTotal
		}
	}
	// Close explicitly (not deferred) so its error can be reported to the caller.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return result, nil
}
2026-01-14 21:46:39 +08:00
// GetUsageTrendWithFilters returns usage trend data with optional filters.
// Zero-valued ID filters and empty/nil optional filters mean "no filter".
func (r *usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, requestType *int16, stream *bool, billingType *int8) (results []TrendDataPoint, err error) {
	// Fast path: unfiltered day/hour trends can be served from rollup tables;
	// fall through to the raw scan when the rollup is empty or errors out.
	if shouldUsePreaggregatedTrend(granularity, userID, apiKeyID, accountID, groupID, model, requestType, stream, billingType) {
		aggregated, aggregatedErr := r.getUsageTrendFromAggregates(ctx, startTime, endTime, granularity)
		if aggregatedErr == nil && len(aggregated) > 0 {
			return aggregated, nil
		}
	}
	// granularity goes through a whitelist, so the Sprintf below cannot inject SQL.
	dateFormat := safeDateFormat(granularity)

	query := fmt.Sprintf(`
		SELECT
			TO_CHAR(created_at, '%s') as date,
			COUNT(*) as requests,
			COALESCE(SUM(input_tokens), 0) as input_tokens,
			COALESCE(SUM(output_tokens), 0) as output_tokens,
			COALESCE(SUM(cache_creation_tokens), 0) as cache_creation_tokens,
			COALESCE(SUM(cache_read_tokens), 0) as cache_read_tokens,
			COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as total_tokens,
			COALESCE(SUM(total_cost), 0) as cost,
			COALESCE(SUM(actual_cost), 0) as actual_cost
		FROM usage_logs
		WHERE created_at >= $1 AND created_at < $2
	`, dateFormat)

	// Placeholder numbers are derived from len(args)+1 so the clause text and
	// the argument list always stay in sync.
	args := []any{startTime, endTime}
	if userID > 0 {
		query += fmt.Sprintf(" AND user_id = $%d", len(args)+1)
		args = append(args, userID)
	}
	if apiKeyID > 0 {
		query += fmt.Sprintf(" AND api_key_id = $%d", len(args)+1)
		args = append(args, apiKeyID)
	}
	if accountID > 0 {
		query += fmt.Sprintf(" AND account_id = $%d", len(args)+1)
		args = append(args, accountID)
	}
	if groupID > 0 {
		query += fmt.Sprintf(" AND group_id = $%d", len(args)+1)
		args = append(args, groupID)
	}
	if model != "" {
		query += fmt.Sprintf(" AND model = $%d", len(args)+1)
		args = append(args, model)
	}
	query, args = appendRequestTypeOrStreamQueryFilter(query, args, requestType, stream)
	if billingType != nil {
		query += fmt.Sprintf(" AND billing_type = $%d", len(args)+1)
		args = append(args, int16(*billingType))
	}
	query += " GROUP BY date ORDER BY date ASC"

	rows, err := r.sql.QueryContext(ctx, query, args...)
	if err != nil {
		return nil, err
	}
	defer func() {
		// Keep the primary error; only surface Close failures when there is none,
		// and clear the results to avoid handing back a partial set.
		if closeErr := rows.Close(); closeErr != nil && err == nil {
			err = closeErr
			results = nil
		}
	}()

	results, err = scanTrendRows(rows)
	if err != nil {
		return nil, err
	}
	return results, nil
}
2026-03-03 11:49:24 +08:00
// shouldUsePreaggregatedTrend reports whether the dashboard trend query may be
// served from the pre-aggregated rollup tables. That is only valid for day or
// hour granularity with no row-level filters, because the rollups are not
// broken down by user, key, account, group, model, request type, stream flag,
// or billing type.
func shouldUsePreaggregatedTrend(granularity string, userID, apiKeyID, accountID, groupID int64, model string, requestType *int16, stream *bool, billingType *int8) bool {
	switch granularity {
	case "day", "hour":
		// Rollup tables exist for these buckets.
	default:
		return false
	}
	if userID != 0 || apiKeyID != 0 || accountID != 0 || groupID != 0 {
		return false
	}
	if model != "" {
		return false
	}
	return requestType == nil && stream == nil && billingType == nil
}
// getUsageTrendFromAggregates serves day/hour trend points from the rollup
// tables (usage_dashboard_hourly / usage_dashboard_daily) instead of scanning
// usage_logs. Returns (nil, nil) for any other granularity.
func (r *usageLogRepository) getUsageTrendFromAggregates(ctx context context.Context, startTime, endTime time.Time, granularity string) (results []TrendDataPoint, err error) {
	// Whitelisted TO_CHAR format; safe to interpolate into the query text.
	dateFormat := safeDateFormat(granularity)
	query := ""
	args := []any{startTime, endTime}
	switch granularity {
	case "hour":
		query = fmt.Sprintf(`
			SELECT
				TO_CHAR(bucket_start, '%s') as date,
				total_requests as requests,
				input_tokens,
				output_tokens,
				cache_creation_tokens,
				cache_read_tokens,
				(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens) as total_tokens,
				total_cost as cost,
				actual_cost
			FROM usage_dashboard_hourly
			WHERE bucket_start >= $1 AND bucket_start < $2
			ORDER BY bucket_start ASC
		`, dateFormat)
	case "day":
		// bucket_date is a DATE column; cast for TO_CHAR and cast the bounds to dates.
		query = fmt.Sprintf(`
			SELECT
				TO_CHAR(bucket_date::timestamp, '%s') as date,
				total_requests as requests,
				input_tokens,
				output_tokens,
				cache_creation_tokens,
				cache_read_tokens,
				(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens) as total_tokens,
				total_cost as cost,
				actual_cost
			FROM usage_dashboard_daily
			WHERE bucket_date >= $1::date AND bucket_date < $2::date
			ORDER BY bucket_date ASC
		`, dateFormat)
	default:
		// No rollup exists for other granularities; caller falls back to raw scan.
		return nil, nil
	}
	rows, err := r.sql.QueryContext(ctx, query, args...)
	if err != nil {
		return nil, err
	}
	defer func() {
		// Keep the primary error; only surface Close failures when there is none.
		if closeErr := rows.Close(); closeErr != nil && err == nil {
			err = closeErr
			results = nil
		}
	}()
	results, err = scanTrendRows(rows)
	if err != nil {
		return nil, err
	}
	return results, nil
}
2026-01-14 21:46:39 +08:00
// GetModelStatsWithFilters returns model statistics with optional filters.
// Zero-valued ID filters and nil optional filters mean "no filter".
func (r *usageLogRepository) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, requestType *int16, stream *bool, billingType *int8) (results []ModelStat, err error) {
	actualCostExpr := "COALESCE(SUM(actual_cost), 0) as actual_cost"
	// When aggregating by account_id only (no user/key filter), actual cost is
	// the account-side cost: total_cost * account_rate_multiplier.
	if accountID > 0 && userID == 0 && apiKeyID == 0 {
		actualCostExpr = "COALESCE(SUM(total_cost * COALESCE(account_rate_multiplier, 1)), 0) as actual_cost"
	}
	query := fmt.Sprintf(`
		SELECT
			model,
			COUNT(*) as requests,
			COALESCE(SUM(input_tokens), 0) as input_tokens,
			COALESCE(SUM(output_tokens), 0) as output_tokens,
			COALESCE(SUM(cache_creation_tokens), 0) as cache_creation_tokens,
			COALESCE(SUM(cache_read_tokens), 0) as cache_read_tokens,
			COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as total_tokens,
			COALESCE(SUM(total_cost), 0) as cost,
			%s
		FROM usage_logs
		WHERE created_at >= $1 AND created_at < $2
	`, actualCostExpr)

	// Placeholder numbers track len(args)+1 so clause text and args stay in sync.
	args := []any{startTime, endTime}
	if userID > 0 {
		query += fmt.Sprintf(" AND user_id = $%d", len(args)+1)
		args = append(args, userID)
	}
	if apiKeyID > 0 {
		query += fmt.Sprintf(" AND api_key_id = $%d", len(args)+1)
		args = append(args, apiKeyID)
	}
	if accountID > 0 {
		query += fmt.Sprintf(" AND account_id = $%d", len(args)+1)
		args = append(args, accountID)
	}
	if groupID > 0 {
		query += fmt.Sprintf(" AND group_id = $%d", len(args)+1)
		args = append(args, groupID)
	}
	query, args = appendRequestTypeOrStreamQueryFilter(query, args, requestType, stream)
	if billingType != nil {
		query += fmt.Sprintf(" AND billing_type = $%d", len(args)+1)
		args = append(args, int16(*billingType))
	}
	query += " GROUP BY model ORDER BY total_tokens DESC"

	rows, err := r.sql.QueryContext(ctx, query, args...)
	if err != nil {
		return nil, err
	}
	defer func() {
		// Keep the primary error; only surface Close failures when there is none,
		// and clear the results to avoid handing back a partial set.
		if closeErr := rows.Close(); closeErr != nil && err == nil {
			err = closeErr
			results = nil
		}
	}()

	results, err = scanModelStatsRows(rows)
	if err != nil {
		return nil, err
	}
	return results, nil
}
2026-03-01 20:10:51 +08:00
// GetGroupStatsWithFilters returns group usage statistics with optional filters.
// Logs without a group are bucketed under group_id 0 with a placeholder name.
func (r *usageLogRepository) GetGroupStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, requestType *int16, stream *bool, billingType *int8) (results []usagestats.GroupStat, err error) {
	query := `
		SELECT
			COALESCE(ul.group_id, 0) as group_id,
			COALESCE(g.name, '(无分组)') as group_name,
			COUNT(*) as requests,
			COALESCE(SUM(ul.input_tokens + ul.output_tokens + ul.cache_creation_tokens + ul.cache_read_tokens), 0) as total_tokens,
			COALESCE(SUM(ul.total_cost), 0) as cost,
			COALESCE(SUM(ul.actual_cost), 0) as actual_cost
		FROM usage_logs ul
		LEFT JOIN groups g ON g.id = ul.group_id
		WHERE ul.created_at >= $1 AND ul.created_at < $2
	`
	// Placeholder numbers track len(args)+1 so clause text and args stay in sync.
	args := []any{startTime, endTime}
	if userID > 0 {
		query += fmt.Sprintf(" AND ul.user_id = $%d", len(args)+1)
		args = append(args, userID)
	}
	if apiKeyID > 0 {
		query += fmt.Sprintf(" AND ul.api_key_id = $%d", len(args)+1)
		args = append(args, apiKeyID)
	}
	if accountID > 0 {
		query += fmt.Sprintf(" AND ul.account_id = $%d", len(args)+1)
		args = append(args, accountID)
	}
	if groupID > 0 {
		query += fmt.Sprintf(" AND ul.group_id = $%d", len(args)+1)
		args = append(args, groupID)
	}
	query, args = appendRequestTypeOrStreamQueryFilter(query, args, requestType, stream)
	if billingType != nil {
		query += fmt.Sprintf(" AND ul.billing_type = $%d", len(args)+1)
		args = append(args, int16(*billingType))
	}
	query += " GROUP BY ul.group_id, g.name ORDER BY total_tokens DESC"
	rows, err := r.sql.QueryContext(ctx, query, args...)
	if err != nil {
		return nil, err
	}
	defer func() {
		// Keep the primary error; only surface Close failures when there is none.
		if closeErr := rows.Close(); closeErr != nil && err == nil {
			err = closeErr
			results = nil
		}
	}()
	results = make([]usagestats.GroupStat, 0)
	for rows.Next() {
		var row usagestats.GroupStat
		if err := rows.Scan(
			&row.GroupID,
			&row.GroupName,
			&row.Requests,
			&row.TotalTokens,
			&row.Cost,
			&row.ActualCost,
		); err != nil {
			return nil, err
		}
		results = append(results, row)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return results, nil
}
2025-12-18 13:50:39 +08:00
// GetGlobalStats gets usage statistics for all users within a time range.
// NOTE(review): the upper bound here is inclusive (<= $2), unlike the
// exclusive (< $2) bound used by the trend/model queries — confirm intended.
func (r *usageLogRepository) GetGlobalStats(ctx context.Context, startTime, endTime time.Time) (*UsageStats, error) {
	query := `
		SELECT
			COUNT(*) as total_requests,
			COALESCE(SUM(input_tokens), 0) as total_input_tokens,
			COALESCE(SUM(output_tokens), 0) as total_output_tokens,
			COALESCE(SUM(cache_creation_tokens + cache_read_tokens), 0) as total_cache_tokens,
			COALESCE(SUM(total_cost), 0) as total_cost,
			COALESCE(SUM(actual_cost), 0) as total_actual_cost,
			COALESCE(AVG(duration_ms), 0) as avg_duration_ms
		FROM usage_logs
		WHERE created_at >= $1 AND created_at <= $2
	`

	stats := &UsageStats{}
	if err := scanSingleRow(
		ctx,
		r.sql,
		query,
		[]any{startTime, endTime},
		&stats.TotalRequests,
		&stats.TotalInputTokens,
		&stats.TotalOutputTokens,
		&stats.TotalCacheTokens,
		&stats.TotalCost,
		&stats.TotalActualCost,
		&stats.AverageDurationMs,
	); err != nil {
		return nil, err
	}
	// TotalTokens is derived in Go rather than selected.
	stats.TotalTokens = stats.TotalInputTokens + stats.TotalOutputTokens + stats.TotalCacheTokens
	return stats, nil
}
// GetStatsWithFilters gets usage statistics with optional filters.
// Zero-valued ID filters, empty Model, and nil pointer filters mean "no filter".
func (r *usageLogRepository) GetStatsWithFilters(ctx context.Context, filters UsageLogFilters) (*UsageStats, error) {
	// Placeholder numbers are derived from len(args)+1 so conditions and
	// arguments always stay in sync while the WHERE clause is built up.
	conditions := make([]string, 0, 9)
	args := make([]any, 0, 9)
	if filters.UserID > 0 {
		conditions = append(conditions, fmt.Sprintf("user_id = $%d", len(args)+1))
		args = append(args, filters.UserID)
	}
	if filters.APIKeyID > 0 {
		conditions = append(conditions, fmt.Sprintf("api_key_id = $%d", len(args)+1))
		args = append(args, filters.APIKeyID)
	}
	if filters.AccountID > 0 {
		conditions = append(conditions, fmt.Sprintf("account_id = $%d", len(args)+1))
		args = append(args, filters.AccountID)
	}
	if filters.GroupID > 0 {
		conditions = append(conditions, fmt.Sprintf("group_id = $%d", len(args)+1))
		args = append(args, filters.GroupID)
	}
	if filters.Model != "" {
		conditions = append(conditions, fmt.Sprintf("model = $%d", len(args)+1))
		args = append(args, filters.Model)
	}
	conditions, args = appendRequestTypeOrStreamWhereCondition(conditions, args, filters.RequestType, filters.Stream)
	if filters.BillingType != nil {
		conditions = append(conditions, fmt.Sprintf("billing_type = $%d", len(args)+1))
		args = append(args, int16(*filters.BillingType))
	}
	if filters.StartTime != nil {
		conditions = append(conditions, fmt.Sprintf("created_at >= $%d", len(args)+1))
		args = append(args, *filters.StartTime)
	}
	if filters.EndTime != nil {
		// NOTE(review): inclusive upper bound here vs exclusive in trend queries — confirm intended.
		conditions = append(conditions, fmt.Sprintf("created_at <= $%d", len(args)+1))
		args = append(args, *filters.EndTime)
	}
	query := fmt.Sprintf(`
		SELECT
			COUNT(*) as total_requests,
			COALESCE(SUM(input_tokens), 0) as total_input_tokens,
			COALESCE(SUM(output_tokens), 0) as total_output_tokens,
			COALESCE(SUM(cache_creation_tokens + cache_read_tokens), 0) as total_cache_tokens,
			COALESCE(SUM(total_cost), 0) as total_cost,
			COALESCE(SUM(actual_cost), 0) as total_actual_cost,
			COALESCE(SUM(total_cost * COALESCE(account_rate_multiplier, 1)), 0) as total_account_cost,
			COALESCE(AVG(duration_ms), 0) as avg_duration_ms
		FROM usage_logs
		%s
	`, buildWhere(conditions))
	stats := &UsageStats{}
	var totalAccountCost float64
	if err := scanSingleRow(
		ctx,
		r.sql,
		query,
		args,
		&stats.TotalRequests,
		&stats.TotalInputTokens,
		&stats.TotalOutputTokens,
		&stats.TotalCacheTokens,
		&stats.TotalCost,
		&stats.TotalActualCost,
		&totalAccountCost,
		&stats.AverageDurationMs,
	); err != nil {
		return nil, err
	}
	// Account-multiplier cost is only exposed when filtering by a single account.
	if filters.AccountID > 0 {
		stats.TotalAccountCost = &totalAccountCost
	}
	// TotalTokens is derived in Go rather than selected.
	stats.TotalTokens = stats.TotalInputTokens + stats.TotalOutputTokens + stats.TotalCacheTokens
	return stats, nil
}
2025-12-23 13:42:33 +08:00
// AccountUsageHistory represents daily usage history for an account.
// Alias into the shared usagestats package so layers share one definition.
type AccountUsageHistory = usagestats.AccountUsageHistory

// AccountUsageSummary represents summary statistics for an account.
// Alias into the shared usagestats package (see AccountUsageHistory).
type AccountUsageSummary = usagestats.AccountUsageSummary

// AccountUsageStatsResponse represents the full usage statistics response for an account.
// Alias into the shared usagestats package (see AccountUsageHistory).
type AccountUsageStatsResponse = usagestats.AccountUsageStatsResponse
2025-12-23 13:42:33 +08:00
// GetAccountUsageStats returns comprehensive usage statistics for an account over a time range:
// a per-day history, summary aggregates (including today's and peak days), and a
// best-effort per-model breakdown.
func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID int64, startTime, endTime time.Time) (resp *AccountUsageStatsResponse, err error) {
	// Nominal day count of the requested window; fall back to 30 on bad input.
	daysCount := int(endTime.Sub(startTime).Hours()/24) + 1
	if daysCount <= 0 {
		daysCount = 30
	}
	// Per-day rollup. Cost columns:
	//   cost        = standard pricing (total_cost)
	//   actual_cost = account-side cost (total_cost * account_rate_multiplier)
	//   user_cost   = what users were billed (the actual_cost column)
	query := `
		SELECT
			TO_CHAR(created_at, 'YYYY-MM-DD') as date,
			COUNT(*) as requests,
			COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as tokens,
			COALESCE(SUM(total_cost), 0) as cost,
			COALESCE(SUM(total_cost * COALESCE(account_rate_multiplier, 1)), 0) as actual_cost,
			COALESCE(SUM(actual_cost), 0) as user_cost
		FROM usage_logs
		WHERE account_id = $1 AND created_at >= $2 AND created_at < $3
		GROUP BY date
		ORDER BY date ASC
	`
	rows, err := r.sql.QueryContext(ctx, query, accountID, startTime, endTime)
	if err != nil {
		return nil, err
	}
	defer func() {
		// Keep the primary error; only surface Close failures when there is none,
		// and clear the response to avoid handing back partial results.
		if closeErr := rows.Close(); closeErr != nil && err == nil {
			err = closeErr
			resp = nil
		}
	}()
	history := make([]AccountUsageHistory, 0)
	for rows.Next() {
		var date string
		var requests int64
		var tokens int64
		var cost float64
		var actualCost float64
		var userCost float64
		if err = rows.Scan(&date, &requests, &tokens, &cost, &actualCost, &userCost); err != nil {
			return nil, err
		}
		// Parse error deliberately ignored: date comes from TO_CHAR('YYYY-MM-DD');
		// a zero time would only affect the cosmetic MM/DD label.
		t, _ := time.Parse("2006-01-02", date)
		history = append(history, AccountUsageHistory{
			Date:       date,
			Label:      t.Format("01/02"),
			Requests:   requests,
			Tokens:     tokens,
			Cost:       cost,
			ActualCost: actualCost,
			UserCost:   userCost,
		})
	}
	if err = rows.Err(); err != nil {
		return nil, err
	}

	// Accumulate summary totals and track peak cost/request days.
	var totalAccountCost, totalUserCost, totalStandardCost float64
	var totalRequests, totalTokens int64
	var highestCostDay, highestRequestDay *AccountUsageHistory
	for i := range history {
		h := &history[i]
		totalAccountCost += h.ActualCost
		totalUserCost += h.UserCost
		totalStandardCost += h.Cost
		totalRequests += h.Requests
		totalTokens += h.Tokens
		if highestCostDay == nil || h.ActualCost > highestCostDay.ActualCost {
			highestCostDay = h
		}
		if highestRequestDay == nil || h.Requests > highestRequestDay.Requests {
			highestRequestDay = h
		}
	}
	// Guard against division by zero when there was no usage at all.
	actualDaysUsed := len(history)
	if actualDaysUsed == 0 {
		actualDaysUsed = 1
	}
	// Average request latency over the same window, fetched separately.
	avgQuery := "SELECT COALESCE(AVG(duration_ms), 0) as avg_duration_ms FROM usage_logs WHERE account_id = $1 AND created_at >= $2 AND created_at < $3"
	var avgDuration float64
	if err := scanSingleRow(ctx, r.sql, avgQuery, []any{accountID, startTime, endTime}, &avgDuration); err != nil {
		return nil, err
	}
	summary := AccountUsageSummary{
		Days:              daysCount,
		ActualDaysUsed:    actualDaysUsed,
		TotalCost:         totalAccountCost,
		TotalUserCost:     totalUserCost,
		TotalStandardCost: totalStandardCost,
		TotalRequests:     totalRequests,
		TotalTokens:       totalTokens,
		AvgDailyCost:      totalAccountCost / float64(actualDaysUsed),
		AvgDailyUserCost:  totalUserCost / float64(actualDaysUsed),
		AvgDailyRequests:  float64(totalRequests) / float64(actualDaysUsed),
		AvgDailyTokens:    float64(totalTokens) / float64(actualDaysUsed),
		AvgDurationMs:     avgDuration,
	}
	// Surface today's row, when present, as a dedicated summary entry.
	todayStr := timezone.Now().Format("2006-01-02")
	for i := range history {
		if history[i].Date == todayStr {
			summary.Today = &struct {
				Date     string  `json:"date"`
				Cost     float64 `json:"cost"`
				UserCost float64 `json:"user_cost"`
				Requests int64   `json:"requests"`
				Tokens   int64   `json:"tokens"`
			}{
				Date:     history[i].Date,
				Cost:     history[i].ActualCost,
				UserCost: history[i].UserCost,
				Requests: history[i].Requests,
				Tokens:   history[i].Tokens,
			}
			break
		}
	}
	if highestCostDay != nil {
		summary.HighestCostDay = &struct {
			Date     string  `json:"date"`
			Label    string  `json:"label"`
			Cost     float64 `json:"cost"`
			UserCost float64 `json:"user_cost"`
			Requests int64   `json:"requests"`
		}{
			Date:     highestCostDay.Date,
			Label:    highestCostDay.Label,
			Cost:     highestCostDay.ActualCost,
			UserCost: highestCostDay.UserCost,
			Requests: highestCostDay.Requests,
		}
	}
	if highestRequestDay != nil {
		summary.HighestRequestDay = &struct {
			Date     string  `json:"date"`
			Label    string  `json:"label"`
			Requests int64   `json:"requests"`
			Cost     float64 `json:"cost"`
			UserCost float64 `json:"user_cost"`
		}{
			Date:     highestRequestDay.Date,
			Label:    highestRequestDay.Label,
			Requests: highestRequestDay.Requests,
			Cost:     highestRequestDay.ActualCost,
			UserCost: highestRequestDay.UserCost,
		}
	}
	// Model breakdown is best-effort: an error degrades to an empty list.
	models, err := r.GetModelStatsWithFilters(ctx, startTime, endTime, 0, 0, accountID, 0, nil, nil, nil)
	if err != nil {
		models = []ModelStat{}
	}
	resp = &AccountUsageStatsResponse{
		History: history,
		Summary: summary,
		Models:  models,
	}
	return resp, nil
}
2025-12-26 15:40:24 +08:00
2025-12-29 10:03:27 +08:00
func ( r * usageLogRepository ) listUsageLogsWithPagination ( ctx context . Context , whereClause string , args [ ] any , params pagination . PaginationParams ) ( [ ] service . UsageLog , * pagination . PaginationResult , error ) {
countQuery := "SELECT COUNT(*) FROM usage_logs " + whereClause
var total int64
2025-12-29 19:23:49 +08:00
if err := scanSingleRow ( ctx , r . sql , countQuery , args , & total ) ; err != nil {
2025-12-29 10:03:27 +08:00
return nil , nil , err
}
limitPos := len ( args ) + 1
offsetPos := len ( args ) + 2
listArgs := append ( append ( [ ] any { } , args ... ) , params . Limit ( ) , params . Offset ( ) )
query := fmt . Sprintf ( "SELECT %s FROM usage_logs %s ORDER BY id DESC LIMIT $%d OFFSET $%d" , usageLogSelectColumns , whereClause , limitPos , offsetPos )
logs , err := r . queryUsageLogs ( ctx , query , listArgs ... )
if err != nil {
return nil , nil , err
}
return logs , paginationResultFromTotal ( total , params ) , nil
2025-12-26 15:40:24 +08:00
}
2026-03-04 13:46:08 +08:00
// listUsageLogsWithFastPagination fetches a page without an exact COUNT(*):
// it reads limit+1 rows to learn whether another page exists and reports a
// synthetic total that is only guaranteed to signal "there is a next page".
func (r *usageLogRepository) listUsageLogsWithFastPagination(ctx context.Context, whereClause string, args []any, params pagination.PaginationParams) ([]service.UsageLog, *pagination.PaginationResult, error) {
	limit := params.Limit()
	offset := params.Offset()
	limitPos := len(args) + 1
	offsetPos := len(args) + 2
	// Fetch one row beyond the page size to detect whether more pages exist.
	listArgs := append(append([]any{}, args...), limit+1, offset)
	query := fmt.Sprintf("SELECT %s FROM usage_logs %s ORDER BY id DESC LIMIT $%d OFFSET $%d", usageLogSelectColumns, whereClause, limitPos, offsetPos)
	logs, err := r.queryUsageLogs(ctx, query, listArgs...)
	if err != nil {
		return nil, nil, err
	}
	hasMore := false
	if len(logs) > limit {
		hasMore = true
		logs = logs[:limit]
	}
	total := int64(offset) + int64(len(logs))
	if hasMore {
		// Only guarantee "there is a next page"; avoids a full COUNT(*) on huge tables.
		total = int64(offset) + int64(limit) + 1
	}
	return logs, paginationResultFromTotal(total, params), nil
}
2025-12-30 17:13:32 +08:00
// queryUsageLogs executes the given SELECT (expected to project
// usageLogSelectColumns) and scans every row into service.UsageLog values.
// Always returns a non-nil (possibly empty) slice on success.
func (r *usageLogRepository) queryUsageLogs(ctx context.Context, query string, args ...any) (logs []service.UsageLog, err error) {
	rows, err := r.sql.QueryContext(ctx, query, args...)
	if err != nil {
		return nil, err
	}
	defer func() {
		// Keep the primary error; only surface Close failures when there is none,
		// and clear the result to avoid handing back a partial set.
		if closeErr := rows.Close(); closeErr != nil && err == nil {
			err = closeErr
			logs = nil
		}
	}()

	logs = make([]service.UsageLog, 0)
	for rows.Next() {
		var log *service.UsageLog
		// Assign with = (not :=) so the deferred Close check sees scan errors.
		log, err = scanUsageLog(rows)
		if err != nil {
			return nil, err
		}
		logs = append(logs, *log)
	}
	if err = rows.Err(); err != nil {
		return nil, err
	}
	return logs, nil
}
// hydrateUsageLogAssociations attaches User/APIKey/Account/Group/Subscription
// entities to the given logs in place, batch-loading each relation by ID.
func (r *usageLogRepository) hydrateUsageLogAssociations(ctx context.Context, logs []service.UsageLog) error {
	// Associations are batch-loaded through Ent instead of letting the raw SQL grow further.
	if len(logs) == 0 {
		return nil
	}
	ids := collectUsageLogIDs(logs)
	users, err := r.loadUsers(ctx, ids.userIDs)
	if err != nil {
		return err
	}
	apiKeys, err := r.loadAPIKeys(ctx, ids.apiKeyIDs)
	if err != nil {
		return err
	}
	accounts, err := r.loadAccounts(ctx, ids.accountIDs)
	if err != nil {
		return err
	}
	groups, err := r.loadGroups(ctx, ids.groupIDs)
	if err != nil {
		return err
	}
	subs, err := r.loadSubscriptions(ctx, ids.subscriptionIDs)
	if err != nil {
		return err
	}
	for i := range logs {
		if user, ok := users[logs[i].UserID]; ok {
			logs[i].User = user
		}
		if key, ok := apiKeys[logs[i].APIKeyID]; ok {
			logs[i].APIKey = key
		}
		if acc, ok := accounts[logs[i].AccountID]; ok {
			logs[i].Account = acc
		}
		// Group and subscription are optional associations (nullable IDs).
		if logs[i].GroupID != nil {
			if group, ok := groups[*logs[i].GroupID]; ok {
				logs[i].Group = group
			}
		}
		if logs[i].SubscriptionID != nil {
			if sub, ok := subs[*logs[i].SubscriptionID]; ok {
				logs[i].Subscription = sub
			}
		}
	}
	return nil
}
// usageLogIDs holds the distinct entity IDs referenced by a batch of usage
// logs; it is the input for batch-loading their associations.
type usageLogIDs struct {
	userIDs         []int64 // distinct UserID values
	apiKeyIDs       []int64 // distinct APIKeyID values
	accountIDs      []int64 // distinct AccountID values
	groupIDs        []int64 // distinct non-nil GroupID values
	subscriptionIDs []int64 // distinct non-nil SubscriptionID values
}
2025-12-29 10:03:27 +08:00
func collectUsageLogIDs ( logs [ ] service . UsageLog ) usageLogIDs {
idSet := func ( ) map [ int64 ] struct { } { return make ( map [ int64 ] struct { } ) }
userIDs := idSet ( )
apiKeyIDs := idSet ( )
accountIDs := idSet ( )
groupIDs := idSet ( )
subscriptionIDs := idSet ( )
for i := range logs {
userIDs [ logs [ i ] . UserID ] = struct { } { }
2026-01-04 19:27:53 +08:00
apiKeyIDs [ logs [ i ] . APIKeyID ] = struct { } { }
2025-12-29 10:03:27 +08:00
accountIDs [ logs [ i ] . AccountID ] = struct { } { }
if logs [ i ] . GroupID != nil {
groupIDs [ * logs [ i ] . GroupID ] = struct { } { }
}
if logs [ i ] . SubscriptionID != nil {
subscriptionIDs [ * logs [ i ] . SubscriptionID ] = struct { } { }
2025-12-26 15:40:24 +08:00
}
}
2025-12-29 10:03:27 +08:00
return usageLogIDs {
userIDs : setToSlice ( userIDs ) ,
apiKeyIDs : setToSlice ( apiKeyIDs ) ,
accountIDs : setToSlice ( accountIDs ) ,
groupIDs : setToSlice ( groupIDs ) ,
subscriptionIDs : setToSlice ( subscriptionIDs ) ,
}
2025-12-26 15:40:24 +08:00
}
2025-12-29 10:03:27 +08:00
// loadUsers fetches the users with the given IDs and returns them keyed by ID.
// An empty id list short-circuits to an empty map without querying.
func (r *usageLogRepository) loadUsers(ctx context.Context, ids []int64) (map[int64]*service.User, error) {
	result := make(map[int64]*service.User, len(ids))
	if len(ids) == 0 {
		return result, nil
	}
	rows, err := r.client.User.Query().Where(dbuser.IDIn(ids...)).All(ctx)
	if err != nil {
		return nil, err
	}
	for _, row := range rows {
		result[row.ID] = userEntityToService(row)
	}
	return result, nil
}
2026-01-04 19:27:53 +08:00
func ( r * usageLogRepository ) loadAPIKeys ( ctx context . Context , ids [ ] int64 ) ( map [ int64 ] * service . APIKey , error ) {
out := make ( map [ int64 ] * service . APIKey )
2025-12-29 10:03:27 +08:00
if len ( ids ) == 0 {
return out , nil
}
2026-01-04 19:27:53 +08:00
models , err := r . client . APIKey . Query ( ) . Where ( dbapikey . IDIn ( ids ... ) ) . All ( ctx )
2025-12-29 10:03:27 +08:00
if err != nil {
return nil , err
}
for _ , m := range models {
out [ m . ID ] = apiKeyEntityToService ( m )
2025-12-26 15:40:24 +08:00
}
2025-12-29 10:03:27 +08:00
return out , nil
}
func ( r * usageLogRepository ) loadAccounts ( ctx context . Context , ids [ ] int64 ) ( map [ int64 ] * service . Account , error ) {
out := make ( map [ int64 ] * service . Account )
if len ( ids ) == 0 {
return out , nil
}
models , err := r . client . Account . Query ( ) . Where ( dbaccount . IDIn ( ids ... ) ) . All ( ctx )
if err != nil {
return nil , err
2025-12-26 15:40:24 +08:00
}
2025-12-29 10:03:27 +08:00
for _ , m := range models {
out [ m . ID ] = accountEntityToService ( m )
}
return out , nil
2025-12-26 15:40:24 +08:00
}
2025-12-29 10:03:27 +08:00
func ( r * usageLogRepository ) loadGroups ( ctx context . Context , ids [ ] int64 ) ( map [ int64 ] * service . Group , error ) {
out := make ( map [ int64 ] * service . Group )
if len ( ids ) == 0 {
return out , nil
}
models , err := r . client . Group . Query ( ) . Where ( dbgroup . IDIn ( ids ... ) ) . All ( ctx )
if err != nil {
return nil , err
}
for _ , m := range models {
out [ m . ID ] = groupEntityToService ( m )
2025-12-26 15:40:24 +08:00
}
2025-12-29 10:03:27 +08:00
return out , nil
}
// loadSubscriptions fetches the user subscriptions with the given IDs and
// returns them keyed by ID. An empty id list short-circuits without querying.
func (r *usageLogRepository) loadSubscriptions(ctx context.Context, ids []int64) (map[int64]*service.UserSubscription, error) {
	result := make(map[int64]*service.UserSubscription, len(ids))
	if len(ids) == 0 {
		return result, nil
	}
	rows, err := r.client.UserSubscription.Query().Where(dbusersub.IDIn(ids...)).All(ctx)
	if err != nil {
		return nil, err
	}
	for _, row := range rows {
		result[row.ID] = userSubscriptionEntityToService(row)
	}
	return result, nil
}
// scanUsageLog reads one usage_logs row from the scanner into a
// service.UsageLog. The Scan destinations MUST stay in the exact order of
// usageLogSelectColumns; nullable columns go through sql.Null* wrappers and
// are copied into pointer fields only when valid.
func scanUsageLog(scanner interface{ Scan(...any) error }) (*service.UsageLog, error) {
	var (
		id                    int64
		userID                int64
		apiKeyID              int64
		accountID             int64
		requestID             sql.NullString
		model                 string
		groupID               sql.NullInt64
		subscriptionID        sql.NullInt64
		inputTokens           int
		outputTokens          int
		cacheCreationTokens   int
		cacheReadTokens       int
		cacheCreation5m       int
		cacheCreation1h       int
		inputCost             float64
		outputCost            float64
		cacheCreationCost     float64
		cacheReadCost         float64
		totalCost             float64
		actualCost            float64
		rateMultiplier        float64
		accountRateMultiplier sql.NullFloat64
		billingType           int16
		requestTypeRaw        int16
		stream                bool
		openaiWSMode          bool
		durationMs            sql.NullInt64
		firstTokenMs          sql.NullInt64
		userAgent             sql.NullString
		ipAddress             sql.NullString
		imageCount            int
		imageSize             sql.NullString
		mediaType             sql.NullString
		serviceTier           sql.NullString
		reasoningEffort       sql.NullString
		cacheTTLOverridden    bool
		createdAt             time.Time
	)
	// Destination order mirrors usageLogSelectColumns exactly.
	if err := scanner.Scan(
		&id,
		&userID,
		&apiKeyID,
		&accountID,
		&requestID,
		&model,
		&groupID,
		&subscriptionID,
		&inputTokens,
		&outputTokens,
		&cacheCreationTokens,
		&cacheReadTokens,
		&cacheCreation5m,
		&cacheCreation1h,
		&inputCost,
		&outputCost,
		&cacheCreationCost,
		&cacheReadCost,
		&totalCost,
		&actualCost,
		&rateMultiplier,
		&accountRateMultiplier,
		&billingType,
		&requestTypeRaw,
		&stream,
		&openaiWSMode,
		&durationMs,
		&firstTokenMs,
		&userAgent,
		&ipAddress,
		&imageCount,
		&imageSize,
		&mediaType,
		&serviceTier,
		&reasoningEffort,
		&cacheTTLOverridden,
		&createdAt,
	); err != nil {
		return nil, err
	}
	log := &service.UsageLog{
		ID:                    id,
		UserID:                userID,
		APIKeyID:              apiKeyID,
		AccountID:             accountID,
		Model:                 model,
		InputTokens:           inputTokens,
		OutputTokens:          outputTokens,
		CacheCreationTokens:   cacheCreationTokens,
		CacheReadTokens:       cacheReadTokens,
		CacheCreation5mTokens: cacheCreation5m,
		CacheCreation1hTokens: cacheCreation1h,
		InputCost:             inputCost,
		OutputCost:            outputCost,
		CacheCreationCost:     cacheCreationCost,
		CacheReadCost:         cacheReadCost,
		TotalCost:             totalCost,
		ActualCost:            actualCost,
		RateMultiplier:        rateMultiplier,
		AccountRateMultiplier: nullFloat64Ptr(accountRateMultiplier),
		BillingType:           int8(billingType),
		RequestType:           service.RequestTypeFromInt16(requestTypeRaw),
		ImageCount:            imageCount,
		CacheTTLOverridden:    cacheTTLOverridden,
		CreatedAt:             createdAt,
	}
	// Backfill the legacy stream/ws-mode fields first, then derive the final
	// request type from legacy + request_type, so rows written before
	// request_type existed remain compatible.
	log.Stream = stream
	log.OpenAIWSMode = openaiWSMode
	log.RequestType = log.EffectiveRequestType()
	log.Stream, log.OpenAIWSMode = service.ApplyLegacyRequestFields(log.RequestType, stream, openaiWSMode)
	// Nullable columns become pointer/optional fields only when present.
	if requestID.Valid {
		log.RequestID = requestID.String
	}
	if groupID.Valid {
		value := groupID.Int64
		log.GroupID = &value
	}
	if subscriptionID.Valid {
		value := subscriptionID.Int64
		log.SubscriptionID = &value
	}
	if durationMs.Valid {
		value := int(durationMs.Int64)
		log.DurationMs = &value
	}
	if firstTokenMs.Valid {
		value := int(firstTokenMs.Int64)
		log.FirstTokenMs = &value
	}
	if userAgent.Valid {
		log.UserAgent = &userAgent.String
	}
	if ipAddress.Valid {
		log.IPAddress = &ipAddress.String
	}
	if imageSize.Valid {
		log.ImageSize = &imageSize.String
	}
	if mediaType.Valid {
		log.MediaType = &mediaType.String
	}
	if serviceTier.Valid {
		log.ServiceTier = &serviceTier.String
	}
	if reasoningEffort.Valid {
		log.ReasoningEffort = &reasoningEffort.String
	}
	return log, nil
}
func scanTrendRows ( rows * sql . Rows ) ( [ ] TrendDataPoint , error ) {
results := make ( [ ] TrendDataPoint , 0 )
for rows . Next ( ) {
var row TrendDataPoint
if err := rows . Scan (
& row . Date ,
& row . Requests ,
& row . InputTokens ,
& row . OutputTokens ,
2026-03-05 18:32:17 +08:00
& row . CacheCreationTokens ,
& row . CacheReadTokens ,
2025-12-29 10:03:27 +08:00
& row . TotalTokens ,
& row . Cost ,
& row . ActualCost ,
) ; err != nil {
return nil , err
}
results = append ( results , row )
}
if err := rows . Err ( ) ; err != nil {
return nil , err
}
return results , nil
}
func scanModelStatsRows ( rows * sql . Rows ) ( [ ] ModelStat , error ) {
results := make ( [ ] ModelStat , 0 )
for rows . Next ( ) {
var row ModelStat
if err := rows . Scan (
& row . Model ,
& row . Requests ,
& row . InputTokens ,
& row . OutputTokens ,
2026-03-05 18:32:17 +08:00
& row . CacheCreationTokens ,
& row . CacheReadTokens ,
2025-12-29 10:03:27 +08:00
& row . TotalTokens ,
& row . Cost ,
& row . ActualCost ,
) ; err != nil {
return nil , err
}
results = append ( results , row )
}
if err := rows . Err ( ) ; err != nil {
return nil , err
}
return results , nil
}
// buildWhere joins the conditions with " AND " under a "WHERE " prefix;
// it returns the empty string when there are no conditions.
func buildWhere(conditions []string) string {
	if len(conditions) == 0 {
		return ""
	}
	var b strings.Builder
	b.WriteString("WHERE ")
	for i, cond := range conditions {
		if i > 0 {
			b.WriteString(" AND ")
		}
		b.WriteString(cond)
	}
	return b.String()
}
2026-02-28 15:01:20 +08:00
// appendRequestTypeOrStreamWhereCondition appends a request_type predicate
// (preferred when set) or, failing that, a stream predicate to the WHERE
// condition list and the positional argument list.
func appendRequestTypeOrStreamWhereCondition(conditions []string, args []any, requestType *int16, stream *bool) ([]string, []any) {
	switch {
	case requestType != nil:
		cond, condArgs := buildRequestTypeFilterCondition(len(args)+1, *requestType)
		conditions = append(conditions, cond)
		args = append(args, condArgs...)
	case stream != nil:
		conditions = append(conditions, fmt.Sprintf("stream = $%d", len(args)+1))
		args = append(args, *stream)
	}
	return conditions, args
}
// appendRequestTypeOrStreamQueryFilter appends a request_type predicate
// (preferred when set) or, failing that, a stream predicate to an existing
// query string and its positional argument list.
func appendRequestTypeOrStreamQueryFilter(query string, args []any, requestType *int16, stream *bool) (string, []any) {
	switch {
	case requestType != nil:
		cond, condArgs := buildRequestTypeFilterCondition(len(args)+1, *requestType)
		query += " AND " + cond
		args = append(args, condArgs...)
	case stream != nil:
		query += fmt.Sprintf(" AND stream = $%d", len(args)+1)
		args = append(args, *stream)
	}
	return query, args
}
// buildRequestTypeFilterCondition builds a request_type predicate that also
// matches legacy rows (stored with request_type = unknown) through the old
// stream/openai_ws_mode flags, so historical data is not missed.
func buildRequestTypeFilterCondition(startArgIndex int, requestType int16) (string, []any) {
	normalized := service.RequestTypeFromInt16(requestType)
	args := []any{int16(normalized)}
	legacy := int16(service.RequestTypeUnknown)
	switch normalized {
	case service.RequestTypeSync:
		return fmt.Sprintf("(request_type = $%d OR (request_type = %d AND stream = FALSE AND openai_ws_mode = FALSE))", startArgIndex, legacy), args
	case service.RequestTypeStream:
		return fmt.Sprintf("(request_type = $%d OR (request_type = %d AND stream = TRUE AND openai_ws_mode = FALSE))", startArgIndex, legacy), args
	case service.RequestTypeWSV2:
		return fmt.Sprintf("(request_type = $%d OR (request_type = %d AND openai_ws_mode = TRUE))", startArgIndex, legacy), args
	default:
		return fmt.Sprintf("request_type = $%d", startArgIndex), args
	}
}
2025-12-29 10:03:27 +08:00
// nullInt64 converts an *int64 into a sql.NullInt64 (NULL when the pointer is nil).
func nullInt64(v *int64) sql.NullInt64 {
	var out sql.NullInt64
	if v != nil {
		out.Int64, out.Valid = *v, true
	}
	return out
}
// nullInt converts an *int into a sql.NullInt64 (NULL when the pointer is nil).
func nullInt(v *int) sql.NullInt64 {
	var out sql.NullInt64
	if v != nil {
		out.Int64, out.Valid = int64(*v), true
	}
	return out
}
2026-01-14 16:12:08 +08:00
func nullFloat64Ptr ( v sql . NullFloat64 ) * float64 {
if ! v . Valid {
return nil
}
out := v . Float64
return & out
}
2026-01-05 17:07:29 +08:00
// nullString converts a *string into a sql.NullString; nil pointers and
// empty strings are both stored as SQL NULL.
func nullString(v *string) sql.NullString {
	if v != nil && *v != "" {
		return sql.NullString{String: *v, Valid: true}
	}
	return sql.NullString{}
}
2025-12-29 10:03:27 +08:00
// setToSlice returns the keys of an int64 set as a slice; iteration order,
// and therefore element order, is unspecified.
func setToSlice(set map[int64]struct{}) []int64 {
	keys := make([]int64, 0, len(set))
	for key := range set {
		keys = append(keys, key)
	}
	return keys
}