mirror of
https://gitee.com/wanwujie/sub2api
synced 2026-04-09 01:24:46 +08:00
Compare commits
156 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4da681f58a | ||
|
|
68ba866c38 | ||
|
|
9622347faa | ||
|
|
8363663ea8 | ||
|
|
b588ea194c | ||
|
|
465ba76788 | ||
|
|
cf313d5761 | ||
|
|
8c1958c9ad | ||
|
|
2db34139f0 | ||
|
|
e0cccf6ed2 | ||
|
|
89c1a41305 | ||
|
|
202ec21bab | ||
|
|
6dcb27632e | ||
|
|
3141aa5144 | ||
|
|
5443efd7d7 | ||
|
|
62771583e7 | ||
|
|
5526f122b7 | ||
|
|
9c144587fe | ||
|
|
098bf5a1e8 | ||
|
|
4c37ca71ee | ||
|
|
0c52809591 | ||
|
|
53e730f8d5 | ||
|
|
8e248e0853 | ||
|
|
2a0758bdfe | ||
|
|
f55ba3f6c1 | ||
|
|
db51e65b42 | ||
|
|
72a2ed958b | ||
|
|
d0b91a40d4 | ||
|
|
bd74bf7994 | ||
|
|
f28d4b78e7 | ||
|
|
7536dbfee5 | ||
|
|
b76cc583fb | ||
|
|
955af6b3ec | ||
|
|
1073317a3e | ||
|
|
839ab37d40 | ||
|
|
9dd0ef187d | ||
|
|
fd8473f267 | ||
|
|
cc4910dd30 | ||
|
|
50de5d05b0 | ||
|
|
7844dc4f2d | ||
|
|
c48795a948 | ||
|
|
19b67e89a2 | ||
|
|
f017fd97c1 | ||
|
|
ce3336e3f4 | ||
|
|
54c5788b86 | ||
|
|
4cb7b26f03 | ||
|
|
3dfb62e996 | ||
|
|
d5c711d081 | ||
|
|
73b62bb15c | ||
|
|
18b8bd43ad | ||
|
|
8fffcd8091 | ||
|
|
c8e3a476fc | ||
|
|
808cee9665 | ||
|
|
92eafbc2a6 | ||
|
|
2548800c3f | ||
|
|
9dce8a5388 | ||
|
|
76484bd5c9 | ||
|
|
e4ed35fe01 | ||
|
|
f5e45c1a8a | ||
|
|
a2f83ff032 | ||
|
|
2b2f7a6dec | ||
|
|
49c15c0d44 | ||
|
|
1b938b2003 | ||
|
|
5f80760a8c | ||
|
|
dd59e872ff | ||
|
|
aa1a3b9a74 | ||
|
|
32953405b1 | ||
|
|
c1a3dd41dd | ||
|
|
63dc6a68df | ||
|
|
a39316e004 | ||
|
|
988b4d0254 | ||
|
|
f541636840 | ||
|
|
48613558d4 | ||
|
|
4b66ee2f8f | ||
|
|
abbde130ab | ||
|
|
ccb8144557 | ||
|
|
1240c78ef6 | ||
|
|
66c8b6f2bc | ||
|
|
6271a33d08 | ||
|
|
5364011a5b | ||
|
|
d78f42d2fd | ||
|
|
1a869547d7 | ||
|
|
e4bc9f6fb0 | ||
|
|
e5857161ff | ||
|
|
abdc4f39cb | ||
|
|
7ebca553ef | ||
|
|
c2962752eb | ||
|
|
ab5839b461 | ||
|
|
89a725a433 | ||
|
|
645609d441 | ||
|
|
fc4ea65936 | ||
|
|
d75cd820b0 | ||
|
|
cb3e08dda4 | ||
|
|
44a93c1922 | ||
|
|
9cba595fd0 | ||
|
|
56fc2764e4 | ||
|
|
0c4f1762c9 | ||
|
|
c2c865b0cb | ||
|
|
a66d318820 | ||
|
|
a16f72f52e | ||
|
|
99e2391b2a | ||
|
|
80c1cdf024 | ||
|
|
0fa5a6015e | ||
|
|
9d0a4f3d68 | ||
|
|
1a641392d9 | ||
|
|
36b817d008 | ||
|
|
24d19a5f78 | ||
|
|
3fb4a2b0ff | ||
|
|
0772cdda0f | ||
|
|
f6f072cb9a | ||
|
|
5265b12cc7 | ||
|
|
ff0875868e | ||
|
|
e79dbad602 | ||
|
|
6a9cc13e3e | ||
|
|
d1a6d6b1cf | ||
|
|
7a0ca05233 | ||
|
|
15884f368d | ||
|
|
b03fb9c2f6 | ||
|
|
3d4984133e | ||
|
|
13ae0ce7b0 | ||
|
|
3a67002cfe | ||
|
|
9f4d4e5adf | ||
|
|
d2fc14fb97 | ||
|
|
3730819857 | ||
|
|
297f08c683 | ||
|
|
61f556745a | ||
|
|
435f693892 | ||
|
|
72f78f8a56 | ||
|
|
2597fe78ba | ||
|
|
eb06006d6c | ||
|
|
c48dc097ff | ||
|
|
585257d340 | ||
|
|
675543240e | ||
|
|
7d1fe818be | ||
|
|
0a4641c24e | ||
|
|
e83f644c3f | ||
|
|
6b97a8be28 | ||
|
|
90798f14b5 | ||
|
|
8ae75e7f6e | ||
|
|
fc32b57798 | ||
|
|
337a188660 | ||
|
|
11d063e3c4 | ||
|
|
e846458009 | ||
|
|
2d123a11ad | ||
|
|
fcdf839b6b | ||
|
|
d55dd56fd2 | ||
|
|
e0d12b46d8 | ||
|
|
f3ed95d4de | ||
|
|
5baa8b5673 | ||
|
|
bb5303272b | ||
|
|
d55866d375 | ||
|
|
4b9e47cec9 | ||
|
|
7a06c4873e | ||
|
|
eeb1282f0c | ||
|
|
470abee092 | ||
|
|
39433f2a29 |
5
.gitignore
vendored
5
.gitignore
vendored
@@ -14,6 +14,9 @@ backend/server
|
|||||||
backend/sub2api
|
backend/sub2api
|
||||||
backend/main
|
backend/main
|
||||||
|
|
||||||
|
# Go 测试二进制
|
||||||
|
*.test
|
||||||
|
|
||||||
# 测试覆盖率
|
# 测试覆盖率
|
||||||
*.out
|
*.out
|
||||||
coverage.html
|
coverage.html
|
||||||
@@ -123,6 +126,4 @@ backend/cmd/server/server
|
|||||||
deploy/docker-compose.override.yml
|
deploy/docker-compose.override.yml
|
||||||
.gocache/
|
.gocache/
|
||||||
vite.config.js
|
vite.config.js
|
||||||
!docs/
|
|
||||||
docs/*
|
docs/*
|
||||||
!docs/dependency-security.md
|
|
||||||
|
|||||||
2
backend/.dockerignore
Normal file
2
backend/.dockerignore
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
.cache/
|
||||||
|
.DS_Store
|
||||||
@@ -18,6 +18,12 @@ linters:
|
|||||||
list-mode: original
|
list-mode: original
|
||||||
files:
|
files:
|
||||||
- "**/internal/service/**"
|
- "**/internal/service/**"
|
||||||
|
- "!**/internal/service/ops_aggregation_service.go"
|
||||||
|
- "!**/internal/service/ops_alert_evaluator_service.go"
|
||||||
|
- "!**/internal/service/ops_cleanup_service.go"
|
||||||
|
- "!**/internal/service/ops_metrics_collector.go"
|
||||||
|
- "!**/internal/service/ops_scheduled_report_service.go"
|
||||||
|
- "!**/internal/service/wire.go"
|
||||||
deny:
|
deny:
|
||||||
- pkg: github.com/Wei-Shaw/sub2api/internal/repository
|
- pkg: github.com/Wei-Shaw/sub2api/internal/repository
|
||||||
desc: "service must not import repository"
|
desc: "service must not import repository"
|
||||||
|
|||||||
@@ -33,7 +33,7 @@ func main() {
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
userRepo := repository.NewUserRepository(client, sqlDB)
|
userRepo := repository.NewUserRepository(client, sqlDB)
|
||||||
authService := service.NewAuthService(userRepo, cfg, nil, nil, nil, nil)
|
authService := service.NewAuthService(userRepo, cfg, nil, nil, nil, nil, nil)
|
||||||
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|||||||
@@ -62,6 +62,12 @@ func provideServiceBuildInfo(buildInfo handler.BuildInfo) service.BuildInfo {
|
|||||||
func provideCleanup(
|
func provideCleanup(
|
||||||
entClient *ent.Client,
|
entClient *ent.Client,
|
||||||
rdb *redis.Client,
|
rdb *redis.Client,
|
||||||
|
opsMetricsCollector *service.OpsMetricsCollector,
|
||||||
|
opsAggregation *service.OpsAggregationService,
|
||||||
|
opsAlertEvaluator *service.OpsAlertEvaluatorService,
|
||||||
|
opsCleanup *service.OpsCleanupService,
|
||||||
|
opsScheduledReport *service.OpsScheduledReportService,
|
||||||
|
schedulerSnapshot *service.SchedulerSnapshotService,
|
||||||
tokenRefresh *service.TokenRefreshService,
|
tokenRefresh *service.TokenRefreshService,
|
||||||
accountExpiry *service.AccountExpiryService,
|
accountExpiry *service.AccountExpiryService,
|
||||||
pricing *service.PricingService,
|
pricing *service.PricingService,
|
||||||
@@ -81,6 +87,42 @@ func provideCleanup(
|
|||||||
name string
|
name string
|
||||||
fn func() error
|
fn func() error
|
||||||
}{
|
}{
|
||||||
|
{"OpsScheduledReportService", func() error {
|
||||||
|
if opsScheduledReport != nil {
|
||||||
|
opsScheduledReport.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"OpsCleanupService", func() error {
|
||||||
|
if opsCleanup != nil {
|
||||||
|
opsCleanup.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"OpsAlertEvaluatorService", func() error {
|
||||||
|
if opsAlertEvaluator != nil {
|
||||||
|
opsAlertEvaluator.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"OpsAggregationService", func() error {
|
||||||
|
if opsAggregation != nil {
|
||||||
|
opsAggregation.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"OpsMetricsCollector", func() error {
|
||||||
|
if opsMetricsCollector != nil {
|
||||||
|
opsMetricsCollector.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"SchedulerSnapshotService", func() error {
|
||||||
|
if schedulerSnapshot != nil {
|
||||||
|
schedulerSnapshot.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
{"TokenRefreshService", func() error {
|
{"TokenRefreshService", func() error {
|
||||||
tokenRefresh.Stop()
|
tokenRefresh.Stop()
|
||||||
return nil
|
return nil
|
||||||
|
|||||||
@@ -51,33 +51,40 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
|||||||
turnstileVerifier := repository.NewTurnstileVerifier()
|
turnstileVerifier := repository.NewTurnstileVerifier()
|
||||||
turnstileService := service.NewTurnstileService(settingService, turnstileVerifier)
|
turnstileService := service.NewTurnstileService(settingService, turnstileVerifier)
|
||||||
emailQueueService := service.ProvideEmailQueueService(emailService)
|
emailQueueService := service.ProvideEmailQueueService(emailService)
|
||||||
authService := service.NewAuthService(userRepository, configConfig, settingService, emailService, turnstileService, emailQueueService)
|
promoCodeRepository := repository.NewPromoCodeRepository(client)
|
||||||
userService := service.NewUserService(userRepository)
|
billingCache := repository.NewBillingCache(redisClient)
|
||||||
authHandler := handler.NewAuthHandler(configConfig, authService, userService, settingService)
|
userSubscriptionRepository := repository.NewUserSubscriptionRepository(client)
|
||||||
userHandler := handler.NewUserHandler(userService)
|
billingCacheService := service.NewBillingCacheService(billingCache, userRepository, userSubscriptionRepository, configConfig)
|
||||||
apiKeyRepository := repository.NewAPIKeyRepository(client)
|
apiKeyRepository := repository.NewAPIKeyRepository(client)
|
||||||
groupRepository := repository.NewGroupRepository(client, db)
|
groupRepository := repository.NewGroupRepository(client, db)
|
||||||
userSubscriptionRepository := repository.NewUserSubscriptionRepository(client)
|
|
||||||
apiKeyCache := repository.NewAPIKeyCache(redisClient)
|
apiKeyCache := repository.NewAPIKeyCache(redisClient)
|
||||||
apiKeyService := service.NewAPIKeyService(apiKeyRepository, userRepository, groupRepository, userSubscriptionRepository, apiKeyCache, configConfig)
|
apiKeyService := service.NewAPIKeyService(apiKeyRepository, userRepository, groupRepository, userSubscriptionRepository, apiKeyCache, configConfig)
|
||||||
|
apiKeyAuthCacheInvalidator := service.ProvideAPIKeyAuthCacheInvalidator(apiKeyService)
|
||||||
|
promoService := service.NewPromoService(promoCodeRepository, userRepository, billingCacheService, client, apiKeyAuthCacheInvalidator)
|
||||||
|
authService := service.NewAuthService(userRepository, configConfig, settingService, emailService, turnstileService, emailQueueService, promoService)
|
||||||
|
userService := service.NewUserService(userRepository, apiKeyAuthCacheInvalidator)
|
||||||
|
authHandler := handler.NewAuthHandler(configConfig, authService, userService, settingService, promoService)
|
||||||
|
userHandler := handler.NewUserHandler(userService)
|
||||||
apiKeyHandler := handler.NewAPIKeyHandler(apiKeyService)
|
apiKeyHandler := handler.NewAPIKeyHandler(apiKeyService)
|
||||||
usageLogRepository := repository.NewUsageLogRepository(client, db)
|
usageLogRepository := repository.NewUsageLogRepository(client, db)
|
||||||
usageService := service.NewUsageService(usageLogRepository, userRepository, client)
|
dashboardAggregationRepository := repository.NewDashboardAggregationRepository(db)
|
||||||
|
usageService := service.NewUsageService(usageLogRepository, userRepository, client, apiKeyAuthCacheInvalidator)
|
||||||
usageHandler := handler.NewUsageHandler(usageService, apiKeyService)
|
usageHandler := handler.NewUsageHandler(usageService, apiKeyService)
|
||||||
redeemCodeRepository := repository.NewRedeemCodeRepository(client)
|
redeemCodeRepository := repository.NewRedeemCodeRepository(client)
|
||||||
billingCache := repository.NewBillingCache(redisClient)
|
|
||||||
billingCacheService := service.NewBillingCacheService(billingCache, userRepository, userSubscriptionRepository, configConfig)
|
|
||||||
subscriptionService := service.NewSubscriptionService(groupRepository, userSubscriptionRepository, billingCacheService)
|
subscriptionService := service.NewSubscriptionService(groupRepository, userSubscriptionRepository, billingCacheService)
|
||||||
redeemCache := repository.NewRedeemCache(redisClient)
|
redeemCache := repository.NewRedeemCache(redisClient)
|
||||||
redeemService := service.NewRedeemService(redeemCodeRepository, userRepository, subscriptionService, redeemCache, billingCacheService, client)
|
redeemService := service.NewRedeemService(redeemCodeRepository, userRepository, subscriptionService, redeemCache, billingCacheService, client, apiKeyAuthCacheInvalidator)
|
||||||
redeemHandler := handler.NewRedeemHandler(redeemService)
|
redeemHandler := handler.NewRedeemHandler(redeemService)
|
||||||
subscriptionHandler := handler.NewSubscriptionHandler(subscriptionService)
|
subscriptionHandler := handler.NewSubscriptionHandler(subscriptionService)
|
||||||
dashboardService := service.NewDashboardService(usageLogRepository)
|
dashboardStatsCache := repository.NewDashboardCache(redisClient, configConfig)
|
||||||
dashboardHandler := admin.NewDashboardHandler(dashboardService)
|
timingWheelService := service.ProvideTimingWheelService()
|
||||||
|
dashboardAggregationService := service.ProvideDashboardAggregationService(dashboardAggregationRepository, timingWheelService, configConfig)
|
||||||
|
dashboardService := service.NewDashboardService(usageLogRepository, dashboardAggregationRepository, dashboardStatsCache, configConfig)
|
||||||
|
dashboardHandler := admin.NewDashboardHandler(dashboardService, dashboardAggregationService)
|
||||||
accountRepository := repository.NewAccountRepository(client, db)
|
accountRepository := repository.NewAccountRepository(client, db)
|
||||||
proxyRepository := repository.NewProxyRepository(client, db)
|
proxyRepository := repository.NewProxyRepository(client, db)
|
||||||
proxyExitInfoProber := repository.NewProxyExitInfoProber(configConfig)
|
proxyExitInfoProber := repository.NewProxyExitInfoProber(configConfig)
|
||||||
adminService := service.NewAdminService(userRepository, groupRepository, accountRepository, proxyRepository, apiKeyRepository, redeemCodeRepository, billingCacheService, proxyExitInfoProber)
|
adminService := service.NewAdminService(userRepository, groupRepository, accountRepository, proxyRepository, apiKeyRepository, redeemCodeRepository, billingCacheService, proxyExitInfoProber, apiKeyAuthCacheInvalidator)
|
||||||
adminUserHandler := admin.NewUserHandler(adminService)
|
adminUserHandler := admin.NewUserHandler(adminService)
|
||||||
groupHandler := admin.NewGroupHandler(adminService)
|
groupHandler := admin.NewGroupHandler(adminService)
|
||||||
claudeOAuthClient := repository.NewClaudeOAuthClient()
|
claudeOAuthClient := repository.NewClaudeOAuthClient()
|
||||||
@@ -90,7 +97,8 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
|||||||
antigravityOAuthService := service.NewAntigravityOAuthService(proxyRepository)
|
antigravityOAuthService := service.NewAntigravityOAuthService(proxyRepository)
|
||||||
geminiQuotaService := service.NewGeminiQuotaService(configConfig, settingRepository)
|
geminiQuotaService := service.NewGeminiQuotaService(configConfig, settingRepository)
|
||||||
tempUnschedCache := repository.NewTempUnschedCache(redisClient)
|
tempUnschedCache := repository.NewTempUnschedCache(redisClient)
|
||||||
rateLimitService := service.NewRateLimitService(accountRepository, usageLogRepository, configConfig, geminiQuotaService, tempUnschedCache)
|
timeoutCounterCache := repository.NewTimeoutCounterCache(redisClient)
|
||||||
|
rateLimitService := service.ProvideRateLimitService(accountRepository, usageLogRepository, configConfig, geminiQuotaService, tempUnschedCache, timeoutCounterCache, settingService)
|
||||||
claudeUsageFetcher := repository.NewClaudeUsageFetcher()
|
claudeUsageFetcher := repository.NewClaudeUsageFetcher()
|
||||||
antigravityQuotaFetcher := service.NewAntigravityQuotaFetcher(proxyRepository)
|
antigravityQuotaFetcher := service.NewAntigravityQuotaFetcher(proxyRepository)
|
||||||
usageCache := service.NewUsageCache()
|
usageCache := service.NewUsageCache()
|
||||||
@@ -104,6 +112,9 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
|||||||
accountTestService := service.NewAccountTestService(accountRepository, geminiTokenProvider, antigravityGatewayService, httpUpstream, configConfig)
|
accountTestService := service.NewAccountTestService(accountRepository, geminiTokenProvider, antigravityGatewayService, httpUpstream, configConfig)
|
||||||
concurrencyCache := repository.ProvideConcurrencyCache(redisClient, configConfig)
|
concurrencyCache := repository.ProvideConcurrencyCache(redisClient, configConfig)
|
||||||
concurrencyService := service.ProvideConcurrencyService(concurrencyCache, accountRepository, configConfig)
|
concurrencyService := service.ProvideConcurrencyService(concurrencyCache, accountRepository, configConfig)
|
||||||
|
schedulerCache := repository.NewSchedulerCache(redisClient)
|
||||||
|
schedulerOutboxRepository := repository.NewSchedulerOutboxRepository(db)
|
||||||
|
schedulerSnapshotService := service.ProvideSchedulerSnapshotService(schedulerCache, schedulerOutboxRepository, accountRepository, groupRepository, configConfig)
|
||||||
crsSyncService := service.NewCRSSyncService(accountRepository, proxyRepository, oAuthService, openAIOAuthService, geminiOAuthService, configConfig)
|
crsSyncService := service.NewCRSSyncService(accountRepository, proxyRepository, oAuthService, openAIOAuthService, geminiOAuthService, configConfig)
|
||||||
accountHandler := admin.NewAccountHandler(adminService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, rateLimitService, accountUsageService, accountTestService, concurrencyService, crsSyncService)
|
accountHandler := admin.NewAccountHandler(adminService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, rateLimitService, accountUsageService, accountTestService, concurrencyService, crsSyncService)
|
||||||
oAuthHandler := admin.NewOAuthHandler(oAuthService)
|
oAuthHandler := admin.NewOAuthHandler(oAuthService)
|
||||||
@@ -112,7 +123,23 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
|||||||
antigravityOAuthHandler := admin.NewAntigravityOAuthHandler(antigravityOAuthService)
|
antigravityOAuthHandler := admin.NewAntigravityOAuthHandler(antigravityOAuthService)
|
||||||
proxyHandler := admin.NewProxyHandler(adminService)
|
proxyHandler := admin.NewProxyHandler(adminService)
|
||||||
adminRedeemHandler := admin.NewRedeemHandler(adminService)
|
adminRedeemHandler := admin.NewRedeemHandler(adminService)
|
||||||
settingHandler := admin.NewSettingHandler(settingService, emailService, turnstileService)
|
promoHandler := admin.NewPromoHandler(promoService)
|
||||||
|
opsRepository := repository.NewOpsRepository(db)
|
||||||
|
pricingRemoteClient := repository.ProvidePricingRemoteClient(configConfig)
|
||||||
|
pricingService, err := service.ProvidePricingService(configConfig, pricingRemoteClient)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
billingService := service.NewBillingService(configConfig, pricingService)
|
||||||
|
identityCache := repository.NewIdentityCache(redisClient)
|
||||||
|
identityService := service.NewIdentityService(identityCache)
|
||||||
|
deferredService := service.ProvideDeferredService(accountRepository, timingWheelService)
|
||||||
|
gatewayService := service.NewGatewayService(accountRepository, groupRepository, usageLogRepository, userRepository, userSubscriptionRepository, gatewayCache, configConfig, schedulerSnapshotService, concurrencyService, billingService, rateLimitService, billingCacheService, identityService, httpUpstream, deferredService)
|
||||||
|
openAIGatewayService := service.NewOpenAIGatewayService(accountRepository, usageLogRepository, userRepository, userSubscriptionRepository, gatewayCache, configConfig, schedulerSnapshotService, concurrencyService, billingService, rateLimitService, billingCacheService, httpUpstream, deferredService)
|
||||||
|
geminiMessagesCompatService := service.NewGeminiMessagesCompatService(accountRepository, groupRepository, gatewayCache, schedulerSnapshotService, geminiTokenProvider, rateLimitService, httpUpstream, antigravityGatewayService, configConfig)
|
||||||
|
opsService := service.NewOpsService(opsRepository, settingRepository, configConfig, accountRepository, concurrencyService, gatewayService, openAIGatewayService, geminiMessagesCompatService, antigravityGatewayService)
|
||||||
|
settingHandler := admin.NewSettingHandler(settingService, emailService, turnstileService, opsService)
|
||||||
|
opsHandler := admin.NewOpsHandler(opsService)
|
||||||
updateCache := repository.NewUpdateCache(redisClient)
|
updateCache := repository.NewUpdateCache(redisClient)
|
||||||
gitHubReleaseClient := repository.ProvideGitHubReleaseClient(configConfig)
|
gitHubReleaseClient := repository.ProvideGitHubReleaseClient(configConfig)
|
||||||
serviceBuildInfo := provideServiceBuildInfo(buildInfo)
|
serviceBuildInfo := provideServiceBuildInfo(buildInfo)
|
||||||
@@ -124,32 +151,24 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
|||||||
userAttributeValueRepository := repository.NewUserAttributeValueRepository(client)
|
userAttributeValueRepository := repository.NewUserAttributeValueRepository(client)
|
||||||
userAttributeService := service.NewUserAttributeService(userAttributeDefinitionRepository, userAttributeValueRepository)
|
userAttributeService := service.NewUserAttributeService(userAttributeDefinitionRepository, userAttributeValueRepository)
|
||||||
userAttributeHandler := admin.NewUserAttributeHandler(userAttributeService)
|
userAttributeHandler := admin.NewUserAttributeHandler(userAttributeService)
|
||||||
adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, antigravityOAuthHandler, proxyHandler, adminRedeemHandler, settingHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler, userAttributeHandler)
|
adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, antigravityOAuthHandler, proxyHandler, adminRedeemHandler, promoHandler, settingHandler, opsHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler, userAttributeHandler)
|
||||||
pricingRemoteClient := repository.ProvidePricingRemoteClient(configConfig)
|
|
||||||
pricingService, err := service.ProvidePricingService(configConfig, pricingRemoteClient)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
billingService := service.NewBillingService(configConfig, pricingService)
|
|
||||||
identityCache := repository.NewIdentityCache(redisClient)
|
|
||||||
identityService := service.NewIdentityService(identityCache)
|
|
||||||
timingWheelService := service.ProvideTimingWheelService()
|
|
||||||
deferredService := service.ProvideDeferredService(accountRepository, timingWheelService)
|
|
||||||
gatewayService := service.NewGatewayService(accountRepository, groupRepository, usageLogRepository, userRepository, userSubscriptionRepository, gatewayCache, configConfig, concurrencyService, billingService, rateLimitService, billingCacheService, identityService, httpUpstream, deferredService)
|
|
||||||
geminiMessagesCompatService := service.NewGeminiMessagesCompatService(accountRepository, groupRepository, gatewayCache, geminiTokenProvider, rateLimitService, httpUpstream, antigravityGatewayService, configConfig)
|
|
||||||
gatewayHandler := handler.NewGatewayHandler(gatewayService, geminiMessagesCompatService, antigravityGatewayService, userService, concurrencyService, billingCacheService, configConfig)
|
gatewayHandler := handler.NewGatewayHandler(gatewayService, geminiMessagesCompatService, antigravityGatewayService, userService, concurrencyService, billingCacheService, configConfig)
|
||||||
openAIGatewayService := service.NewOpenAIGatewayService(accountRepository, usageLogRepository, userRepository, userSubscriptionRepository, gatewayCache, configConfig, concurrencyService, billingService, rateLimitService, billingCacheService, httpUpstream, deferredService)
|
|
||||||
openAIGatewayHandler := handler.NewOpenAIGatewayHandler(openAIGatewayService, concurrencyService, billingCacheService, configConfig)
|
openAIGatewayHandler := handler.NewOpenAIGatewayHandler(openAIGatewayService, concurrencyService, billingCacheService, configConfig)
|
||||||
handlerSettingHandler := handler.ProvideSettingHandler(settingService, buildInfo)
|
handlerSettingHandler := handler.ProvideSettingHandler(settingService, buildInfo)
|
||||||
handlers := handler.ProvideHandlers(authHandler, userHandler, apiKeyHandler, usageHandler, redeemHandler, subscriptionHandler, adminHandlers, gatewayHandler, openAIGatewayHandler, handlerSettingHandler)
|
handlers := handler.ProvideHandlers(authHandler, userHandler, apiKeyHandler, usageHandler, redeemHandler, subscriptionHandler, adminHandlers, gatewayHandler, openAIGatewayHandler, handlerSettingHandler)
|
||||||
jwtAuthMiddleware := middleware.NewJWTAuthMiddleware(authService, userService)
|
jwtAuthMiddleware := middleware.NewJWTAuthMiddleware(authService, userService)
|
||||||
adminAuthMiddleware := middleware.NewAdminAuthMiddleware(authService, userService, settingService)
|
adminAuthMiddleware := middleware.NewAdminAuthMiddleware(authService, userService, settingService)
|
||||||
apiKeyAuthMiddleware := middleware.NewAPIKeyAuthMiddleware(apiKeyService, subscriptionService, configConfig)
|
apiKeyAuthMiddleware := middleware.NewAPIKeyAuthMiddleware(apiKeyService, subscriptionService, configConfig)
|
||||||
engine := server.ProvideRouter(configConfig, handlers, jwtAuthMiddleware, adminAuthMiddleware, apiKeyAuthMiddleware, apiKeyService, subscriptionService)
|
engine := server.ProvideRouter(configConfig, handlers, jwtAuthMiddleware, adminAuthMiddleware, apiKeyAuthMiddleware, apiKeyService, subscriptionService, opsService, settingService, redisClient)
|
||||||
httpServer := server.ProvideHTTPServer(configConfig, engine)
|
httpServer := server.ProvideHTTPServer(configConfig, engine)
|
||||||
|
opsMetricsCollector := service.ProvideOpsMetricsCollector(opsRepository, settingRepository, accountRepository, concurrencyService, db, redisClient, configConfig)
|
||||||
|
opsAggregationService := service.ProvideOpsAggregationService(opsRepository, settingRepository, db, redisClient, configConfig)
|
||||||
|
opsAlertEvaluatorService := service.ProvideOpsAlertEvaluatorService(opsService, opsRepository, emailService, redisClient, configConfig)
|
||||||
|
opsCleanupService := service.ProvideOpsCleanupService(opsRepository, db, redisClient, configConfig)
|
||||||
|
opsScheduledReportService := service.ProvideOpsScheduledReportService(opsService, userService, emailService, redisClient, configConfig)
|
||||||
tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, configConfig)
|
tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, configConfig)
|
||||||
accountExpiryService := service.ProvideAccountExpiryService(accountRepository)
|
accountExpiryService := service.ProvideAccountExpiryService(accountRepository)
|
||||||
v := provideCleanup(client, redisClient, tokenRefreshService, accountExpiryService, pricingService, emailQueueService, billingCacheService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService)
|
v := provideCleanup(client, redisClient, opsMetricsCollector, opsAggregationService, opsAlertEvaluatorService, opsCleanupService, opsScheduledReportService, schedulerSnapshotService, tokenRefreshService, accountExpiryService, pricingService, emailQueueService, billingCacheService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService)
|
||||||
application := &Application{
|
application := &Application{
|
||||||
Server: httpServer,
|
Server: httpServer,
|
||||||
Cleanup: v,
|
Cleanup: v,
|
||||||
@@ -174,6 +193,12 @@ func provideServiceBuildInfo(buildInfo handler.BuildInfo) service.BuildInfo {
|
|||||||
func provideCleanup(
|
func provideCleanup(
|
||||||
entClient *ent.Client,
|
entClient *ent.Client,
|
||||||
rdb *redis.Client,
|
rdb *redis.Client,
|
||||||
|
opsMetricsCollector *service.OpsMetricsCollector,
|
||||||
|
opsAggregation *service.OpsAggregationService,
|
||||||
|
opsAlertEvaluator *service.OpsAlertEvaluatorService,
|
||||||
|
opsCleanup *service.OpsCleanupService,
|
||||||
|
opsScheduledReport *service.OpsScheduledReportService,
|
||||||
|
schedulerSnapshot *service.SchedulerSnapshotService,
|
||||||
tokenRefresh *service.TokenRefreshService,
|
tokenRefresh *service.TokenRefreshService,
|
||||||
accountExpiry *service.AccountExpiryService,
|
accountExpiry *service.AccountExpiryService,
|
||||||
pricing *service.PricingService,
|
pricing *service.PricingService,
|
||||||
@@ -192,6 +217,42 @@ func provideCleanup(
|
|||||||
name string
|
name string
|
||||||
fn func() error
|
fn func() error
|
||||||
}{
|
}{
|
||||||
|
{"OpsScheduledReportService", func() error {
|
||||||
|
if opsScheduledReport != nil {
|
||||||
|
opsScheduledReport.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"OpsCleanupService", func() error {
|
||||||
|
if opsCleanup != nil {
|
||||||
|
opsCleanup.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"OpsAlertEvaluatorService", func() error {
|
||||||
|
if opsAlertEvaluator != nil {
|
||||||
|
opsAlertEvaluator.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"OpsAggregationService", func() error {
|
||||||
|
if opsAggregation != nil {
|
||||||
|
opsAggregation.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"OpsMetricsCollector", func() error {
|
||||||
|
if opsMetricsCollector != nil {
|
||||||
|
opsMetricsCollector.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"SchedulerSnapshotService", func() error {
|
||||||
|
if schedulerSnapshot != nil {
|
||||||
|
schedulerSnapshot.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
{"TokenRefreshService", func() error {
|
{"TokenRefreshService", func() error {
|
||||||
tokenRefresh.Stop()
|
tokenRefresh.Stop()
|
||||||
return nil
|
return nil
|
||||||
|
|||||||
@@ -9,6 +9,7 @@ import (
|
|||||||
"math"
|
"math"
|
||||||
|
|
||||||
"entgo.io/ent"
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
"entgo.io/ent/dialect/sql"
|
"entgo.io/ent/dialect/sql"
|
||||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
"entgo.io/ent/schema/field"
|
"entgo.io/ent/schema/field"
|
||||||
@@ -31,6 +32,7 @@ type AccountQuery struct {
|
|||||||
withProxy *ProxyQuery
|
withProxy *ProxyQuery
|
||||||
withUsageLogs *UsageLogQuery
|
withUsageLogs *UsageLogQuery
|
||||||
withAccountGroups *AccountGroupQuery
|
withAccountGroups *AccountGroupQuery
|
||||||
|
modifiers []func(*sql.Selector)
|
||||||
// intermediate query (i.e. traversal path).
|
// intermediate query (i.e. traversal path).
|
||||||
sql *sql.Selector
|
sql *sql.Selector
|
||||||
path func(context.Context) (*sql.Selector, error)
|
path func(context.Context) (*sql.Selector, error)
|
||||||
@@ -495,6 +497,9 @@ func (_q *AccountQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Acco
|
|||||||
node.Edges.loadedTypes = loadedTypes
|
node.Edges.loadedTypes = loadedTypes
|
||||||
return node.assignValues(columns, values)
|
return node.assignValues(columns, values)
|
||||||
}
|
}
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
for i := range hooks {
|
for i := range hooks {
|
||||||
hooks[i](ctx, _spec)
|
hooks[i](ctx, _spec)
|
||||||
}
|
}
|
||||||
@@ -690,6 +695,9 @@ func (_q *AccountQuery) loadAccountGroups(ctx context.Context, query *AccountGro
|
|||||||
|
|
||||||
func (_q *AccountQuery) sqlCount(ctx context.Context) (int, error) {
|
func (_q *AccountQuery) sqlCount(ctx context.Context) (int, error) {
|
||||||
_spec := _q.querySpec()
|
_spec := _q.querySpec()
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
_spec.Node.Columns = _q.ctx.Fields
|
_spec.Node.Columns = _q.ctx.Fields
|
||||||
if len(_q.ctx.Fields) > 0 {
|
if len(_q.ctx.Fields) > 0 {
|
||||||
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
||||||
@@ -755,6 +763,9 @@ func (_q *AccountQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
|||||||
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
||||||
selector.Distinct()
|
selector.Distinct()
|
||||||
}
|
}
|
||||||
|
for _, m := range _q.modifiers {
|
||||||
|
m(selector)
|
||||||
|
}
|
||||||
for _, p := range _q.predicates {
|
for _, p := range _q.predicates {
|
||||||
p(selector)
|
p(selector)
|
||||||
}
|
}
|
||||||
@@ -772,6 +783,32 @@ func (_q *AccountQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
|||||||
return selector
|
return selector
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||||
|
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||||
|
// either committed or rolled-back.
|
||||||
|
func (_q *AccountQuery) ForUpdate(opts ...sql.LockOption) *AccountQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForUpdate(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||||
|
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||||
|
// until your transaction commits.
|
||||||
|
func (_q *AccountQuery) ForShare(opts ...sql.LockOption) *AccountQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForShare(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
// AccountGroupBy is the group-by builder for Account entities.
|
// AccountGroupBy is the group-by builder for Account entities.
|
||||||
type AccountGroupBy struct {
|
type AccountGroupBy struct {
|
||||||
selector
|
selector
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ import (
|
|||||||
"math"
|
"math"
|
||||||
|
|
||||||
"entgo.io/ent"
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
"entgo.io/ent/dialect/sql"
|
"entgo.io/ent/dialect/sql"
|
||||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/account"
|
"github.com/Wei-Shaw/sub2api/ent/account"
|
||||||
@@ -25,6 +26,7 @@ type AccountGroupQuery struct {
|
|||||||
predicates []predicate.AccountGroup
|
predicates []predicate.AccountGroup
|
||||||
withAccount *AccountQuery
|
withAccount *AccountQuery
|
||||||
withGroup *GroupQuery
|
withGroup *GroupQuery
|
||||||
|
modifiers []func(*sql.Selector)
|
||||||
// intermediate query (i.e. traversal path).
|
// intermediate query (i.e. traversal path).
|
||||||
sql *sql.Selector
|
sql *sql.Selector
|
||||||
path func(context.Context) (*sql.Selector, error)
|
path func(context.Context) (*sql.Selector, error)
|
||||||
@@ -347,6 +349,9 @@ func (_q *AccountGroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]
|
|||||||
node.Edges.loadedTypes = loadedTypes
|
node.Edges.loadedTypes = loadedTypes
|
||||||
return node.assignValues(columns, values)
|
return node.assignValues(columns, values)
|
||||||
}
|
}
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
for i := range hooks {
|
for i := range hooks {
|
||||||
hooks[i](ctx, _spec)
|
hooks[i](ctx, _spec)
|
||||||
}
|
}
|
||||||
@@ -432,6 +437,9 @@ func (_q *AccountGroupQuery) loadGroup(ctx context.Context, query *GroupQuery, n
|
|||||||
|
|
||||||
func (_q *AccountGroupQuery) sqlCount(ctx context.Context) (int, error) {
|
func (_q *AccountGroupQuery) sqlCount(ctx context.Context) (int, error) {
|
||||||
_spec := _q.querySpec()
|
_spec := _q.querySpec()
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
_spec.Unique = false
|
_spec.Unique = false
|
||||||
_spec.Node.Columns = nil
|
_spec.Node.Columns = nil
|
||||||
return sqlgraph.CountNodes(ctx, _q.driver, _spec)
|
return sqlgraph.CountNodes(ctx, _q.driver, _spec)
|
||||||
@@ -495,6 +503,9 @@ func (_q *AccountGroupQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
|||||||
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
||||||
selector.Distinct()
|
selector.Distinct()
|
||||||
}
|
}
|
||||||
|
for _, m := range _q.modifiers {
|
||||||
|
m(selector)
|
||||||
|
}
|
||||||
for _, p := range _q.predicates {
|
for _, p := range _q.predicates {
|
||||||
p(selector)
|
p(selector)
|
||||||
}
|
}
|
||||||
@@ -512,6 +523,32 @@ func (_q *AccountGroupQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
|||||||
return selector
|
return selector
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||||
|
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||||
|
// either committed or rolled-back.
|
||||||
|
func (_q *AccountGroupQuery) ForUpdate(opts ...sql.LockOption) *AccountGroupQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForUpdate(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||||
|
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||||
|
// until your transaction commits.
|
||||||
|
func (_q *AccountGroupQuery) ForShare(opts ...sql.LockOption) *AccountGroupQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForShare(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
// AccountGroupGroupBy is the group-by builder for AccountGroup entities.
|
// AccountGroupGroupBy is the group-by builder for AccountGroup entities.
|
||||||
type AccountGroupGroupBy struct {
|
type AccountGroupGroupBy struct {
|
||||||
selector
|
selector
|
||||||
|
|||||||
@@ -3,6 +3,7 @@
|
|||||||
package ent
|
package ent
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
@@ -35,6 +36,10 @@ type APIKey struct {
|
|||||||
GroupID *int64 `json:"group_id,omitempty"`
|
GroupID *int64 `json:"group_id,omitempty"`
|
||||||
// Status holds the value of the "status" field.
|
// Status holds the value of the "status" field.
|
||||||
Status string `json:"status,omitempty"`
|
Status string `json:"status,omitempty"`
|
||||||
|
// Allowed IPs/CIDRs, e.g. ["192.168.1.100", "10.0.0.0/8"]
|
||||||
|
IPWhitelist []string `json:"ip_whitelist,omitempty"`
|
||||||
|
// Blocked IPs/CIDRs
|
||||||
|
IPBlacklist []string `json:"ip_blacklist,omitempty"`
|
||||||
// Edges holds the relations/edges for other nodes in the graph.
|
// Edges holds the relations/edges for other nodes in the graph.
|
||||||
// The values are being populated by the APIKeyQuery when eager-loading is set.
|
// The values are being populated by the APIKeyQuery when eager-loading is set.
|
||||||
Edges APIKeyEdges `json:"edges"`
|
Edges APIKeyEdges `json:"edges"`
|
||||||
@@ -90,6 +95,8 @@ func (*APIKey) scanValues(columns []string) ([]any, error) {
|
|||||||
values := make([]any, len(columns))
|
values := make([]any, len(columns))
|
||||||
for i := range columns {
|
for i := range columns {
|
||||||
switch columns[i] {
|
switch columns[i] {
|
||||||
|
case apikey.FieldIPWhitelist, apikey.FieldIPBlacklist:
|
||||||
|
values[i] = new([]byte)
|
||||||
case apikey.FieldID, apikey.FieldUserID, apikey.FieldGroupID:
|
case apikey.FieldID, apikey.FieldUserID, apikey.FieldGroupID:
|
||||||
values[i] = new(sql.NullInt64)
|
values[i] = new(sql.NullInt64)
|
||||||
case apikey.FieldKey, apikey.FieldName, apikey.FieldStatus:
|
case apikey.FieldKey, apikey.FieldName, apikey.FieldStatus:
|
||||||
@@ -167,6 +174,22 @@ func (_m *APIKey) assignValues(columns []string, values []any) error {
|
|||||||
} else if value.Valid {
|
} else if value.Valid {
|
||||||
_m.Status = value.String
|
_m.Status = value.String
|
||||||
}
|
}
|
||||||
|
case apikey.FieldIPWhitelist:
|
||||||
|
if value, ok := values[i].(*[]byte); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field ip_whitelist", values[i])
|
||||||
|
} else if value != nil && len(*value) > 0 {
|
||||||
|
if err := json.Unmarshal(*value, &_m.IPWhitelist); err != nil {
|
||||||
|
return fmt.Errorf("unmarshal field ip_whitelist: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case apikey.FieldIPBlacklist:
|
||||||
|
if value, ok := values[i].(*[]byte); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field ip_blacklist", values[i])
|
||||||
|
} else if value != nil && len(*value) > 0 {
|
||||||
|
if err := json.Unmarshal(*value, &_m.IPBlacklist); err != nil {
|
||||||
|
return fmt.Errorf("unmarshal field ip_blacklist: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
default:
|
default:
|
||||||
_m.selectValues.Set(columns[i], values[i])
|
_m.selectValues.Set(columns[i], values[i])
|
||||||
}
|
}
|
||||||
@@ -245,6 +268,12 @@ func (_m *APIKey) String() string {
|
|||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
builder.WriteString("status=")
|
builder.WriteString("status=")
|
||||||
builder.WriteString(_m.Status)
|
builder.WriteString(_m.Status)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("ip_whitelist=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.IPWhitelist))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("ip_blacklist=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.IPBlacklist))
|
||||||
builder.WriteByte(')')
|
builder.WriteByte(')')
|
||||||
return builder.String()
|
return builder.String()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -31,6 +31,10 @@ const (
|
|||||||
FieldGroupID = "group_id"
|
FieldGroupID = "group_id"
|
||||||
// FieldStatus holds the string denoting the status field in the database.
|
// FieldStatus holds the string denoting the status field in the database.
|
||||||
FieldStatus = "status"
|
FieldStatus = "status"
|
||||||
|
// FieldIPWhitelist holds the string denoting the ip_whitelist field in the database.
|
||||||
|
FieldIPWhitelist = "ip_whitelist"
|
||||||
|
// FieldIPBlacklist holds the string denoting the ip_blacklist field in the database.
|
||||||
|
FieldIPBlacklist = "ip_blacklist"
|
||||||
// EdgeUser holds the string denoting the user edge name in mutations.
|
// EdgeUser holds the string denoting the user edge name in mutations.
|
||||||
EdgeUser = "user"
|
EdgeUser = "user"
|
||||||
// EdgeGroup holds the string denoting the group edge name in mutations.
|
// EdgeGroup holds the string denoting the group edge name in mutations.
|
||||||
@@ -73,6 +77,8 @@ var Columns = []string{
|
|||||||
FieldName,
|
FieldName,
|
||||||
FieldGroupID,
|
FieldGroupID,
|
||||||
FieldStatus,
|
FieldStatus,
|
||||||
|
FieldIPWhitelist,
|
||||||
|
FieldIPBlacklist,
|
||||||
}
|
}
|
||||||
|
|
||||||
// ValidColumn reports if the column name is valid (part of the table columns).
|
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||||
|
|||||||
@@ -470,6 +470,26 @@ func StatusContainsFold(v string) predicate.APIKey {
|
|||||||
return predicate.APIKey(sql.FieldContainsFold(FieldStatus, v))
|
return predicate.APIKey(sql.FieldContainsFold(FieldStatus, v))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IPWhitelistIsNil applies the IsNil predicate on the "ip_whitelist" field.
|
||||||
|
func IPWhitelistIsNil() predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldIsNull(FieldIPWhitelist))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPWhitelistNotNil applies the NotNil predicate on the "ip_whitelist" field.
|
||||||
|
func IPWhitelistNotNil() predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNotNull(FieldIPWhitelist))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPBlacklistIsNil applies the IsNil predicate on the "ip_blacklist" field.
|
||||||
|
func IPBlacklistIsNil() predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldIsNull(FieldIPBlacklist))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPBlacklistNotNil applies the NotNil predicate on the "ip_blacklist" field.
|
||||||
|
func IPBlacklistNotNil() predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNotNull(FieldIPBlacklist))
|
||||||
|
}
|
||||||
|
|
||||||
// HasUser applies the HasEdge predicate on the "user" edge.
|
// HasUser applies the HasEdge predicate on the "user" edge.
|
||||||
func HasUser() predicate.APIKey {
|
func HasUser() predicate.APIKey {
|
||||||
return predicate.APIKey(func(s *sql.Selector) {
|
return predicate.APIKey(func(s *sql.Selector) {
|
||||||
|
|||||||
@@ -113,6 +113,18 @@ func (_c *APIKeyCreate) SetNillableStatus(v *string) *APIKeyCreate {
|
|||||||
return _c
|
return _c
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetIPWhitelist sets the "ip_whitelist" field.
|
||||||
|
func (_c *APIKeyCreate) SetIPWhitelist(v []string) *APIKeyCreate {
|
||||||
|
_c.mutation.SetIPWhitelist(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetIPBlacklist sets the "ip_blacklist" field.
|
||||||
|
func (_c *APIKeyCreate) SetIPBlacklist(v []string) *APIKeyCreate {
|
||||||
|
_c.mutation.SetIPBlacklist(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
// SetUser sets the "user" edge to the User entity.
|
// SetUser sets the "user" edge to the User entity.
|
||||||
func (_c *APIKeyCreate) SetUser(v *User) *APIKeyCreate {
|
func (_c *APIKeyCreate) SetUser(v *User) *APIKeyCreate {
|
||||||
return _c.SetUserID(v.ID)
|
return _c.SetUserID(v.ID)
|
||||||
@@ -285,6 +297,14 @@ func (_c *APIKeyCreate) createSpec() (*APIKey, *sqlgraph.CreateSpec) {
|
|||||||
_spec.SetField(apikey.FieldStatus, field.TypeString, value)
|
_spec.SetField(apikey.FieldStatus, field.TypeString, value)
|
||||||
_node.Status = value
|
_node.Status = value
|
||||||
}
|
}
|
||||||
|
if value, ok := _c.mutation.IPWhitelist(); ok {
|
||||||
|
_spec.SetField(apikey.FieldIPWhitelist, field.TypeJSON, value)
|
||||||
|
_node.IPWhitelist = value
|
||||||
|
}
|
||||||
|
if value, ok := _c.mutation.IPBlacklist(); ok {
|
||||||
|
_spec.SetField(apikey.FieldIPBlacklist, field.TypeJSON, value)
|
||||||
|
_node.IPBlacklist = value
|
||||||
|
}
|
||||||
if nodes := _c.mutation.UserIDs(); len(nodes) > 0 {
|
if nodes := _c.mutation.UserIDs(); len(nodes) > 0 {
|
||||||
edge := &sqlgraph.EdgeSpec{
|
edge := &sqlgraph.EdgeSpec{
|
||||||
Rel: sqlgraph.M2O,
|
Rel: sqlgraph.M2O,
|
||||||
@@ -483,6 +503,42 @@ func (u *APIKeyUpsert) UpdateStatus() *APIKeyUpsert {
|
|||||||
return u
|
return u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetIPWhitelist sets the "ip_whitelist" field.
|
||||||
|
func (u *APIKeyUpsert) SetIPWhitelist(v []string) *APIKeyUpsert {
|
||||||
|
u.Set(apikey.FieldIPWhitelist, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateIPWhitelist sets the "ip_whitelist" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsert) UpdateIPWhitelist() *APIKeyUpsert {
|
||||||
|
u.SetExcluded(apikey.FieldIPWhitelist)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearIPWhitelist clears the value of the "ip_whitelist" field.
|
||||||
|
func (u *APIKeyUpsert) ClearIPWhitelist() *APIKeyUpsert {
|
||||||
|
u.SetNull(apikey.FieldIPWhitelist)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetIPBlacklist sets the "ip_blacklist" field.
|
||||||
|
func (u *APIKeyUpsert) SetIPBlacklist(v []string) *APIKeyUpsert {
|
||||||
|
u.Set(apikey.FieldIPBlacklist, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateIPBlacklist sets the "ip_blacklist" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsert) UpdateIPBlacklist() *APIKeyUpsert {
|
||||||
|
u.SetExcluded(apikey.FieldIPBlacklist)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearIPBlacklist clears the value of the "ip_blacklist" field.
|
||||||
|
func (u *APIKeyUpsert) ClearIPBlacklist() *APIKeyUpsert {
|
||||||
|
u.SetNull(apikey.FieldIPBlacklist)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
// UpdateNewValues updates the mutable fields using the new values that were set on create.
|
// UpdateNewValues updates the mutable fields using the new values that were set on create.
|
||||||
// Using this option is equivalent to using:
|
// Using this option is equivalent to using:
|
||||||
//
|
//
|
||||||
@@ -640,6 +696,48 @@ func (u *APIKeyUpsertOne) UpdateStatus() *APIKeyUpsertOne {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetIPWhitelist sets the "ip_whitelist" field.
|
||||||
|
func (u *APIKeyUpsertOne) SetIPWhitelist(v []string) *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.SetIPWhitelist(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateIPWhitelist sets the "ip_whitelist" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsertOne) UpdateIPWhitelist() *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.UpdateIPWhitelist()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearIPWhitelist clears the value of the "ip_whitelist" field.
|
||||||
|
func (u *APIKeyUpsertOne) ClearIPWhitelist() *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.ClearIPWhitelist()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetIPBlacklist sets the "ip_blacklist" field.
|
||||||
|
func (u *APIKeyUpsertOne) SetIPBlacklist(v []string) *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.SetIPBlacklist(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateIPBlacklist sets the "ip_blacklist" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsertOne) UpdateIPBlacklist() *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.UpdateIPBlacklist()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearIPBlacklist clears the value of the "ip_blacklist" field.
|
||||||
|
func (u *APIKeyUpsertOne) ClearIPBlacklist() *APIKeyUpsertOne {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.ClearIPBlacklist()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// Exec executes the query.
|
// Exec executes the query.
|
||||||
func (u *APIKeyUpsertOne) Exec(ctx context.Context) error {
|
func (u *APIKeyUpsertOne) Exec(ctx context.Context) error {
|
||||||
if len(u.create.conflict) == 0 {
|
if len(u.create.conflict) == 0 {
|
||||||
@@ -963,6 +1061,48 @@ func (u *APIKeyUpsertBulk) UpdateStatus() *APIKeyUpsertBulk {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetIPWhitelist sets the "ip_whitelist" field.
|
||||||
|
func (u *APIKeyUpsertBulk) SetIPWhitelist(v []string) *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.SetIPWhitelist(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateIPWhitelist sets the "ip_whitelist" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsertBulk) UpdateIPWhitelist() *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.UpdateIPWhitelist()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearIPWhitelist clears the value of the "ip_whitelist" field.
|
||||||
|
func (u *APIKeyUpsertBulk) ClearIPWhitelist() *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.ClearIPWhitelist()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetIPBlacklist sets the "ip_blacklist" field.
|
||||||
|
func (u *APIKeyUpsertBulk) SetIPBlacklist(v []string) *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.SetIPBlacklist(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateIPBlacklist sets the "ip_blacklist" field to the value that was provided on create.
|
||||||
|
func (u *APIKeyUpsertBulk) UpdateIPBlacklist() *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.UpdateIPBlacklist()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearIPBlacklist clears the value of the "ip_blacklist" field.
|
||||||
|
func (u *APIKeyUpsertBulk) ClearIPBlacklist() *APIKeyUpsertBulk {
|
||||||
|
return u.Update(func(s *APIKeyUpsert) {
|
||||||
|
s.ClearIPBlacklist()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// Exec executes the query.
|
// Exec executes the query.
|
||||||
func (u *APIKeyUpsertBulk) Exec(ctx context.Context) error {
|
func (u *APIKeyUpsertBulk) Exec(ctx context.Context) error {
|
||||||
if u.create.err != nil {
|
if u.create.err != nil {
|
||||||
|
|||||||
@@ -9,6 +9,7 @@ import (
|
|||||||
"math"
|
"math"
|
||||||
|
|
||||||
"entgo.io/ent"
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
"entgo.io/ent/dialect/sql"
|
"entgo.io/ent/dialect/sql"
|
||||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
"entgo.io/ent/schema/field"
|
"entgo.io/ent/schema/field"
|
||||||
@@ -29,6 +30,7 @@ type APIKeyQuery struct {
|
|||||||
withUser *UserQuery
|
withUser *UserQuery
|
||||||
withGroup *GroupQuery
|
withGroup *GroupQuery
|
||||||
withUsageLogs *UsageLogQuery
|
withUsageLogs *UsageLogQuery
|
||||||
|
modifiers []func(*sql.Selector)
|
||||||
// intermediate query (i.e. traversal path).
|
// intermediate query (i.e. traversal path).
|
||||||
sql *sql.Selector
|
sql *sql.Selector
|
||||||
path func(context.Context) (*sql.Selector, error)
|
path func(context.Context) (*sql.Selector, error)
|
||||||
@@ -458,6 +460,9 @@ func (_q *APIKeyQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*APIKe
|
|||||||
node.Edges.loadedTypes = loadedTypes
|
node.Edges.loadedTypes = loadedTypes
|
||||||
return node.assignValues(columns, values)
|
return node.assignValues(columns, values)
|
||||||
}
|
}
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
for i := range hooks {
|
for i := range hooks {
|
||||||
hooks[i](ctx, _spec)
|
hooks[i](ctx, _spec)
|
||||||
}
|
}
|
||||||
@@ -583,6 +588,9 @@ func (_q *APIKeyQuery) loadUsageLogs(ctx context.Context, query *UsageLogQuery,
|
|||||||
|
|
||||||
func (_q *APIKeyQuery) sqlCount(ctx context.Context) (int, error) {
|
func (_q *APIKeyQuery) sqlCount(ctx context.Context) (int, error) {
|
||||||
_spec := _q.querySpec()
|
_spec := _q.querySpec()
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
_spec.Node.Columns = _q.ctx.Fields
|
_spec.Node.Columns = _q.ctx.Fields
|
||||||
if len(_q.ctx.Fields) > 0 {
|
if len(_q.ctx.Fields) > 0 {
|
||||||
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
||||||
@@ -651,6 +659,9 @@ func (_q *APIKeyQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
|||||||
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
||||||
selector.Distinct()
|
selector.Distinct()
|
||||||
}
|
}
|
||||||
|
for _, m := range _q.modifiers {
|
||||||
|
m(selector)
|
||||||
|
}
|
||||||
for _, p := range _q.predicates {
|
for _, p := range _q.predicates {
|
||||||
p(selector)
|
p(selector)
|
||||||
}
|
}
|
||||||
@@ -668,6 +679,32 @@ func (_q *APIKeyQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
|||||||
return selector
|
return selector
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||||
|
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||||
|
// either committed or rolled-back.
|
||||||
|
func (_q *APIKeyQuery) ForUpdate(opts ...sql.LockOption) *APIKeyQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForUpdate(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||||
|
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||||
|
// until your transaction commits.
|
||||||
|
func (_q *APIKeyQuery) ForShare(opts ...sql.LockOption) *APIKeyQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForShare(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
// APIKeyGroupBy is the group-by builder for APIKey entities.
|
// APIKeyGroupBy is the group-by builder for APIKey entities.
|
||||||
type APIKeyGroupBy struct {
|
type APIKeyGroupBy struct {
|
||||||
selector
|
selector
|
||||||
|
|||||||
@@ -10,6 +10,7 @@ import (
|
|||||||
|
|
||||||
"entgo.io/ent/dialect/sql"
|
"entgo.io/ent/dialect/sql"
|
||||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/dialect/sql/sqljson"
|
||||||
"entgo.io/ent/schema/field"
|
"entgo.io/ent/schema/field"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/group"
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
@@ -133,6 +134,42 @@ func (_u *APIKeyUpdate) SetNillableStatus(v *string) *APIKeyUpdate {
|
|||||||
return _u
|
return _u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetIPWhitelist sets the "ip_whitelist" field.
|
||||||
|
func (_u *APIKeyUpdate) SetIPWhitelist(v []string) *APIKeyUpdate {
|
||||||
|
_u.mutation.SetIPWhitelist(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendIPWhitelist appends value to the "ip_whitelist" field.
|
||||||
|
func (_u *APIKeyUpdate) AppendIPWhitelist(v []string) *APIKeyUpdate {
|
||||||
|
_u.mutation.AppendIPWhitelist(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearIPWhitelist clears the value of the "ip_whitelist" field.
|
||||||
|
func (_u *APIKeyUpdate) ClearIPWhitelist() *APIKeyUpdate {
|
||||||
|
_u.mutation.ClearIPWhitelist()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetIPBlacklist sets the "ip_blacklist" field.
|
||||||
|
func (_u *APIKeyUpdate) SetIPBlacklist(v []string) *APIKeyUpdate {
|
||||||
|
_u.mutation.SetIPBlacklist(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendIPBlacklist appends value to the "ip_blacklist" field.
|
||||||
|
func (_u *APIKeyUpdate) AppendIPBlacklist(v []string) *APIKeyUpdate {
|
||||||
|
_u.mutation.AppendIPBlacklist(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearIPBlacklist clears the value of the "ip_blacklist" field.
|
||||||
|
func (_u *APIKeyUpdate) ClearIPBlacklist() *APIKeyUpdate {
|
||||||
|
_u.mutation.ClearIPBlacklist()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
// SetUser sets the "user" edge to the User entity.
|
// SetUser sets the "user" edge to the User entity.
|
||||||
func (_u *APIKeyUpdate) SetUser(v *User) *APIKeyUpdate {
|
func (_u *APIKeyUpdate) SetUser(v *User) *APIKeyUpdate {
|
||||||
return _u.SetUserID(v.ID)
|
return _u.SetUserID(v.ID)
|
||||||
@@ -291,6 +328,28 @@ func (_u *APIKeyUpdate) sqlSave(ctx context.Context) (_node int, err error) {
|
|||||||
if value, ok := _u.mutation.Status(); ok {
|
if value, ok := _u.mutation.Status(); ok {
|
||||||
_spec.SetField(apikey.FieldStatus, field.TypeString, value)
|
_spec.SetField(apikey.FieldStatus, field.TypeString, value)
|
||||||
}
|
}
|
||||||
|
if value, ok := _u.mutation.IPWhitelist(); ok {
|
||||||
|
_spec.SetField(apikey.FieldIPWhitelist, field.TypeJSON, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AppendedIPWhitelist(); ok {
|
||||||
|
_spec.AddModifier(func(u *sql.UpdateBuilder) {
|
||||||
|
sqljson.Append(u, apikey.FieldIPWhitelist, value)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if _u.mutation.IPWhitelistCleared() {
|
||||||
|
_spec.ClearField(apikey.FieldIPWhitelist, field.TypeJSON)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.IPBlacklist(); ok {
|
||||||
|
_spec.SetField(apikey.FieldIPBlacklist, field.TypeJSON, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AppendedIPBlacklist(); ok {
|
||||||
|
_spec.AddModifier(func(u *sql.UpdateBuilder) {
|
||||||
|
sqljson.Append(u, apikey.FieldIPBlacklist, value)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if _u.mutation.IPBlacklistCleared() {
|
||||||
|
_spec.ClearField(apikey.FieldIPBlacklist, field.TypeJSON)
|
||||||
|
}
|
||||||
if _u.mutation.UserCleared() {
|
if _u.mutation.UserCleared() {
|
||||||
edge := &sqlgraph.EdgeSpec{
|
edge := &sqlgraph.EdgeSpec{
|
||||||
Rel: sqlgraph.M2O,
|
Rel: sqlgraph.M2O,
|
||||||
@@ -516,6 +575,42 @@ func (_u *APIKeyUpdateOne) SetNillableStatus(v *string) *APIKeyUpdateOne {
|
|||||||
return _u
|
return _u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetIPWhitelist sets the "ip_whitelist" field.
|
||||||
|
func (_u *APIKeyUpdateOne) SetIPWhitelist(v []string) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.SetIPWhitelist(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendIPWhitelist appends value to the "ip_whitelist" field.
|
||||||
|
func (_u *APIKeyUpdateOne) AppendIPWhitelist(v []string) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.AppendIPWhitelist(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearIPWhitelist clears the value of the "ip_whitelist" field.
|
||||||
|
func (_u *APIKeyUpdateOne) ClearIPWhitelist() *APIKeyUpdateOne {
|
||||||
|
_u.mutation.ClearIPWhitelist()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetIPBlacklist sets the "ip_blacklist" field.
|
||||||
|
func (_u *APIKeyUpdateOne) SetIPBlacklist(v []string) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.SetIPBlacklist(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendIPBlacklist appends value to the "ip_blacklist" field.
|
||||||
|
func (_u *APIKeyUpdateOne) AppendIPBlacklist(v []string) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.AppendIPBlacklist(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearIPBlacklist clears the value of the "ip_blacklist" field.
|
||||||
|
func (_u *APIKeyUpdateOne) ClearIPBlacklist() *APIKeyUpdateOne {
|
||||||
|
_u.mutation.ClearIPBlacklist()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
// SetUser sets the "user" edge to the User entity.
|
// SetUser sets the "user" edge to the User entity.
|
||||||
func (_u *APIKeyUpdateOne) SetUser(v *User) *APIKeyUpdateOne {
|
func (_u *APIKeyUpdateOne) SetUser(v *User) *APIKeyUpdateOne {
|
||||||
return _u.SetUserID(v.ID)
|
return _u.SetUserID(v.ID)
|
||||||
@@ -704,6 +799,28 @@ func (_u *APIKeyUpdateOne) sqlSave(ctx context.Context) (_node *APIKey, err erro
|
|||||||
if value, ok := _u.mutation.Status(); ok {
|
if value, ok := _u.mutation.Status(); ok {
|
||||||
_spec.SetField(apikey.FieldStatus, field.TypeString, value)
|
_spec.SetField(apikey.FieldStatus, field.TypeString, value)
|
||||||
}
|
}
|
||||||
|
if value, ok := _u.mutation.IPWhitelist(); ok {
|
||||||
|
_spec.SetField(apikey.FieldIPWhitelist, field.TypeJSON, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AppendedIPWhitelist(); ok {
|
||||||
|
_spec.AddModifier(func(u *sql.UpdateBuilder) {
|
||||||
|
sqljson.Append(u, apikey.FieldIPWhitelist, value)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if _u.mutation.IPWhitelistCleared() {
|
||||||
|
_spec.ClearField(apikey.FieldIPWhitelist, field.TypeJSON)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.IPBlacklist(); ok {
|
||||||
|
_spec.SetField(apikey.FieldIPBlacklist, field.TypeJSON, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AppendedIPBlacklist(); ok {
|
||||||
|
_spec.AddModifier(func(u *sql.UpdateBuilder) {
|
||||||
|
sqljson.Append(u, apikey.FieldIPBlacklist, value)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if _u.mutation.IPBlacklistCleared() {
|
||||||
|
_spec.ClearField(apikey.FieldIPBlacklist, field.TypeJSON)
|
||||||
|
}
|
||||||
if _u.mutation.UserCleared() {
|
if _u.mutation.UserCleared() {
|
||||||
edge := &sqlgraph.EdgeSpec{
|
edge := &sqlgraph.EdgeSpec{
|
||||||
Rel: sqlgraph.M2O,
|
Rel: sqlgraph.M2O,
|
||||||
|
|||||||
@@ -19,6 +19,8 @@ import (
|
|||||||
"github.com/Wei-Shaw/sub2api/ent/accountgroup"
|
"github.com/Wei-Shaw/sub2api/ent/accountgroup"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/group"
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/proxy"
|
"github.com/Wei-Shaw/sub2api/ent/proxy"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/setting"
|
"github.com/Wei-Shaw/sub2api/ent/setting"
|
||||||
@@ -45,6 +47,10 @@ type Client struct {
|
|||||||
AccountGroup *AccountGroupClient
|
AccountGroup *AccountGroupClient
|
||||||
// Group is the client for interacting with the Group builders.
|
// Group is the client for interacting with the Group builders.
|
||||||
Group *GroupClient
|
Group *GroupClient
|
||||||
|
// PromoCode is the client for interacting with the PromoCode builders.
|
||||||
|
PromoCode *PromoCodeClient
|
||||||
|
// PromoCodeUsage is the client for interacting with the PromoCodeUsage builders.
|
||||||
|
PromoCodeUsage *PromoCodeUsageClient
|
||||||
// Proxy is the client for interacting with the Proxy builders.
|
// Proxy is the client for interacting with the Proxy builders.
|
||||||
Proxy *ProxyClient
|
Proxy *ProxyClient
|
||||||
// RedeemCode is the client for interacting with the RedeemCode builders.
|
// RedeemCode is the client for interacting with the RedeemCode builders.
|
||||||
@@ -78,6 +84,8 @@ func (c *Client) init() {
|
|||||||
c.Account = NewAccountClient(c.config)
|
c.Account = NewAccountClient(c.config)
|
||||||
c.AccountGroup = NewAccountGroupClient(c.config)
|
c.AccountGroup = NewAccountGroupClient(c.config)
|
||||||
c.Group = NewGroupClient(c.config)
|
c.Group = NewGroupClient(c.config)
|
||||||
|
c.PromoCode = NewPromoCodeClient(c.config)
|
||||||
|
c.PromoCodeUsage = NewPromoCodeUsageClient(c.config)
|
||||||
c.Proxy = NewProxyClient(c.config)
|
c.Proxy = NewProxyClient(c.config)
|
||||||
c.RedeemCode = NewRedeemCodeClient(c.config)
|
c.RedeemCode = NewRedeemCodeClient(c.config)
|
||||||
c.Setting = NewSettingClient(c.config)
|
c.Setting = NewSettingClient(c.config)
|
||||||
@@ -183,6 +191,8 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) {
|
|||||||
Account: NewAccountClient(cfg),
|
Account: NewAccountClient(cfg),
|
||||||
AccountGroup: NewAccountGroupClient(cfg),
|
AccountGroup: NewAccountGroupClient(cfg),
|
||||||
Group: NewGroupClient(cfg),
|
Group: NewGroupClient(cfg),
|
||||||
|
PromoCode: NewPromoCodeClient(cfg),
|
||||||
|
PromoCodeUsage: NewPromoCodeUsageClient(cfg),
|
||||||
Proxy: NewProxyClient(cfg),
|
Proxy: NewProxyClient(cfg),
|
||||||
RedeemCode: NewRedeemCodeClient(cfg),
|
RedeemCode: NewRedeemCodeClient(cfg),
|
||||||
Setting: NewSettingClient(cfg),
|
Setting: NewSettingClient(cfg),
|
||||||
@@ -215,6 +225,8 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error)
|
|||||||
Account: NewAccountClient(cfg),
|
Account: NewAccountClient(cfg),
|
||||||
AccountGroup: NewAccountGroupClient(cfg),
|
AccountGroup: NewAccountGroupClient(cfg),
|
||||||
Group: NewGroupClient(cfg),
|
Group: NewGroupClient(cfg),
|
||||||
|
PromoCode: NewPromoCodeClient(cfg),
|
||||||
|
PromoCodeUsage: NewPromoCodeUsageClient(cfg),
|
||||||
Proxy: NewProxyClient(cfg),
|
Proxy: NewProxyClient(cfg),
|
||||||
RedeemCode: NewRedeemCodeClient(cfg),
|
RedeemCode: NewRedeemCodeClient(cfg),
|
||||||
Setting: NewSettingClient(cfg),
|
Setting: NewSettingClient(cfg),
|
||||||
@@ -253,9 +265,9 @@ func (c *Client) Close() error {
|
|||||||
// In order to add hooks to a specific client, call: `client.Node.Use(...)`.
|
// In order to add hooks to a specific client, call: `client.Node.Use(...)`.
|
||||||
func (c *Client) Use(hooks ...Hook) {
|
func (c *Client) Use(hooks ...Hook) {
|
||||||
for _, n := range []interface{ Use(...Hook) }{
|
for _, n := range []interface{ Use(...Hook) }{
|
||||||
c.APIKey, c.Account, c.AccountGroup, c.Group, c.Proxy, c.RedeemCode, c.Setting,
|
c.APIKey, c.Account, c.AccountGroup, c.Group, c.PromoCode, c.PromoCodeUsage,
|
||||||
c.UsageLog, c.User, c.UserAllowedGroup, c.UserAttributeDefinition,
|
c.Proxy, c.RedeemCode, c.Setting, c.UsageLog, c.User, c.UserAllowedGroup,
|
||||||
c.UserAttributeValue, c.UserSubscription,
|
c.UserAttributeDefinition, c.UserAttributeValue, c.UserSubscription,
|
||||||
} {
|
} {
|
||||||
n.Use(hooks...)
|
n.Use(hooks...)
|
||||||
}
|
}
|
||||||
@@ -265,9 +277,9 @@ func (c *Client) Use(hooks ...Hook) {
|
|||||||
// In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`.
|
// In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`.
|
||||||
func (c *Client) Intercept(interceptors ...Interceptor) {
|
func (c *Client) Intercept(interceptors ...Interceptor) {
|
||||||
for _, n := range []interface{ Intercept(...Interceptor) }{
|
for _, n := range []interface{ Intercept(...Interceptor) }{
|
||||||
c.APIKey, c.Account, c.AccountGroup, c.Group, c.Proxy, c.RedeemCode, c.Setting,
|
c.APIKey, c.Account, c.AccountGroup, c.Group, c.PromoCode, c.PromoCodeUsage,
|
||||||
c.UsageLog, c.User, c.UserAllowedGroup, c.UserAttributeDefinition,
|
c.Proxy, c.RedeemCode, c.Setting, c.UsageLog, c.User, c.UserAllowedGroup,
|
||||||
c.UserAttributeValue, c.UserSubscription,
|
c.UserAttributeDefinition, c.UserAttributeValue, c.UserSubscription,
|
||||||
} {
|
} {
|
||||||
n.Intercept(interceptors...)
|
n.Intercept(interceptors...)
|
||||||
}
|
}
|
||||||
@@ -284,6 +296,10 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) {
|
|||||||
return c.AccountGroup.mutate(ctx, m)
|
return c.AccountGroup.mutate(ctx, m)
|
||||||
case *GroupMutation:
|
case *GroupMutation:
|
||||||
return c.Group.mutate(ctx, m)
|
return c.Group.mutate(ctx, m)
|
||||||
|
case *PromoCodeMutation:
|
||||||
|
return c.PromoCode.mutate(ctx, m)
|
||||||
|
case *PromoCodeUsageMutation:
|
||||||
|
return c.PromoCodeUsage.mutate(ctx, m)
|
||||||
case *ProxyMutation:
|
case *ProxyMutation:
|
||||||
return c.Proxy.mutate(ctx, m)
|
return c.Proxy.mutate(ctx, m)
|
||||||
case *RedeemCodeMutation:
|
case *RedeemCodeMutation:
|
||||||
@@ -1068,6 +1084,320 @@ func (c *GroupClient) mutate(ctx context.Context, m *GroupMutation) (Value, erro
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// PromoCodeClient is a client for the PromoCode schema.
|
||||||
|
type PromoCodeClient struct {
|
||||||
|
config
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewPromoCodeClient returns a client for the PromoCode from the given config.
|
||||||
|
func NewPromoCodeClient(c config) *PromoCodeClient {
|
||||||
|
return &PromoCodeClient{config: c}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use adds a list of mutation hooks to the hooks stack.
|
||||||
|
// A call to `Use(f, g, h)` equals to `promocode.Hooks(f(g(h())))`.
|
||||||
|
func (c *PromoCodeClient) Use(hooks ...Hook) {
|
||||||
|
c.hooks.PromoCode = append(c.hooks.PromoCode, hooks...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Intercept adds a list of query interceptors to the interceptors stack.
|
||||||
|
// A call to `Intercept(f, g, h)` equals to `promocode.Intercept(f(g(h())))`.
|
||||||
|
func (c *PromoCodeClient) Intercept(interceptors ...Interceptor) {
|
||||||
|
c.inters.PromoCode = append(c.inters.PromoCode, interceptors...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create returns a builder for creating a PromoCode entity.
|
||||||
|
func (c *PromoCodeClient) Create() *PromoCodeCreate {
|
||||||
|
mutation := newPromoCodeMutation(c.config, OpCreate)
|
||||||
|
return &PromoCodeCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateBulk returns a builder for creating a bulk of PromoCode entities.
|
||||||
|
func (c *PromoCodeClient) CreateBulk(builders ...*PromoCodeCreate) *PromoCodeCreateBulk {
|
||||||
|
return &PromoCodeCreateBulk{config: c.config, builders: builders}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
|
||||||
|
// a builder and applies setFunc on it.
|
||||||
|
func (c *PromoCodeClient) MapCreateBulk(slice any, setFunc func(*PromoCodeCreate, int)) *PromoCodeCreateBulk {
|
||||||
|
rv := reflect.ValueOf(slice)
|
||||||
|
if rv.Kind() != reflect.Slice {
|
||||||
|
return &PromoCodeCreateBulk{err: fmt.Errorf("calling to PromoCodeClient.MapCreateBulk with wrong type %T, need slice", slice)}
|
||||||
|
}
|
||||||
|
builders := make([]*PromoCodeCreate, rv.Len())
|
||||||
|
for i := 0; i < rv.Len(); i++ {
|
||||||
|
builders[i] = c.Create()
|
||||||
|
setFunc(builders[i], i)
|
||||||
|
}
|
||||||
|
return &PromoCodeCreateBulk{config: c.config, builders: builders}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update returns an update builder for PromoCode.
|
||||||
|
func (c *PromoCodeClient) Update() *PromoCodeUpdate {
|
||||||
|
mutation := newPromoCodeMutation(c.config, OpUpdate)
|
||||||
|
return &PromoCodeUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateOne returns an update builder for the given entity.
|
||||||
|
func (c *PromoCodeClient) UpdateOne(_m *PromoCode) *PromoCodeUpdateOne {
|
||||||
|
mutation := newPromoCodeMutation(c.config, OpUpdateOne, withPromoCode(_m))
|
||||||
|
return &PromoCodeUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateOneID returns an update builder for the given id.
|
||||||
|
func (c *PromoCodeClient) UpdateOneID(id int64) *PromoCodeUpdateOne {
|
||||||
|
mutation := newPromoCodeMutation(c.config, OpUpdateOne, withPromoCodeID(id))
|
||||||
|
return &PromoCodeUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete returns a delete builder for PromoCode.
|
||||||
|
func (c *PromoCodeClient) Delete() *PromoCodeDelete {
|
||||||
|
mutation := newPromoCodeMutation(c.config, OpDelete)
|
||||||
|
return &PromoCodeDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteOne returns a builder for deleting the given entity.
|
||||||
|
func (c *PromoCodeClient) DeleteOne(_m *PromoCode) *PromoCodeDeleteOne {
|
||||||
|
return c.DeleteOneID(_m.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteOneID returns a builder for deleting the given entity by its id.
|
||||||
|
func (c *PromoCodeClient) DeleteOneID(id int64) *PromoCodeDeleteOne {
|
||||||
|
builder := c.Delete().Where(promocode.ID(id))
|
||||||
|
builder.mutation.id = &id
|
||||||
|
builder.mutation.op = OpDeleteOne
|
||||||
|
return &PromoCodeDeleteOne{builder}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Query returns a query builder for PromoCode.
|
||||||
|
func (c *PromoCodeClient) Query() *PromoCodeQuery {
|
||||||
|
return &PromoCodeQuery{
|
||||||
|
config: c.config,
|
||||||
|
ctx: &QueryContext{Type: TypePromoCode},
|
||||||
|
inters: c.Interceptors(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get returns a PromoCode entity by its id.
|
||||||
|
func (c *PromoCodeClient) Get(ctx context.Context, id int64) (*PromoCode, error) {
|
||||||
|
return c.Query().Where(promocode.ID(id)).Only(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetX is like Get, but panics if an error occurs.
|
||||||
|
func (c *PromoCodeClient) GetX(ctx context.Context, id int64) *PromoCode {
|
||||||
|
obj, err := c.Get(ctx, id)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return obj
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryUsageRecords queries the usage_records edge of a PromoCode.
|
||||||
|
func (c *PromoCodeClient) QueryUsageRecords(_m *PromoCode) *PromoCodeUsageQuery {
|
||||||
|
query := (&PromoCodeUsageClient{config: c.config}).Query()
|
||||||
|
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
|
||||||
|
id := _m.ID
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(promocode.Table, promocode.FieldID, id),
|
||||||
|
sqlgraph.To(promocodeusage.Table, promocodeusage.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, false, promocode.UsageRecordsTable, promocode.UsageRecordsColumn),
|
||||||
|
)
|
||||||
|
fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step)
|
||||||
|
return fromV, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hooks returns the client hooks.
|
||||||
|
func (c *PromoCodeClient) Hooks() []Hook {
|
||||||
|
return c.hooks.PromoCode
|
||||||
|
}
|
||||||
|
|
||||||
|
// Interceptors returns the client interceptors.
|
||||||
|
func (c *PromoCodeClient) Interceptors() []Interceptor {
|
||||||
|
return c.inters.PromoCode
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *PromoCodeClient) mutate(ctx context.Context, m *PromoCodeMutation) (Value, error) {
|
||||||
|
switch m.Op() {
|
||||||
|
case OpCreate:
|
||||||
|
return (&PromoCodeCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||||
|
case OpUpdate:
|
||||||
|
return (&PromoCodeUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||||
|
case OpUpdateOne:
|
||||||
|
return (&PromoCodeUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||||
|
case OpDelete, OpDeleteOne:
|
||||||
|
return (&PromoCodeDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("ent: unknown PromoCode mutation op: %q", m.Op())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// PromoCodeUsageClient is a client for the PromoCodeUsage schema.
|
||||||
|
type PromoCodeUsageClient struct {
|
||||||
|
config
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewPromoCodeUsageClient returns a client for the PromoCodeUsage from the given config.
|
||||||
|
func NewPromoCodeUsageClient(c config) *PromoCodeUsageClient {
|
||||||
|
return &PromoCodeUsageClient{config: c}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use adds a list of mutation hooks to the hooks stack.
|
||||||
|
// A call to `Use(f, g, h)` equals to `promocodeusage.Hooks(f(g(h())))`.
|
||||||
|
func (c *PromoCodeUsageClient) Use(hooks ...Hook) {
|
||||||
|
c.hooks.PromoCodeUsage = append(c.hooks.PromoCodeUsage, hooks...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Intercept adds a list of query interceptors to the interceptors stack.
|
||||||
|
// A call to `Intercept(f, g, h)` equals to `promocodeusage.Intercept(f(g(h())))`.
|
||||||
|
func (c *PromoCodeUsageClient) Intercept(interceptors ...Interceptor) {
|
||||||
|
c.inters.PromoCodeUsage = append(c.inters.PromoCodeUsage, interceptors...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create returns a builder for creating a PromoCodeUsage entity.
|
||||||
|
func (c *PromoCodeUsageClient) Create() *PromoCodeUsageCreate {
|
||||||
|
mutation := newPromoCodeUsageMutation(c.config, OpCreate)
|
||||||
|
return &PromoCodeUsageCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateBulk returns a builder for creating a bulk of PromoCodeUsage entities.
|
||||||
|
func (c *PromoCodeUsageClient) CreateBulk(builders ...*PromoCodeUsageCreate) *PromoCodeUsageCreateBulk {
|
||||||
|
return &PromoCodeUsageCreateBulk{config: c.config, builders: builders}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
|
||||||
|
// a builder and applies setFunc on it.
|
||||||
|
func (c *PromoCodeUsageClient) MapCreateBulk(slice any, setFunc func(*PromoCodeUsageCreate, int)) *PromoCodeUsageCreateBulk {
|
||||||
|
rv := reflect.ValueOf(slice)
|
||||||
|
if rv.Kind() != reflect.Slice {
|
||||||
|
return &PromoCodeUsageCreateBulk{err: fmt.Errorf("calling to PromoCodeUsageClient.MapCreateBulk with wrong type %T, need slice", slice)}
|
||||||
|
}
|
||||||
|
builders := make([]*PromoCodeUsageCreate, rv.Len())
|
||||||
|
for i := 0; i < rv.Len(); i++ {
|
||||||
|
builders[i] = c.Create()
|
||||||
|
setFunc(builders[i], i)
|
||||||
|
}
|
||||||
|
return &PromoCodeUsageCreateBulk{config: c.config, builders: builders}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update returns an update builder for PromoCodeUsage.
|
||||||
|
func (c *PromoCodeUsageClient) Update() *PromoCodeUsageUpdate {
|
||||||
|
mutation := newPromoCodeUsageMutation(c.config, OpUpdate)
|
||||||
|
return &PromoCodeUsageUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateOne returns an update builder for the given entity.
|
||||||
|
func (c *PromoCodeUsageClient) UpdateOne(_m *PromoCodeUsage) *PromoCodeUsageUpdateOne {
|
||||||
|
mutation := newPromoCodeUsageMutation(c.config, OpUpdateOne, withPromoCodeUsage(_m))
|
||||||
|
return &PromoCodeUsageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateOneID returns an update builder for the given id.
|
||||||
|
func (c *PromoCodeUsageClient) UpdateOneID(id int64) *PromoCodeUsageUpdateOne {
|
||||||
|
mutation := newPromoCodeUsageMutation(c.config, OpUpdateOne, withPromoCodeUsageID(id))
|
||||||
|
return &PromoCodeUsageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete returns a delete builder for PromoCodeUsage.
|
||||||
|
func (c *PromoCodeUsageClient) Delete() *PromoCodeUsageDelete {
|
||||||
|
mutation := newPromoCodeUsageMutation(c.config, OpDelete)
|
||||||
|
return &PromoCodeUsageDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteOne returns a builder for deleting the given entity.
|
||||||
|
func (c *PromoCodeUsageClient) DeleteOne(_m *PromoCodeUsage) *PromoCodeUsageDeleteOne {
|
||||||
|
return c.DeleteOneID(_m.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteOneID returns a builder for deleting the given entity by its id.
|
||||||
|
func (c *PromoCodeUsageClient) DeleteOneID(id int64) *PromoCodeUsageDeleteOne {
|
||||||
|
builder := c.Delete().Where(promocodeusage.ID(id))
|
||||||
|
builder.mutation.id = &id
|
||||||
|
builder.mutation.op = OpDeleteOne
|
||||||
|
return &PromoCodeUsageDeleteOne{builder}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Query returns a query builder for PromoCodeUsage.
|
||||||
|
func (c *PromoCodeUsageClient) Query() *PromoCodeUsageQuery {
|
||||||
|
return &PromoCodeUsageQuery{
|
||||||
|
config: c.config,
|
||||||
|
ctx: &QueryContext{Type: TypePromoCodeUsage},
|
||||||
|
inters: c.Interceptors(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get returns a PromoCodeUsage entity by its id.
|
||||||
|
func (c *PromoCodeUsageClient) Get(ctx context.Context, id int64) (*PromoCodeUsage, error) {
|
||||||
|
return c.Query().Where(promocodeusage.ID(id)).Only(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetX is like Get, but panics if an error occurs.
|
||||||
|
func (c *PromoCodeUsageClient) GetX(ctx context.Context, id int64) *PromoCodeUsage {
|
||||||
|
obj, err := c.Get(ctx, id)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return obj
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryPromoCode queries the promo_code edge of a PromoCodeUsage.
|
||||||
|
func (c *PromoCodeUsageClient) QueryPromoCode(_m *PromoCodeUsage) *PromoCodeQuery {
|
||||||
|
query := (&PromoCodeClient{config: c.config}).Query()
|
||||||
|
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
|
||||||
|
id := _m.ID
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(promocodeusage.Table, promocodeusage.FieldID, id),
|
||||||
|
sqlgraph.To(promocode.Table, promocode.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, true, promocodeusage.PromoCodeTable, promocodeusage.PromoCodeColumn),
|
||||||
|
)
|
||||||
|
fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step)
|
||||||
|
return fromV, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryUser queries the user edge of a PromoCodeUsage.
|
||||||
|
func (c *PromoCodeUsageClient) QueryUser(_m *PromoCodeUsage) *UserQuery {
|
||||||
|
query := (&UserClient{config: c.config}).Query()
|
||||||
|
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
|
||||||
|
id := _m.ID
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(promocodeusage.Table, promocodeusage.FieldID, id),
|
||||||
|
sqlgraph.To(user.Table, user.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, true, promocodeusage.UserTable, promocodeusage.UserColumn),
|
||||||
|
)
|
||||||
|
fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step)
|
||||||
|
return fromV, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hooks returns the client hooks.
|
||||||
|
func (c *PromoCodeUsageClient) Hooks() []Hook {
|
||||||
|
return c.hooks.PromoCodeUsage
|
||||||
|
}
|
||||||
|
|
||||||
|
// Interceptors returns the client interceptors.
|
||||||
|
func (c *PromoCodeUsageClient) Interceptors() []Interceptor {
|
||||||
|
return c.inters.PromoCodeUsage
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *PromoCodeUsageClient) mutate(ctx context.Context, m *PromoCodeUsageMutation) (Value, error) {
|
||||||
|
switch m.Op() {
|
||||||
|
case OpCreate:
|
||||||
|
return (&PromoCodeUsageCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||||
|
case OpUpdate:
|
||||||
|
return (&PromoCodeUsageUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||||
|
case OpUpdateOne:
|
||||||
|
return (&PromoCodeUsageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||||
|
case OpDelete, OpDeleteOne:
|
||||||
|
return (&PromoCodeUsageDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("ent: unknown PromoCodeUsage mutation op: %q", m.Op())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// ProxyClient is a client for the Proxy schema.
|
// ProxyClient is a client for the Proxy schema.
|
||||||
type ProxyClient struct {
|
type ProxyClient struct {
|
||||||
config
|
config
|
||||||
@@ -1950,6 +2280,22 @@ func (c *UserClient) QueryAttributeValues(_m *User) *UserAttributeValueQuery {
|
|||||||
return query
|
return query
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// QueryPromoCodeUsages queries the promo_code_usages edge of a User.
|
||||||
|
func (c *UserClient) QueryPromoCodeUsages(_m *User) *PromoCodeUsageQuery {
|
||||||
|
query := (&PromoCodeUsageClient{config: c.config}).Query()
|
||||||
|
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
|
||||||
|
id := _m.ID
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(user.Table, user.FieldID, id),
|
||||||
|
sqlgraph.To(promocodeusage.Table, promocodeusage.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, false, user.PromoCodeUsagesTable, user.PromoCodeUsagesColumn),
|
||||||
|
)
|
||||||
|
fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step)
|
||||||
|
return fromV, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
// QueryUserAllowedGroups queries the user_allowed_groups edge of a User.
|
// QueryUserAllowedGroups queries the user_allowed_groups edge of a User.
|
||||||
func (c *UserClient) QueryUserAllowedGroups(_m *User) *UserAllowedGroupQuery {
|
func (c *UserClient) QueryUserAllowedGroups(_m *User) *UserAllowedGroupQuery {
|
||||||
query := (&UserAllowedGroupClient{config: c.config}).Query()
|
query := (&UserAllowedGroupClient{config: c.config}).Query()
|
||||||
@@ -2627,14 +2973,14 @@ func (c *UserSubscriptionClient) mutate(ctx context.Context, m *UserSubscription
|
|||||||
// hooks and interceptors per client, for fast access.
|
// hooks and interceptors per client, for fast access.
|
||||||
type (
|
type (
|
||||||
hooks struct {
|
hooks struct {
|
||||||
APIKey, Account, AccountGroup, Group, Proxy, RedeemCode, Setting, UsageLog,
|
APIKey, Account, AccountGroup, Group, PromoCode, PromoCodeUsage, Proxy,
|
||||||
User, UserAllowedGroup, UserAttributeDefinition, UserAttributeValue,
|
RedeemCode, Setting, UsageLog, User, UserAllowedGroup, UserAttributeDefinition,
|
||||||
UserSubscription []ent.Hook
|
UserAttributeValue, UserSubscription []ent.Hook
|
||||||
}
|
}
|
||||||
inters struct {
|
inters struct {
|
||||||
APIKey, Account, AccountGroup, Group, Proxy, RedeemCode, Setting, UsageLog,
|
APIKey, Account, AccountGroup, Group, PromoCode, PromoCodeUsage, Proxy,
|
||||||
User, UserAllowedGroup, UserAttributeDefinition, UserAttributeValue,
|
RedeemCode, Setting, UsageLog, User, UserAllowedGroup, UserAttributeDefinition,
|
||||||
UserSubscription []ent.Interceptor
|
UserAttributeValue, UserSubscription []ent.Interceptor
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@@ -16,6 +16,8 @@ import (
|
|||||||
"github.com/Wei-Shaw/sub2api/ent/accountgroup"
|
"github.com/Wei-Shaw/sub2api/ent/accountgroup"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/group"
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/proxy"
|
"github.com/Wei-Shaw/sub2api/ent/proxy"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/setting"
|
"github.com/Wei-Shaw/sub2api/ent/setting"
|
||||||
@@ -89,6 +91,8 @@ func checkColumn(t, c string) error {
|
|||||||
account.Table: account.ValidColumn,
|
account.Table: account.ValidColumn,
|
||||||
accountgroup.Table: accountgroup.ValidColumn,
|
accountgroup.Table: accountgroup.ValidColumn,
|
||||||
group.Table: group.ValidColumn,
|
group.Table: group.ValidColumn,
|
||||||
|
promocode.Table: promocode.ValidColumn,
|
||||||
|
promocodeusage.Table: promocodeusage.ValidColumn,
|
||||||
proxy.Table: proxy.ValidColumn,
|
proxy.Table: proxy.ValidColumn,
|
||||||
redeemcode.Table: redeemcode.ValidColumn,
|
redeemcode.Table: redeemcode.ValidColumn,
|
||||||
setting.Table: setting.ValidColumn,
|
setting.Table: setting.ValidColumn,
|
||||||
|
|||||||
@@ -2,4 +2,5 @@
|
|||||||
package ent
|
package ent
|
||||||
|
|
||||||
// 启用 sql/execquery 以生成 ExecContext/QueryContext 的透传接口,便于事务内执行原生 SQL。
|
// 启用 sql/execquery 以生成 ExecContext/QueryContext 的透传接口,便于事务内执行原生 SQL。
|
||||||
//go:generate go run -mod=mod entgo.io/ent/cmd/ent generate --feature sql/upsert,intercept,sql/execquery --idtype int64 ./schema
|
// 启用 sql/lock 以支持 FOR UPDATE 行锁。
|
||||||
|
//go:generate go run -mod=mod entgo.io/ent/cmd/ent generate --feature sql/upsert,intercept,sql/execquery,sql/lock --idtype int64 ./schema
|
||||||
|
|||||||
@@ -9,6 +9,7 @@ import (
|
|||||||
"math"
|
"math"
|
||||||
|
|
||||||
"entgo.io/ent"
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
"entgo.io/ent/dialect/sql"
|
"entgo.io/ent/dialect/sql"
|
||||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
"entgo.io/ent/schema/field"
|
"entgo.io/ent/schema/field"
|
||||||
@@ -39,6 +40,7 @@ type GroupQuery struct {
|
|||||||
withAllowedUsers *UserQuery
|
withAllowedUsers *UserQuery
|
||||||
withAccountGroups *AccountGroupQuery
|
withAccountGroups *AccountGroupQuery
|
||||||
withUserAllowedGroups *UserAllowedGroupQuery
|
withUserAllowedGroups *UserAllowedGroupQuery
|
||||||
|
modifiers []func(*sql.Selector)
|
||||||
// intermediate query (i.e. traversal path).
|
// intermediate query (i.e. traversal path).
|
||||||
sql *sql.Selector
|
sql *sql.Selector
|
||||||
path func(context.Context) (*sql.Selector, error)
|
path func(context.Context) (*sql.Selector, error)
|
||||||
@@ -643,6 +645,9 @@ func (_q *GroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Group,
|
|||||||
node.Edges.loadedTypes = loadedTypes
|
node.Edges.loadedTypes = loadedTypes
|
||||||
return node.assignValues(columns, values)
|
return node.assignValues(columns, values)
|
||||||
}
|
}
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
for i := range hooks {
|
for i := range hooks {
|
||||||
hooks[i](ctx, _spec)
|
hooks[i](ctx, _spec)
|
||||||
}
|
}
|
||||||
@@ -1025,6 +1030,9 @@ func (_q *GroupQuery) loadUserAllowedGroups(ctx context.Context, query *UserAllo
|
|||||||
|
|
||||||
func (_q *GroupQuery) sqlCount(ctx context.Context) (int, error) {
|
func (_q *GroupQuery) sqlCount(ctx context.Context) (int, error) {
|
||||||
_spec := _q.querySpec()
|
_spec := _q.querySpec()
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
_spec.Node.Columns = _q.ctx.Fields
|
_spec.Node.Columns = _q.ctx.Fields
|
||||||
if len(_q.ctx.Fields) > 0 {
|
if len(_q.ctx.Fields) > 0 {
|
||||||
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
||||||
@@ -1087,6 +1095,9 @@ func (_q *GroupQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
|||||||
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
||||||
selector.Distinct()
|
selector.Distinct()
|
||||||
}
|
}
|
||||||
|
for _, m := range _q.modifiers {
|
||||||
|
m(selector)
|
||||||
|
}
|
||||||
for _, p := range _q.predicates {
|
for _, p := range _q.predicates {
|
||||||
p(selector)
|
p(selector)
|
||||||
}
|
}
|
||||||
@@ -1104,6 +1115,32 @@ func (_q *GroupQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
|||||||
return selector
|
return selector
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||||
|
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||||
|
// either committed or rolled-back.
|
||||||
|
func (_q *GroupQuery) ForUpdate(opts ...sql.LockOption) *GroupQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForUpdate(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||||
|
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||||
|
// until your transaction commits.
|
||||||
|
func (_q *GroupQuery) ForShare(opts ...sql.LockOption) *GroupQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForShare(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
// GroupGroupBy is the group-by builder for Group entities.
|
// GroupGroupBy is the group-by builder for Group entities.
|
||||||
type GroupGroupBy struct {
|
type GroupGroupBy struct {
|
||||||
selector
|
selector
|
||||||
|
|||||||
@@ -57,6 +57,30 @@ func (f GroupFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error
|
|||||||
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.GroupMutation", m)
|
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.GroupMutation", m)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// The PromoCodeFunc type is an adapter to allow the use of ordinary
|
||||||
|
// function as PromoCode mutator.
|
||||||
|
type PromoCodeFunc func(context.Context, *ent.PromoCodeMutation) (ent.Value, error)
|
||||||
|
|
||||||
|
// Mutate calls f(ctx, m).
|
||||||
|
func (f PromoCodeFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
|
||||||
|
if mv, ok := m.(*ent.PromoCodeMutation); ok {
|
||||||
|
return f(ctx, mv)
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.PromoCodeMutation", m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The PromoCodeUsageFunc type is an adapter to allow the use of ordinary
|
||||||
|
// function as PromoCodeUsage mutator.
|
||||||
|
type PromoCodeUsageFunc func(context.Context, *ent.PromoCodeUsageMutation) (ent.Value, error)
|
||||||
|
|
||||||
|
// Mutate calls f(ctx, m).
|
||||||
|
func (f PromoCodeUsageFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
|
||||||
|
if mv, ok := m.(*ent.PromoCodeUsageMutation); ok {
|
||||||
|
return f(ctx, mv)
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.PromoCodeUsageMutation", m)
|
||||||
|
}
|
||||||
|
|
||||||
// The ProxyFunc type is an adapter to allow the use of ordinary
|
// The ProxyFunc type is an adapter to allow the use of ordinary
|
||||||
// function as Proxy mutator.
|
// function as Proxy mutator.
|
||||||
type ProxyFunc func(context.Context, *ent.ProxyMutation) (ent.Value, error)
|
type ProxyFunc func(context.Context, *ent.ProxyMutation) (ent.Value, error)
|
||||||
|
|||||||
@@ -13,6 +13,8 @@ import (
|
|||||||
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/group"
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/proxy"
|
"github.com/Wei-Shaw/sub2api/ent/proxy"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/setting"
|
"github.com/Wei-Shaw/sub2api/ent/setting"
|
||||||
@@ -188,6 +190,60 @@ func (f TraverseGroup) Traverse(ctx context.Context, q ent.Query) error {
|
|||||||
return fmt.Errorf("unexpected query type %T. expect *ent.GroupQuery", q)
|
return fmt.Errorf("unexpected query type %T. expect *ent.GroupQuery", q)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// The PromoCodeFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||||
|
type PromoCodeFunc func(context.Context, *ent.PromoCodeQuery) (ent.Value, error)
|
||||||
|
|
||||||
|
// Query calls f(ctx, q).
|
||||||
|
func (f PromoCodeFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||||
|
if q, ok := q.(*ent.PromoCodeQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("unexpected query type %T. expect *ent.PromoCodeQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The TraversePromoCode type is an adapter to allow the use of ordinary function as Traverser.
|
||||||
|
type TraversePromoCode func(context.Context, *ent.PromoCodeQuery) error
|
||||||
|
|
||||||
|
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||||
|
func (f TraversePromoCode) Intercept(next ent.Querier) ent.Querier {
|
||||||
|
return next
|
||||||
|
}
|
||||||
|
|
||||||
|
// Traverse calls f(ctx, q).
|
||||||
|
func (f TraversePromoCode) Traverse(ctx context.Context, q ent.Query) error {
|
||||||
|
if q, ok := q.(*ent.PromoCodeQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("unexpected query type %T. expect *ent.PromoCodeQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The PromoCodeUsageFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||||
|
type PromoCodeUsageFunc func(context.Context, *ent.PromoCodeUsageQuery) (ent.Value, error)
|
||||||
|
|
||||||
|
// Query calls f(ctx, q).
|
||||||
|
func (f PromoCodeUsageFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||||
|
if q, ok := q.(*ent.PromoCodeUsageQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("unexpected query type %T. expect *ent.PromoCodeUsageQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The TraversePromoCodeUsage type is an adapter to allow the use of ordinary function as Traverser.
|
||||||
|
type TraversePromoCodeUsage func(context.Context, *ent.PromoCodeUsageQuery) error
|
||||||
|
|
||||||
|
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||||
|
func (f TraversePromoCodeUsage) Intercept(next ent.Querier) ent.Querier {
|
||||||
|
return next
|
||||||
|
}
|
||||||
|
|
||||||
|
// Traverse calls f(ctx, q).
|
||||||
|
func (f TraversePromoCodeUsage) Traverse(ctx context.Context, q ent.Query) error {
|
||||||
|
if q, ok := q.(*ent.PromoCodeUsageQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("unexpected query type %T. expect *ent.PromoCodeUsageQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
// The ProxyFunc type is an adapter to allow the use of ordinary function as a Querier.
|
// The ProxyFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||||
type ProxyFunc func(context.Context, *ent.ProxyQuery) (ent.Value, error)
|
type ProxyFunc func(context.Context, *ent.ProxyQuery) (ent.Value, error)
|
||||||
|
|
||||||
@@ -442,6 +498,10 @@ func NewQuery(q ent.Query) (Query, error) {
|
|||||||
return &query[*ent.AccountGroupQuery, predicate.AccountGroup, accountgroup.OrderOption]{typ: ent.TypeAccountGroup, tq: q}, nil
|
return &query[*ent.AccountGroupQuery, predicate.AccountGroup, accountgroup.OrderOption]{typ: ent.TypeAccountGroup, tq: q}, nil
|
||||||
case *ent.GroupQuery:
|
case *ent.GroupQuery:
|
||||||
return &query[*ent.GroupQuery, predicate.Group, group.OrderOption]{typ: ent.TypeGroup, tq: q}, nil
|
return &query[*ent.GroupQuery, predicate.Group, group.OrderOption]{typ: ent.TypeGroup, tq: q}, nil
|
||||||
|
case *ent.PromoCodeQuery:
|
||||||
|
return &query[*ent.PromoCodeQuery, predicate.PromoCode, promocode.OrderOption]{typ: ent.TypePromoCode, tq: q}, nil
|
||||||
|
case *ent.PromoCodeUsageQuery:
|
||||||
|
return &query[*ent.PromoCodeUsageQuery, predicate.PromoCodeUsage, promocodeusage.OrderOption]{typ: ent.TypePromoCodeUsage, tq: q}, nil
|
||||||
case *ent.ProxyQuery:
|
case *ent.ProxyQuery:
|
||||||
return &query[*ent.ProxyQuery, predicate.Proxy, proxy.OrderOption]{typ: ent.TypeProxy, tq: q}, nil
|
return &query[*ent.ProxyQuery, predicate.Proxy, proxy.OrderOption]{typ: ent.TypeProxy, tq: q}, nil
|
||||||
case *ent.RedeemCodeQuery:
|
case *ent.RedeemCodeQuery:
|
||||||
|
|||||||
@@ -18,6 +18,8 @@ var (
|
|||||||
{Name: "key", Type: field.TypeString, Unique: true, Size: 128},
|
{Name: "key", Type: field.TypeString, Unique: true, Size: 128},
|
||||||
{Name: "name", Type: field.TypeString, Size: 100},
|
{Name: "name", Type: field.TypeString, Size: 100},
|
||||||
{Name: "status", Type: field.TypeString, Size: 20, Default: "active"},
|
{Name: "status", Type: field.TypeString, Size: 20, Default: "active"},
|
||||||
|
{Name: "ip_whitelist", Type: field.TypeJSON, Nullable: true},
|
||||||
|
{Name: "ip_blacklist", Type: field.TypeJSON, Nullable: true},
|
||||||
{Name: "group_id", Type: field.TypeInt64, Nullable: true},
|
{Name: "group_id", Type: field.TypeInt64, Nullable: true},
|
||||||
{Name: "user_id", Type: field.TypeInt64},
|
{Name: "user_id", Type: field.TypeInt64},
|
||||||
}
|
}
|
||||||
@@ -29,13 +31,13 @@ var (
|
|||||||
ForeignKeys: []*schema.ForeignKey{
|
ForeignKeys: []*schema.ForeignKey{
|
||||||
{
|
{
|
||||||
Symbol: "api_keys_groups_api_keys",
|
Symbol: "api_keys_groups_api_keys",
|
||||||
Columns: []*schema.Column{APIKeysColumns[7]},
|
Columns: []*schema.Column{APIKeysColumns[9]},
|
||||||
RefColumns: []*schema.Column{GroupsColumns[0]},
|
RefColumns: []*schema.Column{GroupsColumns[0]},
|
||||||
OnDelete: schema.SetNull,
|
OnDelete: schema.SetNull,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Symbol: "api_keys_users_api_keys",
|
Symbol: "api_keys_users_api_keys",
|
||||||
Columns: []*schema.Column{APIKeysColumns[8]},
|
Columns: []*schema.Column{APIKeysColumns[10]},
|
||||||
RefColumns: []*schema.Column{UsersColumns[0]},
|
RefColumns: []*schema.Column{UsersColumns[0]},
|
||||||
OnDelete: schema.NoAction,
|
OnDelete: schema.NoAction,
|
||||||
},
|
},
|
||||||
@@ -44,12 +46,12 @@ var (
|
|||||||
{
|
{
|
||||||
Name: "apikey_user_id",
|
Name: "apikey_user_id",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{APIKeysColumns[8]},
|
Columns: []*schema.Column{APIKeysColumns[10]},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "apikey_group_id",
|
Name: "apikey_group_id",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{APIKeysColumns[7]},
|
Columns: []*schema.Column{APIKeysColumns[9]},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "apikey_status",
|
Name: "apikey_status",
|
||||||
@@ -257,6 +259,82 @@ var (
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
// PromoCodesColumns holds the columns for the "promo_codes" table.
|
||||||
|
PromoCodesColumns = []*schema.Column{
|
||||||
|
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||||
|
{Name: "code", Type: field.TypeString, Unique: true, Size: 32},
|
||||||
|
{Name: "bonus_amount", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||||
|
{Name: "max_uses", Type: field.TypeInt, Default: 0},
|
||||||
|
{Name: "used_count", Type: field.TypeInt, Default: 0},
|
||||||
|
{Name: "status", Type: field.TypeString, Size: 20, Default: "active"},
|
||||||
|
{Name: "expires_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "notes", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}},
|
||||||
|
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
}
|
||||||
|
// PromoCodesTable holds the schema information for the "promo_codes" table.
|
||||||
|
PromoCodesTable = &schema.Table{
|
||||||
|
Name: "promo_codes",
|
||||||
|
Columns: PromoCodesColumns,
|
||||||
|
PrimaryKey: []*schema.Column{PromoCodesColumns[0]},
|
||||||
|
Indexes: []*schema.Index{
|
||||||
|
{
|
||||||
|
Name: "promocode_status",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{PromoCodesColumns[5]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "promocode_expires_at",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{PromoCodesColumns[6]},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
// PromoCodeUsagesColumns holds the columns for the "promo_code_usages" table.
|
||||||
|
PromoCodeUsagesColumns = []*schema.Column{
|
||||||
|
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||||
|
{Name: "bonus_amount", Type: field.TypeFloat64, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||||
|
{Name: "used_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "promo_code_id", Type: field.TypeInt64},
|
||||||
|
{Name: "user_id", Type: field.TypeInt64},
|
||||||
|
}
|
||||||
|
// PromoCodeUsagesTable holds the schema information for the "promo_code_usages" table.
|
||||||
|
PromoCodeUsagesTable = &schema.Table{
|
||||||
|
Name: "promo_code_usages",
|
||||||
|
Columns: PromoCodeUsagesColumns,
|
||||||
|
PrimaryKey: []*schema.Column{PromoCodeUsagesColumns[0]},
|
||||||
|
ForeignKeys: []*schema.ForeignKey{
|
||||||
|
{
|
||||||
|
Symbol: "promo_code_usages_promo_codes_usage_records",
|
||||||
|
Columns: []*schema.Column{PromoCodeUsagesColumns[3]},
|
||||||
|
RefColumns: []*schema.Column{PromoCodesColumns[0]},
|
||||||
|
OnDelete: schema.NoAction,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Symbol: "promo_code_usages_users_promo_code_usages",
|
||||||
|
Columns: []*schema.Column{PromoCodeUsagesColumns[4]},
|
||||||
|
RefColumns: []*schema.Column{UsersColumns[0]},
|
||||||
|
OnDelete: schema.NoAction,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Indexes: []*schema.Index{
|
||||||
|
{
|
||||||
|
Name: "promocodeusage_promo_code_id",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{PromoCodeUsagesColumns[3]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "promocodeusage_user_id",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{PromoCodeUsagesColumns[4]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "promocodeusage_promo_code_id_user_id",
|
||||||
|
Unique: true,
|
||||||
|
Columns: []*schema.Column{PromoCodeUsagesColumns[3], PromoCodeUsagesColumns[4]},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
// ProxiesColumns holds the columns for the "proxies" table.
|
// ProxiesColumns holds the columns for the "proxies" table.
|
||||||
ProxiesColumns = []*schema.Column{
|
ProxiesColumns = []*schema.Column{
|
||||||
{Name: "id", Type: field.TypeInt64, Increment: true},
|
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||||
@@ -376,6 +454,7 @@ var (
|
|||||||
{Name: "duration_ms", Type: field.TypeInt, Nullable: true},
|
{Name: "duration_ms", Type: field.TypeInt, Nullable: true},
|
||||||
{Name: "first_token_ms", Type: field.TypeInt, Nullable: true},
|
{Name: "first_token_ms", Type: field.TypeInt, Nullable: true},
|
||||||
{Name: "user_agent", Type: field.TypeString, Nullable: true, Size: 512},
|
{Name: "user_agent", Type: field.TypeString, Nullable: true, Size: 512},
|
||||||
|
{Name: "ip_address", Type: field.TypeString, Nullable: true, Size: 45},
|
||||||
{Name: "image_count", Type: field.TypeInt, Default: 0},
|
{Name: "image_count", Type: field.TypeInt, Default: 0},
|
||||||
{Name: "image_size", Type: field.TypeString, Nullable: true, Size: 10},
|
{Name: "image_size", Type: field.TypeString, Nullable: true, Size: 10},
|
||||||
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
@@ -393,31 +472,31 @@ var (
|
|||||||
ForeignKeys: []*schema.ForeignKey{
|
ForeignKeys: []*schema.ForeignKey{
|
||||||
{
|
{
|
||||||
Symbol: "usage_logs_api_keys_usage_logs",
|
Symbol: "usage_logs_api_keys_usage_logs",
|
||||||
Columns: []*schema.Column{UsageLogsColumns[24]},
|
Columns: []*schema.Column{UsageLogsColumns[25]},
|
||||||
RefColumns: []*schema.Column{APIKeysColumns[0]},
|
RefColumns: []*schema.Column{APIKeysColumns[0]},
|
||||||
OnDelete: schema.NoAction,
|
OnDelete: schema.NoAction,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Symbol: "usage_logs_accounts_usage_logs",
|
Symbol: "usage_logs_accounts_usage_logs",
|
||||||
Columns: []*schema.Column{UsageLogsColumns[25]},
|
Columns: []*schema.Column{UsageLogsColumns[26]},
|
||||||
RefColumns: []*schema.Column{AccountsColumns[0]},
|
RefColumns: []*schema.Column{AccountsColumns[0]},
|
||||||
OnDelete: schema.NoAction,
|
OnDelete: schema.NoAction,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Symbol: "usage_logs_groups_usage_logs",
|
Symbol: "usage_logs_groups_usage_logs",
|
||||||
Columns: []*schema.Column{UsageLogsColumns[26]},
|
Columns: []*schema.Column{UsageLogsColumns[27]},
|
||||||
RefColumns: []*schema.Column{GroupsColumns[0]},
|
RefColumns: []*schema.Column{GroupsColumns[0]},
|
||||||
OnDelete: schema.SetNull,
|
OnDelete: schema.SetNull,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Symbol: "usage_logs_users_usage_logs",
|
Symbol: "usage_logs_users_usage_logs",
|
||||||
Columns: []*schema.Column{UsageLogsColumns[27]},
|
Columns: []*schema.Column{UsageLogsColumns[28]},
|
||||||
RefColumns: []*schema.Column{UsersColumns[0]},
|
RefColumns: []*schema.Column{UsersColumns[0]},
|
||||||
OnDelete: schema.NoAction,
|
OnDelete: schema.NoAction,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Symbol: "usage_logs_user_subscriptions_usage_logs",
|
Symbol: "usage_logs_user_subscriptions_usage_logs",
|
||||||
Columns: []*schema.Column{UsageLogsColumns[28]},
|
Columns: []*schema.Column{UsageLogsColumns[29]},
|
||||||
RefColumns: []*schema.Column{UserSubscriptionsColumns[0]},
|
RefColumns: []*schema.Column{UserSubscriptionsColumns[0]},
|
||||||
OnDelete: schema.SetNull,
|
OnDelete: schema.SetNull,
|
||||||
},
|
},
|
||||||
@@ -426,32 +505,32 @@ var (
|
|||||||
{
|
{
|
||||||
Name: "usagelog_user_id",
|
Name: "usagelog_user_id",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{UsageLogsColumns[27]},
|
Columns: []*schema.Column{UsageLogsColumns[28]},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "usagelog_api_key_id",
|
Name: "usagelog_api_key_id",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{UsageLogsColumns[24]},
|
Columns: []*schema.Column{UsageLogsColumns[25]},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "usagelog_account_id",
|
Name: "usagelog_account_id",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{UsageLogsColumns[25]},
|
Columns: []*schema.Column{UsageLogsColumns[26]},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "usagelog_group_id",
|
Name: "usagelog_group_id",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{UsageLogsColumns[26]},
|
Columns: []*schema.Column{UsageLogsColumns[27]},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "usagelog_subscription_id",
|
Name: "usagelog_subscription_id",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{UsageLogsColumns[28]},
|
Columns: []*schema.Column{UsageLogsColumns[29]},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "usagelog_created_at",
|
Name: "usagelog_created_at",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{UsageLogsColumns[23]},
|
Columns: []*schema.Column{UsageLogsColumns[24]},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "usagelog_model",
|
Name: "usagelog_model",
|
||||||
@@ -466,12 +545,12 @@ var (
|
|||||||
{
|
{
|
||||||
Name: "usagelog_user_id_created_at",
|
Name: "usagelog_user_id_created_at",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{UsageLogsColumns[27], UsageLogsColumns[23]},
|
Columns: []*schema.Column{UsageLogsColumns[28], UsageLogsColumns[24]},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "usagelog_api_key_id_created_at",
|
Name: "usagelog_api_key_id_created_at",
|
||||||
Unique: false,
|
Unique: false,
|
||||||
Columns: []*schema.Column{UsageLogsColumns[24], UsageLogsColumns[23]},
|
Columns: []*schema.Column{UsageLogsColumns[25], UsageLogsColumns[24]},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@@ -717,6 +796,8 @@ var (
|
|||||||
AccountsTable,
|
AccountsTable,
|
||||||
AccountGroupsTable,
|
AccountGroupsTable,
|
||||||
GroupsTable,
|
GroupsTable,
|
||||||
|
PromoCodesTable,
|
||||||
|
PromoCodeUsagesTable,
|
||||||
ProxiesTable,
|
ProxiesTable,
|
||||||
RedeemCodesTable,
|
RedeemCodesTable,
|
||||||
SettingsTable,
|
SettingsTable,
|
||||||
@@ -747,6 +828,14 @@ func init() {
|
|||||||
GroupsTable.Annotation = &entsql.Annotation{
|
GroupsTable.Annotation = &entsql.Annotation{
|
||||||
Table: "groups",
|
Table: "groups",
|
||||||
}
|
}
|
||||||
|
PromoCodesTable.Annotation = &entsql.Annotation{
|
||||||
|
Table: "promo_codes",
|
||||||
|
}
|
||||||
|
PromoCodeUsagesTable.ForeignKeys[0].RefTable = PromoCodesTable
|
||||||
|
PromoCodeUsagesTable.ForeignKeys[1].RefTable = UsersTable
|
||||||
|
PromoCodeUsagesTable.Annotation = &entsql.Annotation{
|
||||||
|
Table: "promo_code_usages",
|
||||||
|
}
|
||||||
ProxiesTable.Annotation = &entsql.Annotation{
|
ProxiesTable.Annotation = &entsql.Annotation{
|
||||||
Table: "proxies",
|
Table: "proxies",
|
||||||
}
|
}
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -18,6 +18,12 @@ type AccountGroup func(*sql.Selector)
|
|||||||
// Group is the predicate function for group builders.
|
// Group is the predicate function for group builders.
|
||||||
type Group func(*sql.Selector)
|
type Group func(*sql.Selector)
|
||||||
|
|
||||||
|
// PromoCode is the predicate function for promocode builders.
|
||||||
|
type PromoCode func(*sql.Selector)
|
||||||
|
|
||||||
|
// PromoCodeUsage is the predicate function for promocodeusage builders.
|
||||||
|
type PromoCodeUsage func(*sql.Selector)
|
||||||
|
|
||||||
// Proxy is the predicate function for proxy builders.
|
// Proxy is the predicate function for proxy builders.
|
||||||
type Proxy func(*sql.Selector)
|
type Proxy func(*sql.Selector)
|
||||||
|
|
||||||
|
|||||||
228
backend/ent/promocode.go
Normal file
228
backend/ent/promocode.go
Normal file
@@ -0,0 +1,228 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PromoCode is the model entity for the PromoCode schema.
|
||||||
|
type PromoCode struct {
|
||||||
|
config `json:"-"`
|
||||||
|
// ID of the ent.
|
||||||
|
ID int64 `json:"id,omitempty"`
|
||||||
|
// 优惠码
|
||||||
|
Code string `json:"code,omitempty"`
|
||||||
|
// 赠送余额金额
|
||||||
|
BonusAmount float64 `json:"bonus_amount,omitempty"`
|
||||||
|
// 最大使用次数,0表示无限制
|
||||||
|
MaxUses int `json:"max_uses,omitempty"`
|
||||||
|
// 已使用次数
|
||||||
|
UsedCount int `json:"used_count,omitempty"`
|
||||||
|
// 状态: active, disabled
|
||||||
|
Status string `json:"status,omitempty"`
|
||||||
|
// 过期时间,null表示永不过期
|
||||||
|
ExpiresAt *time.Time `json:"expires_at,omitempty"`
|
||||||
|
// 备注
|
||||||
|
Notes *string `json:"notes,omitempty"`
|
||||||
|
// CreatedAt holds the value of the "created_at" field.
|
||||||
|
CreatedAt time.Time `json:"created_at,omitempty"`
|
||||||
|
// UpdatedAt holds the value of the "updated_at" field.
|
||||||
|
UpdatedAt time.Time `json:"updated_at,omitempty"`
|
||||||
|
// Edges holds the relations/edges for other nodes in the graph.
|
||||||
|
// The values are being populated by the PromoCodeQuery when eager-loading is set.
|
||||||
|
Edges PromoCodeEdges `json:"edges"`
|
||||||
|
selectValues sql.SelectValues
|
||||||
|
}
|
||||||
|
|
||||||
|
// PromoCodeEdges holds the relations/edges for other nodes in the graph.
|
||||||
|
type PromoCodeEdges struct {
|
||||||
|
// UsageRecords holds the value of the usage_records edge.
|
||||||
|
UsageRecords []*PromoCodeUsage `json:"usage_records,omitempty"`
|
||||||
|
// loadedTypes holds the information for reporting if a
|
||||||
|
// type was loaded (or requested) in eager-loading or not.
|
||||||
|
loadedTypes [1]bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsageRecordsOrErr returns the UsageRecords value or an error if the edge
|
||||||
|
// was not loaded in eager-loading.
|
||||||
|
func (e PromoCodeEdges) UsageRecordsOrErr() ([]*PromoCodeUsage, error) {
|
||||||
|
if e.loadedTypes[0] {
|
||||||
|
return e.UsageRecords, nil
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "usage_records"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// scanValues returns the types for scanning values from sql.Rows.
|
||||||
|
func (*PromoCode) scanValues(columns []string) ([]any, error) {
|
||||||
|
values := make([]any, len(columns))
|
||||||
|
for i := range columns {
|
||||||
|
switch columns[i] {
|
||||||
|
case promocode.FieldBonusAmount:
|
||||||
|
values[i] = new(sql.NullFloat64)
|
||||||
|
case promocode.FieldID, promocode.FieldMaxUses, promocode.FieldUsedCount:
|
||||||
|
values[i] = new(sql.NullInt64)
|
||||||
|
case promocode.FieldCode, promocode.FieldStatus, promocode.FieldNotes:
|
||||||
|
values[i] = new(sql.NullString)
|
||||||
|
case promocode.FieldExpiresAt, promocode.FieldCreatedAt, promocode.FieldUpdatedAt:
|
||||||
|
values[i] = new(sql.NullTime)
|
||||||
|
default:
|
||||||
|
values[i] = new(sql.UnknownType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return values, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// assignValues assigns the values that were returned from sql.Rows (after scanning)
|
||||||
|
// to the PromoCode fields.
|
||||||
|
func (_m *PromoCode) assignValues(columns []string, values []any) error {
|
||||||
|
if m, n := len(values), len(columns); m < n {
|
||||||
|
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
|
||||||
|
}
|
||||||
|
for i := range columns {
|
||||||
|
switch columns[i] {
|
||||||
|
case promocode.FieldID:
|
||||||
|
value, ok := values[i].(*sql.NullInt64)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field id", value)
|
||||||
|
}
|
||||||
|
_m.ID = int64(value.Int64)
|
||||||
|
case promocode.FieldCode:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field code", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Code = value.String
|
||||||
|
}
|
||||||
|
case promocode.FieldBonusAmount:
|
||||||
|
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field bonus_amount", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.BonusAmount = value.Float64
|
||||||
|
}
|
||||||
|
case promocode.FieldMaxUses:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field max_uses", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.MaxUses = int(value.Int64)
|
||||||
|
}
|
||||||
|
case promocode.FieldUsedCount:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field used_count", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.UsedCount = int(value.Int64)
|
||||||
|
}
|
||||||
|
case promocode.FieldStatus:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field status", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Status = value.String
|
||||||
|
}
|
||||||
|
case promocode.FieldExpiresAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field expires_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.ExpiresAt = new(time.Time)
|
||||||
|
*_m.ExpiresAt = value.Time
|
||||||
|
}
|
||||||
|
case promocode.FieldNotes:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field notes", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Notes = new(string)
|
||||||
|
*_m.Notes = value.String
|
||||||
|
}
|
||||||
|
case promocode.FieldCreatedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field created_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.CreatedAt = value.Time
|
||||||
|
}
|
||||||
|
case promocode.FieldUpdatedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field updated_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.UpdatedAt = value.Time
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
_m.selectValues.Set(columns[i], values[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value returns the ent.Value that was dynamically selected and assigned to the PromoCode.
|
||||||
|
// This includes values selected through modifiers, order, etc.
|
||||||
|
func (_m *PromoCode) Value(name string) (ent.Value, error) {
|
||||||
|
return _m.selectValues.Get(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryUsageRecords queries the "usage_records" edge of the PromoCode entity.
|
||||||
|
func (_m *PromoCode) QueryUsageRecords() *PromoCodeUsageQuery {
|
||||||
|
return NewPromoCodeClient(_m.config).QueryUsageRecords(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update returns a builder for updating this PromoCode.
|
||||||
|
// Note that you need to call PromoCode.Unwrap() before calling this method if this PromoCode
|
||||||
|
// was returned from a transaction, and the transaction was committed or rolled back.
|
||||||
|
func (_m *PromoCode) Update() *PromoCodeUpdateOne {
|
||||||
|
return NewPromoCodeClient(_m.config).UpdateOne(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unwrap unwraps the PromoCode entity that was returned from a transaction after it was closed,
|
||||||
|
// so that all future queries will be executed through the driver which created the transaction.
|
||||||
|
func (_m *PromoCode) Unwrap() *PromoCode {
|
||||||
|
_tx, ok := _m.config.driver.(*txDriver)
|
||||||
|
if !ok {
|
||||||
|
panic("ent: PromoCode is not a transactional entity")
|
||||||
|
}
|
||||||
|
_m.config.driver = _tx.drv
|
||||||
|
return _m
|
||||||
|
}
|
||||||
|
|
||||||
|
// String implements the fmt.Stringer.
|
||||||
|
func (_m *PromoCode) String() string {
|
||||||
|
var builder strings.Builder
|
||||||
|
builder.WriteString("PromoCode(")
|
||||||
|
builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
|
||||||
|
builder.WriteString("code=")
|
||||||
|
builder.WriteString(_m.Code)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("bonus_amount=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.BonusAmount))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("max_uses=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.MaxUses))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("used_count=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.UsedCount))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("status=")
|
||||||
|
builder.WriteString(_m.Status)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.ExpiresAt; v != nil {
|
||||||
|
builder.WriteString("expires_at=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.Notes; v != nil {
|
||||||
|
builder.WriteString("notes=")
|
||||||
|
builder.WriteString(*v)
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("created_at=")
|
||||||
|
builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("updated_at=")
|
||||||
|
builder.WriteString(_m.UpdatedAt.Format(time.ANSIC))
|
||||||
|
builder.WriteByte(')')
|
||||||
|
return builder.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// PromoCodes is a parsable slice of PromoCode.
|
||||||
|
type PromoCodes []*PromoCode
|
||||||
165
backend/ent/promocode/promocode.go
Normal file
165
backend/ent/promocode/promocode.go
Normal file
@@ -0,0 +1,165 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package promocode
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Database identifiers for the PromoCode entity, consumed by the
// generated builders and predicates in this package.
const (
	// Label is the string label denoting the promocode type in the database.
	Label = "promo_code"
	// FieldID is the column name of the id field.
	FieldID = "id"
	// FieldCode is the column name of the code field.
	FieldCode = "code"
	// FieldBonusAmount is the column name of the bonus_amount field.
	FieldBonusAmount = "bonus_amount"
	// FieldMaxUses is the column name of the max_uses field.
	FieldMaxUses = "max_uses"
	// FieldUsedCount is the column name of the used_count field.
	FieldUsedCount = "used_count"
	// FieldStatus is the column name of the status field.
	FieldStatus = "status"
	// FieldExpiresAt is the column name of the expires_at field.
	FieldExpiresAt = "expires_at"
	// FieldNotes is the column name of the notes field.
	FieldNotes = "notes"
	// FieldCreatedAt is the column name of the created_at field.
	FieldCreatedAt = "created_at"
	// FieldUpdatedAt is the column name of the updated_at field.
	FieldUpdatedAt = "updated_at"
	// EdgeUsageRecords is the usage_records edge name used in mutations.
	EdgeUsageRecords = "usage_records"
	// Table is the table name of the promocode entity in the database.
	Table = "promo_codes"
	// UsageRecordsTable is the table holding the usage_records relation/edge.
	UsageRecordsTable = "promo_code_usages"
	// UsageRecordsInverseTable is the table name for the PromoCodeUsage entity.
	// It is duplicated here to avoid a circular dependency with the
	// "promocodeusage" package.
	UsageRecordsInverseTable = "promo_code_usages"
	// UsageRecordsColumn is the foreign-key column of the usage_records edge.
	UsageRecordsColumn = "promo_code_id"
)

// Columns holds all SQL columns for promocode fields.
var Columns = []string{
	FieldID,
	FieldCode,
	FieldBonusAmount,
	FieldMaxUses,
	FieldUsedCount,
	FieldStatus,
	FieldExpiresAt,
	FieldNotes,
	FieldCreatedAt,
	FieldUpdatedAt,
}

// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
	for _, c := range Columns {
		if c == column {
			return true
		}
	}
	return false
}
|
||||||
|
|
||||||
|
// Hooks and defaults declared by the schema. NOTE(review): in ent these
// package-level vars are assigned by the generated runtime package from the
// schema definition — confirm against this project's ent/runtime.go.
var (
	// CodeValidator is a validator for the "code" field. It is called by the builders before save.
	CodeValidator func(string) error
	// DefaultBonusAmount holds the default value on creation for the "bonus_amount" field.
	DefaultBonusAmount float64
	// DefaultMaxUses holds the default value on creation for the "max_uses" field.
	DefaultMaxUses int
	// DefaultUsedCount holds the default value on creation for the "used_count" field.
	DefaultUsedCount int
	// DefaultStatus holds the default value on creation for the "status" field.
	DefaultStatus string
	// StatusValidator is a validator for the "status" field. It is called by the builders before save.
	StatusValidator func(string) error
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
	UpdateDefaultUpdatedAt func() time.Time
)
|
||||||
|
|
||||||
|
// OrderOption defines the ordering options for the PromoCode queries.
|
||||||
|
type OrderOption func(*sql.Selector)
|
||||||
|
|
||||||
|
// ByID orders the results by the id field.
|
||||||
|
func ByID(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldID, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByCode orders the results by the code field.
|
||||||
|
func ByCode(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldCode, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByBonusAmount orders the results by the bonus_amount field.
|
||||||
|
func ByBonusAmount(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldBonusAmount, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByMaxUses orders the results by the max_uses field.
|
||||||
|
func ByMaxUses(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldMaxUses, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUsedCount orders the results by the used_count field.
|
||||||
|
func ByUsedCount(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldUsedCount, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByStatus orders the results by the status field.
|
||||||
|
func ByStatus(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldStatus, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByExpiresAt orders the results by the expires_at field.
|
||||||
|
func ByExpiresAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldExpiresAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByNotes orders the results by the notes field.
|
||||||
|
func ByNotes(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldNotes, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByCreatedAt orders the results by the created_at field.
|
||||||
|
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUpdatedAt orders the results by the updated_at field.
|
||||||
|
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUsageRecordsCount orders the results by usage_records count.
func ByUsageRecordsCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newUsageRecordsStep(), opts...)
	}
}

// ByUsageRecords orders the results by usage_records terms.
func ByUsageRecords(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		// The variadic API requires at least one term; fold the mandatory
		// first term and the optional rest into a single slice.
		sqlgraph.OrderByNeighborTerms(s, newUsageRecordsStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}

// newUsageRecordsStep builds the graph traversal step for the O2M
// usage_records edge (promo_codes.id -> promo_code_usages.promo_code_id).
func newUsageRecordsStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(UsageRecordsInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.O2M, false, UsageRecordsTable, UsageRecordsColumn),
	)
}
|
||||||
594
backend/ent/promocode/where.go
Normal file
594
backend/ent/promocode/where.go
Normal file
@@ -0,0 +1,594 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package promocode
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ID filters vertices based on their ID field.
|
||||||
|
func ID(id int64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDEQ applies the EQ predicate on the ID field.
|
||||||
|
func IDEQ(id int64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDNEQ applies the NEQ predicate on the ID field.
|
||||||
|
func IDNEQ(id int64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDIn applies the In predicate on the ID field.
|
||||||
|
func IDIn(ids ...int64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldIn(FieldID, ids...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDNotIn applies the NotIn predicate on the ID field.
|
||||||
|
func IDNotIn(ids ...int64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNotIn(FieldID, ids...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDGT applies the GT predicate on the ID field.
|
||||||
|
func IDGT(id int64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGT(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDGTE applies the GTE predicate on the ID field.
|
||||||
|
func IDGTE(id int64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGTE(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDLT applies the LT predicate on the ID field.
|
||||||
|
func IDLT(id int64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLT(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDLTE applies the LTE predicate on the ID field.
|
||||||
|
func IDLTE(id int64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLTE(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Code applies equality check predicate on the "code" field. It's identical to CodeEQ.
|
||||||
|
func Code(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BonusAmount applies equality check predicate on the "bonus_amount" field. It's identical to BonusAmountEQ.
|
||||||
|
func BonusAmount(v float64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldBonusAmount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaxUses applies equality check predicate on the "max_uses" field. It's identical to MaxUsesEQ.
|
||||||
|
func MaxUses(v int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldMaxUses, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedCount applies equality check predicate on the "used_count" field. It's identical to UsedCountEQ.
|
||||||
|
func UsedCount(v int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldUsedCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Status applies equality check predicate on the "status" field. It's identical to StatusEQ.
|
||||||
|
func Status(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAt applies equality check predicate on the "expires_at" field. It's identical to ExpiresAtEQ.
|
||||||
|
func ExpiresAt(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldExpiresAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Notes applies equality check predicate on the "notes" field. It's identical to NotesEQ.
|
||||||
|
func Notes(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
|
||||||
|
func CreatedAt(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
|
||||||
|
func UpdatedAt(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeEQ applies the EQ predicate on the "code" field.
|
||||||
|
func CodeEQ(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeNEQ applies the NEQ predicate on the "code" field.
|
||||||
|
func CodeNEQ(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNEQ(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeIn applies the In predicate on the "code" field.
|
||||||
|
func CodeIn(vs ...string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldIn(FieldCode, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeNotIn applies the NotIn predicate on the "code" field.
|
||||||
|
func CodeNotIn(vs ...string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNotIn(FieldCode, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeGT applies the GT predicate on the "code" field.
|
||||||
|
func CodeGT(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGT(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeGTE applies the GTE predicate on the "code" field.
|
||||||
|
func CodeGTE(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGTE(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeLT applies the LT predicate on the "code" field.
|
||||||
|
func CodeLT(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLT(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeLTE applies the LTE predicate on the "code" field.
|
||||||
|
func CodeLTE(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLTE(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeContains applies the Contains predicate on the "code" field.
|
||||||
|
func CodeContains(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldContains(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeHasPrefix applies the HasPrefix predicate on the "code" field.
|
||||||
|
func CodeHasPrefix(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldHasPrefix(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeHasSuffix applies the HasSuffix predicate on the "code" field.
|
||||||
|
func CodeHasSuffix(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldHasSuffix(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeEqualFold applies the EqualFold predicate on the "code" field.
|
||||||
|
func CodeEqualFold(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEqualFold(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeContainsFold applies the ContainsFold predicate on the "code" field.
|
||||||
|
func CodeContainsFold(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldContainsFold(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BonusAmountEQ applies the EQ predicate on the "bonus_amount" field.
|
||||||
|
func BonusAmountEQ(v float64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldBonusAmount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BonusAmountNEQ applies the NEQ predicate on the "bonus_amount" field.
|
||||||
|
func BonusAmountNEQ(v float64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNEQ(FieldBonusAmount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BonusAmountIn applies the In predicate on the "bonus_amount" field.
|
||||||
|
func BonusAmountIn(vs ...float64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldIn(FieldBonusAmount, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BonusAmountNotIn applies the NotIn predicate on the "bonus_amount" field.
|
||||||
|
func BonusAmountNotIn(vs ...float64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNotIn(FieldBonusAmount, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BonusAmountGT applies the GT predicate on the "bonus_amount" field.
|
||||||
|
func BonusAmountGT(v float64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGT(FieldBonusAmount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BonusAmountGTE applies the GTE predicate on the "bonus_amount" field.
|
||||||
|
func BonusAmountGTE(v float64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGTE(FieldBonusAmount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BonusAmountLT applies the LT predicate on the "bonus_amount" field.
|
||||||
|
func BonusAmountLT(v float64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLT(FieldBonusAmount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BonusAmountLTE applies the LTE predicate on the "bonus_amount" field.
|
||||||
|
func BonusAmountLTE(v float64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLTE(FieldBonusAmount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaxUsesEQ applies the EQ predicate on the "max_uses" field.
|
||||||
|
func MaxUsesEQ(v int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldMaxUses, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaxUsesNEQ applies the NEQ predicate on the "max_uses" field.
|
||||||
|
func MaxUsesNEQ(v int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNEQ(FieldMaxUses, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaxUsesIn applies the In predicate on the "max_uses" field.
|
||||||
|
func MaxUsesIn(vs ...int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldIn(FieldMaxUses, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaxUsesNotIn applies the NotIn predicate on the "max_uses" field.
|
||||||
|
func MaxUsesNotIn(vs ...int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNotIn(FieldMaxUses, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaxUsesGT applies the GT predicate on the "max_uses" field.
|
||||||
|
func MaxUsesGT(v int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGT(FieldMaxUses, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaxUsesGTE applies the GTE predicate on the "max_uses" field.
|
||||||
|
func MaxUsesGTE(v int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGTE(FieldMaxUses, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaxUsesLT applies the LT predicate on the "max_uses" field.
|
||||||
|
func MaxUsesLT(v int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLT(FieldMaxUses, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaxUsesLTE applies the LTE predicate on the "max_uses" field.
|
||||||
|
func MaxUsesLTE(v int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLTE(FieldMaxUses, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedCountEQ applies the EQ predicate on the "used_count" field.
|
||||||
|
func UsedCountEQ(v int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldUsedCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedCountNEQ applies the NEQ predicate on the "used_count" field.
|
||||||
|
func UsedCountNEQ(v int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNEQ(FieldUsedCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedCountIn applies the In predicate on the "used_count" field.
|
||||||
|
func UsedCountIn(vs ...int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldIn(FieldUsedCount, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedCountNotIn applies the NotIn predicate on the "used_count" field.
|
||||||
|
func UsedCountNotIn(vs ...int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNotIn(FieldUsedCount, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedCountGT applies the GT predicate on the "used_count" field.
|
||||||
|
func UsedCountGT(v int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGT(FieldUsedCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedCountGTE applies the GTE predicate on the "used_count" field.
|
||||||
|
func UsedCountGTE(v int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGTE(FieldUsedCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedCountLT applies the LT predicate on the "used_count" field.
|
||||||
|
func UsedCountLT(v int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLT(FieldUsedCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedCountLTE applies the LTE predicate on the "used_count" field.
|
||||||
|
func UsedCountLTE(v int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLTE(FieldUsedCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusEQ applies the EQ predicate on the "status" field.
|
||||||
|
func StatusEQ(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusNEQ applies the NEQ predicate on the "status" field.
|
||||||
|
func StatusNEQ(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNEQ(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusIn applies the In predicate on the "status" field.
|
||||||
|
func StatusIn(vs ...string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldIn(FieldStatus, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusNotIn applies the NotIn predicate on the "status" field.
|
||||||
|
func StatusNotIn(vs ...string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNotIn(FieldStatus, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusGT applies the GT predicate on the "status" field.
|
||||||
|
func StatusGT(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGT(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusGTE applies the GTE predicate on the "status" field.
|
||||||
|
func StatusGTE(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGTE(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusLT applies the LT predicate on the "status" field.
|
||||||
|
func StatusLT(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLT(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusLTE applies the LTE predicate on the "status" field.
|
||||||
|
func StatusLTE(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLTE(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusContains applies the Contains predicate on the "status" field.
|
||||||
|
func StatusContains(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldContains(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusHasPrefix applies the HasPrefix predicate on the "status" field.
|
||||||
|
func StatusHasPrefix(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldHasPrefix(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusHasSuffix applies the HasSuffix predicate on the "status" field.
|
||||||
|
func StatusHasSuffix(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldHasSuffix(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusEqualFold applies the EqualFold predicate on the "status" field.
|
||||||
|
func StatusEqualFold(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEqualFold(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusContainsFold applies the ContainsFold predicate on the "status" field.
|
||||||
|
func StatusContainsFold(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldContainsFold(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAtEQ applies the EQ predicate on the "expires_at" field.
|
||||||
|
func ExpiresAtEQ(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldExpiresAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAtNEQ applies the NEQ predicate on the "expires_at" field.
|
||||||
|
func ExpiresAtNEQ(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNEQ(FieldExpiresAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAtIn applies the In predicate on the "expires_at" field.
|
||||||
|
func ExpiresAtIn(vs ...time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldIn(FieldExpiresAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAtNotIn applies the NotIn predicate on the "expires_at" field.
|
||||||
|
func ExpiresAtNotIn(vs ...time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNotIn(FieldExpiresAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAtGT applies the GT predicate on the "expires_at" field.
|
||||||
|
func ExpiresAtGT(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGT(FieldExpiresAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAtGTE applies the GTE predicate on the "expires_at" field.
|
||||||
|
func ExpiresAtGTE(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGTE(FieldExpiresAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAtLT applies the LT predicate on the "expires_at" field.
|
||||||
|
func ExpiresAtLT(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLT(FieldExpiresAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAtLTE applies the LTE predicate on the "expires_at" field.
|
||||||
|
func ExpiresAtLTE(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLTE(FieldExpiresAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAtIsNil applies the IsNil predicate on the "expires_at" field.
|
||||||
|
func ExpiresAtIsNil() predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldIsNull(FieldExpiresAt))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAtNotNil applies the NotNil predicate on the "expires_at" field.
|
||||||
|
func ExpiresAtNotNil() predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNotNull(FieldExpiresAt))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesEQ applies the EQ predicate on the "notes" field.
|
||||||
|
func NotesEQ(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesNEQ applies the NEQ predicate on the "notes" field.
|
||||||
|
func NotesNEQ(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNEQ(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesIn applies the In predicate on the "notes" field.
|
||||||
|
func NotesIn(vs ...string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldIn(FieldNotes, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesNotIn applies the NotIn predicate on the "notes" field.
|
||||||
|
func NotesNotIn(vs ...string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNotIn(FieldNotes, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesGT applies the GT predicate on the "notes" field.
|
||||||
|
func NotesGT(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGT(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesGTE applies the GTE predicate on the "notes" field.
|
||||||
|
func NotesGTE(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGTE(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesLT applies the LT predicate on the "notes" field.
|
||||||
|
func NotesLT(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLT(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesLTE applies the LTE predicate on the "notes" field.
|
||||||
|
func NotesLTE(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLTE(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesContains applies the Contains predicate on the "notes" field.
|
||||||
|
func NotesContains(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldContains(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesHasPrefix applies the HasPrefix predicate on the "notes" field.
|
||||||
|
func NotesHasPrefix(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldHasPrefix(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesHasSuffix applies the HasSuffix predicate on the "notes" field.
|
||||||
|
func NotesHasSuffix(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldHasSuffix(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesIsNil applies the IsNil predicate on the "notes" field.
|
||||||
|
func NotesIsNil() predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldIsNull(FieldNotes))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesNotNil applies the NotNil predicate on the "notes" field.
|
||||||
|
func NotesNotNil() predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNotNull(FieldNotes))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesEqualFold applies the EqualFold predicate on the "notes" field.
|
||||||
|
func NotesEqualFold(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEqualFold(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesContainsFold applies the ContainsFold predicate on the "notes" field.
|
||||||
|
func NotesContainsFold(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldContainsFold(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
|
||||||
|
func CreatedAtEQ(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
|
||||||
|
func CreatedAtNEQ(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNEQ(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtIn applies the In predicate on the "created_at" field.
|
||||||
|
func CreatedAtIn(vs ...time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldIn(FieldCreatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
|
||||||
|
func CreatedAtNotIn(vs ...time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNotIn(FieldCreatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtGT applies the GT predicate on the "created_at" field.
|
||||||
|
func CreatedAtGT(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGT(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
|
||||||
|
func CreatedAtGTE(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGTE(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtLT applies the LT predicate on the "created_at" field.
|
||||||
|
func CreatedAtLT(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLT(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
|
||||||
|
func CreatedAtLTE(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLTE(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtEQ(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtNEQ(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNEQ(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtIn applies the In predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtIn(vs ...time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldIn(FieldUpdatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtNotIn(vs ...time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNotIn(FieldUpdatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtGT(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGT(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtGTE(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGTE(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtLT(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLT(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtLTE(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLTE(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasUsageRecords applies the HasEdge predicate on the "usage_records" edge.
|
||||||
|
func HasUsageRecords() predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(func(s *sql.Selector) {
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, false, UsageRecordsTable, UsageRecordsColumn),
|
||||||
|
)
|
||||||
|
sqlgraph.HasNeighbors(s, step)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasUsageRecordsWith applies the HasEdge predicate on the "usage_records" edge with a given conditions (other predicates).
|
||||||
|
func HasUsageRecordsWith(preds ...predicate.PromoCodeUsage) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(func(s *sql.Selector) {
|
||||||
|
step := newUsageRecordsStep()
|
||||||
|
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||||
|
for _, p := range preds {
|
||||||
|
p(s)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// And groups predicates with the AND operator between them.
|
||||||
|
func And(predicates ...predicate.PromoCode) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.AndPredicates(predicates...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Or groups predicates with the OR operator between them.
|
||||||
|
func Or(predicates ...predicate.PromoCode) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.OrPredicates(predicates...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Not applies the not operator on the given predicate.
|
||||||
|
func Not(p predicate.PromoCode) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.NotPredicates(p))
|
||||||
|
}
|
||||||
1081
backend/ent/promocode_create.go
Normal file
1081
backend/ent/promocode_create.go
Normal file
File diff suppressed because it is too large
Load Diff
88
backend/ent/promocode_delete.go
Normal file
88
backend/ent/promocode_delete.go
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PromoCodeDelete is the builder for deleting a PromoCode entity.
|
||||||
|
type PromoCodeDelete struct {
|
||||||
|
config
|
||||||
|
hooks []Hook
|
||||||
|
mutation *PromoCodeMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the PromoCodeDelete builder.
|
||||||
|
func (_d *PromoCodeDelete) Where(ps ...predicate.PromoCode) *PromoCodeDelete {
|
||||||
|
_d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||||
|
func (_d *PromoCodeDelete) Exec(ctx context.Context) (int, error) {
|
||||||
|
return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *PromoCodeDelete) ExecX(ctx context.Context) int {
|
||||||
|
n, err := _d.Exec(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_d *PromoCodeDelete) sqlExec(ctx context.Context) (int, error) {
|
||||||
|
_spec := sqlgraph.NewDeleteSpec(promocode.Table, sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64))
|
||||||
|
if ps := _d.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
|
||||||
|
if err != nil && sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
_d.mutation.done = true
|
||||||
|
return affected, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// PromoCodeDeleteOne is the builder for deleting a single PromoCode entity.
|
||||||
|
type PromoCodeDeleteOne struct {
|
||||||
|
_d *PromoCodeDelete
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the PromoCodeDelete builder.
|
||||||
|
func (_d *PromoCodeDeleteOne) Where(ps ...predicate.PromoCode) *PromoCodeDeleteOne {
|
||||||
|
_d._d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query.
|
||||||
|
func (_d *PromoCodeDeleteOne) Exec(ctx context.Context) error {
|
||||||
|
n, err := _d._d.Exec(ctx)
|
||||||
|
switch {
|
||||||
|
case err != nil:
|
||||||
|
return err
|
||||||
|
case n == 0:
|
||||||
|
return &NotFoundError{promocode.Label}
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *PromoCodeDeleteOne) ExecX(ctx context.Context) {
|
||||||
|
if err := _d.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
643
backend/ent/promocode_query.go
Normal file
643
backend/ent/promocode_query.go
Normal file
@@ -0,0 +1,643 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"database/sql/driver"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PromoCodeQuery is the builder for querying PromoCode entities.
|
||||||
|
type PromoCodeQuery struct {
|
||||||
|
config
|
||||||
|
ctx *QueryContext
|
||||||
|
order []promocode.OrderOption
|
||||||
|
inters []Interceptor
|
||||||
|
predicates []predicate.PromoCode
|
||||||
|
withUsageRecords *PromoCodeUsageQuery
|
||||||
|
modifiers []func(*sql.Selector)
|
||||||
|
// intermediate query (i.e. traversal path).
|
||||||
|
sql *sql.Selector
|
||||||
|
path func(context.Context) (*sql.Selector, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where adds a new predicate for the PromoCodeQuery builder.
|
||||||
|
func (_q *PromoCodeQuery) Where(ps ...predicate.PromoCode) *PromoCodeQuery {
|
||||||
|
_q.predicates = append(_q.predicates, ps...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Limit the number of records to be returned by this query.
|
||||||
|
func (_q *PromoCodeQuery) Limit(limit int) *PromoCodeQuery {
|
||||||
|
_q.ctx.Limit = &limit
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Offset to start from.
|
||||||
|
func (_q *PromoCodeQuery) Offset(offset int) *PromoCodeQuery {
|
||||||
|
_q.ctx.Offset = &offset
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unique configures the query builder to filter duplicate records on query.
|
||||||
|
// By default, unique is set to true, and can be disabled using this method.
|
||||||
|
func (_q *PromoCodeQuery) Unique(unique bool) *PromoCodeQuery {
|
||||||
|
_q.ctx.Unique = &unique
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Order specifies how the records should be ordered.
|
||||||
|
func (_q *PromoCodeQuery) Order(o ...promocode.OrderOption) *PromoCodeQuery {
|
||||||
|
_q.order = append(_q.order, o...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryUsageRecords chains the current query on the "usage_records" edge.
|
||||||
|
func (_q *PromoCodeQuery) QueryUsageRecords() *PromoCodeUsageQuery {
|
||||||
|
query := (&PromoCodeUsageClient{config: _q.config}).Query()
|
||||||
|
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
selector := _q.sqlQuery(ctx)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(promocode.Table, promocode.FieldID, selector),
|
||||||
|
sqlgraph.To(promocodeusage.Table, promocodeusage.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, false, promocode.UsageRecordsTable, promocode.UsageRecordsColumn),
|
||||||
|
)
|
||||||
|
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||||
|
return fromU, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
|
// First returns the first PromoCode entity from the query.
|
||||||
|
// Returns a *NotFoundError when no PromoCode was found.
|
||||||
|
func (_q *PromoCodeQuery) First(ctx context.Context) (*PromoCode, error) {
|
||||||
|
nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return nil, &NotFoundError{promocode.Label}
|
||||||
|
}
|
||||||
|
return nodes[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstX is like First, but panics if an error occurs.
|
||||||
|
func (_q *PromoCodeQuery) FirstX(ctx context.Context) *PromoCode {
|
||||||
|
node, err := _q.First(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstID returns the first PromoCode ID from the query.
|
||||||
|
// Returns a *NotFoundError when no PromoCode ID was found.
|
||||||
|
func (_q *PromoCodeQuery) FirstID(ctx context.Context) (id int64, err error) {
|
||||||
|
var ids []int64
|
||||||
|
if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(ids) == 0 {
|
||||||
|
err = &NotFoundError{promocode.Label}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return ids[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstIDX is like FirstID, but panics if an error occurs.
|
||||||
|
func (_q *PromoCodeQuery) FirstIDX(ctx context.Context) int64 {
|
||||||
|
id, err := _q.FirstID(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only returns a single PromoCode entity found by the query, ensuring it only returns one.
|
||||||
|
// Returns a *NotSingularError when more than one PromoCode entity is found.
|
||||||
|
// Returns a *NotFoundError when no PromoCode entities are found.
|
||||||
|
func (_q *PromoCodeQuery) Only(ctx context.Context) (*PromoCode, error) {
|
||||||
|
nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
switch len(nodes) {
|
||||||
|
case 1:
|
||||||
|
return nodes[0], nil
|
||||||
|
case 0:
|
||||||
|
return nil, &NotFoundError{promocode.Label}
|
||||||
|
default:
|
||||||
|
return nil, &NotSingularError{promocode.Label}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyX is like Only, but panics if an error occurs.
|
||||||
|
func (_q *PromoCodeQuery) OnlyX(ctx context.Context) *PromoCode {
|
||||||
|
node, err := _q.Only(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyID is like Only, but returns the only PromoCode ID in the query.
|
||||||
|
// Returns a *NotSingularError when more than one PromoCode ID is found.
|
||||||
|
// Returns a *NotFoundError when no entities are found.
|
||||||
|
func (_q *PromoCodeQuery) OnlyID(ctx context.Context) (id int64, err error) {
|
||||||
|
var ids []int64
|
||||||
|
if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
switch len(ids) {
|
||||||
|
case 1:
|
||||||
|
id = ids[0]
|
||||||
|
case 0:
|
||||||
|
err = &NotFoundError{promocode.Label}
|
||||||
|
default:
|
||||||
|
err = &NotSingularError{promocode.Label}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyIDX is like OnlyID, but panics if an error occurs.
|
||||||
|
func (_q *PromoCodeQuery) OnlyIDX(ctx context.Context) int64 {
|
||||||
|
id, err := _q.OnlyID(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// All executes the query and returns a list of PromoCodes.
|
||||||
|
func (_q *PromoCodeQuery) All(ctx context.Context) ([]*PromoCode, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
qr := querierAll[[]*PromoCode, *PromoCodeQuery]()
|
||||||
|
return withInterceptors[[]*PromoCode](ctx, _q, qr, _q.inters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllX is like All, but panics if an error occurs.
|
||||||
|
func (_q *PromoCodeQuery) AllX(ctx context.Context) []*PromoCode {
|
||||||
|
nodes, err := _q.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return nodes
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDs executes the query and returns a list of PromoCode IDs.
|
||||||
|
func (_q *PromoCodeQuery) IDs(ctx context.Context) (ids []int64, err error) {
|
||||||
|
if _q.ctx.Unique == nil && _q.path != nil {
|
||||||
|
_q.Unique(true)
|
||||||
|
}
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
|
||||||
|
if err = _q.Select(promocode.FieldID).Scan(ctx, &ids); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ids, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDsX is like IDs, but panics if an error occurs.
|
||||||
|
func (_q *PromoCodeQuery) IDsX(ctx context.Context) []int64 {
|
||||||
|
ids, err := _q.IDs(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return ids
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count returns the count of the given query.
|
||||||
|
func (_q *PromoCodeQuery) Count(ctx context.Context) (int, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return withInterceptors[int](ctx, _q, querierCount[*PromoCodeQuery](), _q.inters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountX is like Count, but panics if an error occurs.
|
||||||
|
func (_q *PromoCodeQuery) CountX(ctx context.Context) int {
|
||||||
|
count, err := _q.Count(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return count
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exist returns true if the query has elements in the graph.
|
||||||
|
func (_q *PromoCodeQuery) Exist(ctx context.Context) (bool, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
|
||||||
|
switch _, err := _q.FirstID(ctx); {
|
||||||
|
case IsNotFound(err):
|
||||||
|
return false, nil
|
||||||
|
case err != nil:
|
||||||
|
return false, fmt.Errorf("ent: check existence: %w", err)
|
||||||
|
default:
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExistX is like Exist, but panics if an error occurs.
|
||||||
|
func (_q *PromoCodeQuery) ExistX(ctx context.Context) bool {
|
||||||
|
exist, err := _q.Exist(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return exist
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clone returns a duplicate of the PromoCodeQuery builder, including all associated steps. It can be
|
||||||
|
// used to prepare common query builders and use them differently after the clone is made.
|
||||||
|
func (_q *PromoCodeQuery) Clone() *PromoCodeQuery {
|
||||||
|
if _q == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &PromoCodeQuery{
|
||||||
|
config: _q.config,
|
||||||
|
ctx: _q.ctx.Clone(),
|
||||||
|
order: append([]promocode.OrderOption{}, _q.order...),
|
||||||
|
inters: append([]Interceptor{}, _q.inters...),
|
||||||
|
predicates: append([]predicate.PromoCode{}, _q.predicates...),
|
||||||
|
withUsageRecords: _q.withUsageRecords.Clone(),
|
||||||
|
// clone intermediate query.
|
||||||
|
sql: _q.sql.Clone(),
|
||||||
|
path: _q.path,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithUsageRecords tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "usage_records" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *PromoCodeQuery) WithUsageRecords(opts ...func(*PromoCodeUsageQuery)) *PromoCodeQuery {
|
||||||
|
query := (&PromoCodeUsageClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withUsageRecords = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupBy is used to group vertices by one or more fields/columns.
|
||||||
|
// It is often used with aggregate functions, like: count, max, mean, min, sum.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var v []struct {
|
||||||
|
// Code string `json:"code,omitempty"`
|
||||||
|
// Count int `json:"count,omitempty"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client.PromoCode.Query().
|
||||||
|
// GroupBy(promocode.FieldCode).
|
||||||
|
// Aggregate(ent.Count()).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func (_q *PromoCodeQuery) GroupBy(field string, fields ...string) *PromoCodeGroupBy {
|
||||||
|
_q.ctx.Fields = append([]string{field}, fields...)
|
||||||
|
grbuild := &PromoCodeGroupBy{build: _q}
|
||||||
|
grbuild.flds = &_q.ctx.Fields
|
||||||
|
grbuild.label = promocode.Label
|
||||||
|
grbuild.scan = grbuild.Scan
|
||||||
|
return grbuild
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select allows the selection one or more fields/columns for the given query,
|
||||||
|
// instead of selecting all fields in the entity.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var v []struct {
|
||||||
|
// Code string `json:"code,omitempty"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client.PromoCode.Query().
|
||||||
|
// Select(promocode.FieldCode).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func (_q *PromoCodeQuery) Select(fields ...string) *PromoCodeSelect {
|
||||||
|
_q.ctx.Fields = append(_q.ctx.Fields, fields...)
|
||||||
|
sbuild := &PromoCodeSelect{PromoCodeQuery: _q}
|
||||||
|
sbuild.label = promocode.Label
|
||||||
|
sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
|
||||||
|
return sbuild
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate returns a PromoCodeSelect configured with the given aggregations.
|
||||||
|
func (_q *PromoCodeQuery) Aggregate(fns ...AggregateFunc) *PromoCodeSelect {
|
||||||
|
return _q.Select().Aggregate(fns...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *PromoCodeQuery) prepareQuery(ctx context.Context) error {
|
||||||
|
for _, inter := range _q.inters {
|
||||||
|
if inter == nil {
|
||||||
|
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
|
||||||
|
}
|
||||||
|
if trv, ok := inter.(Traverser); ok {
|
||||||
|
if err := trv.Traverse(ctx, _q); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, f := range _q.ctx.Fields {
|
||||||
|
if !promocode.ValidColumn(f) {
|
||||||
|
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _q.path != nil {
|
||||||
|
prev, err := _q.path(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_q.sql = prev
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *PromoCodeQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*PromoCode, error) {
|
||||||
|
var (
|
||||||
|
nodes = []*PromoCode{}
|
||||||
|
_spec = _q.querySpec()
|
||||||
|
loadedTypes = [1]bool{
|
||||||
|
_q.withUsageRecords != nil,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
_spec.ScanValues = func(columns []string) ([]any, error) {
|
||||||
|
return (*PromoCode).scanValues(nil, columns)
|
||||||
|
}
|
||||||
|
_spec.Assign = func(columns []string, values []any) error {
|
||||||
|
node := &PromoCode{config: _q.config}
|
||||||
|
nodes = append(nodes, node)
|
||||||
|
node.Edges.loadedTypes = loadedTypes
|
||||||
|
return node.assignValues(columns, values)
|
||||||
|
}
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
|
for i := range hooks {
|
||||||
|
hooks[i](ctx, _spec)
|
||||||
|
}
|
||||||
|
if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
if query := _q.withUsageRecords; query != nil {
|
||||||
|
if err := _q.loadUsageRecords(ctx, query, nodes,
|
||||||
|
func(n *PromoCode) { n.Edges.UsageRecords = []*PromoCodeUsage{} },
|
||||||
|
func(n *PromoCode, e *PromoCodeUsage) { n.Edges.UsageRecords = append(n.Edges.UsageRecords, e) }); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *PromoCodeQuery) loadUsageRecords(ctx context.Context, query *PromoCodeUsageQuery, nodes []*PromoCode, init func(*PromoCode), assign func(*PromoCode, *PromoCodeUsage)) error {
|
||||||
|
fks := make([]driver.Value, 0, len(nodes))
|
||||||
|
nodeids := make(map[int64]*PromoCode)
|
||||||
|
for i := range nodes {
|
||||||
|
fks = append(fks, nodes[i].ID)
|
||||||
|
nodeids[nodes[i].ID] = nodes[i]
|
||||||
|
if init != nil {
|
||||||
|
init(nodes[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(query.ctx.Fields) > 0 {
|
||||||
|
query.ctx.AppendFieldOnce(promocodeusage.FieldPromoCodeID)
|
||||||
|
}
|
||||||
|
query.Where(predicate.PromoCodeUsage(func(s *sql.Selector) {
|
||||||
|
s.Where(sql.InValues(s.C(promocode.UsageRecordsColumn), fks...))
|
||||||
|
}))
|
||||||
|
neighbors, err := query.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, n := range neighbors {
|
||||||
|
fk := n.PromoCodeID
|
||||||
|
node, ok := nodeids[fk]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf(`unexpected referenced foreign-key "promo_code_id" returned %v for node %v`, fk, n.ID)
|
||||||
|
}
|
||||||
|
assign(node, n)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *PromoCodeQuery) sqlCount(ctx context.Context) (int, error) {
|
||||||
|
_spec := _q.querySpec()
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
|
_spec.Node.Columns = _q.ctx.Fields
|
||||||
|
if len(_q.ctx.Fields) > 0 {
|
||||||
|
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
||||||
|
}
|
||||||
|
return sqlgraph.CountNodes(ctx, _q.driver, _spec)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *PromoCodeQuery) querySpec() *sqlgraph.QuerySpec {
|
||||||
|
_spec := sqlgraph.NewQuerySpec(promocode.Table, promocode.Columns, sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64))
|
||||||
|
_spec.From = _q.sql
|
||||||
|
if unique := _q.ctx.Unique; unique != nil {
|
||||||
|
_spec.Unique = *unique
|
||||||
|
} else if _q.path != nil {
|
||||||
|
_spec.Unique = true
|
||||||
|
}
|
||||||
|
if fields := _q.ctx.Fields; len(fields) > 0 {
|
||||||
|
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, promocode.FieldID)
|
||||||
|
for i := range fields {
|
||||||
|
if fields[i] != promocode.FieldID {
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ps := _q.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if limit := _q.ctx.Limit; limit != nil {
|
||||||
|
_spec.Limit = *limit
|
||||||
|
}
|
||||||
|
if offset := _q.ctx.Offset; offset != nil {
|
||||||
|
_spec.Offset = *offset
|
||||||
|
}
|
||||||
|
if ps := _q.order; len(ps) > 0 {
|
||||||
|
_spec.Order = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return _spec
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *PromoCodeQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
||||||
|
builder := sql.Dialect(_q.driver.Dialect())
|
||||||
|
t1 := builder.Table(promocode.Table)
|
||||||
|
columns := _q.ctx.Fields
|
||||||
|
if len(columns) == 0 {
|
||||||
|
columns = promocode.Columns
|
||||||
|
}
|
||||||
|
selector := builder.Select(t1.Columns(columns...)...).From(t1)
|
||||||
|
if _q.sql != nil {
|
||||||
|
selector = _q.sql
|
||||||
|
selector.Select(selector.Columns(columns...)...)
|
||||||
|
}
|
||||||
|
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
||||||
|
selector.Distinct()
|
||||||
|
}
|
||||||
|
for _, m := range _q.modifiers {
|
||||||
|
m(selector)
|
||||||
|
}
|
||||||
|
for _, p := range _q.predicates {
|
||||||
|
p(selector)
|
||||||
|
}
|
||||||
|
for _, p := range _q.order {
|
||||||
|
p(selector)
|
||||||
|
}
|
||||||
|
if offset := _q.ctx.Offset; offset != nil {
|
||||||
|
// limit is mandatory for offset clause. We start
|
||||||
|
// with default value, and override it below if needed.
|
||||||
|
selector.Offset(*offset).Limit(math.MaxInt32)
|
||||||
|
}
|
||||||
|
if limit := _q.ctx.Limit; limit != nil {
|
||||||
|
selector.Limit(*limit)
|
||||||
|
}
|
||||||
|
return selector
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||||
|
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||||
|
// either committed or rolled-back.
|
||||||
|
func (_q *PromoCodeQuery) ForUpdate(opts ...sql.LockOption) *PromoCodeQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForUpdate(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||||
|
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||||
|
// until your transaction commits.
|
||||||
|
func (_q *PromoCodeQuery) ForShare(opts ...sql.LockOption) *PromoCodeQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForShare(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// PromoCodeGroupBy is the group-by builder for PromoCode entities.
|
||||||
|
type PromoCodeGroupBy struct {
|
||||||
|
selector
|
||||||
|
build *PromoCodeQuery
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the group-by query.
|
||||||
|
func (_g *PromoCodeGroupBy) Aggregate(fns ...AggregateFunc) *PromoCodeGroupBy {
|
||||||
|
_g.fns = append(_g.fns, fns...)
|
||||||
|
return _g
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
|
||||||
|
func (_g *PromoCodeGroupBy) Scan(ctx context.Context, v any) error {
|
||||||
|
ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
|
||||||
|
if err := _g.build.prepareQuery(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return scanWithInterceptors[*PromoCodeQuery, *PromoCodeGroupBy](ctx, _g.build, _g, _g.build.inters, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_g *PromoCodeGroupBy) sqlScan(ctx context.Context, root *PromoCodeQuery, v any) error {
|
||||||
|
selector := root.sqlQuery(ctx).Select()
|
||||||
|
aggregation := make([]string, 0, len(_g.fns))
|
||||||
|
for _, fn := range _g.fns {
|
||||||
|
aggregation = append(aggregation, fn(selector))
|
||||||
|
}
|
||||||
|
if len(selector.SelectedColumns()) == 0 {
|
||||||
|
columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
|
||||||
|
for _, f := range *_g.flds {
|
||||||
|
columns = append(columns, selector.C(f))
|
||||||
|
}
|
||||||
|
columns = append(columns, aggregation...)
|
||||||
|
selector.Select(columns...)
|
||||||
|
}
|
||||||
|
selector.GroupBy(selector.Columns(*_g.flds...)...)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
rows := &sql.Rows{}
|
||||||
|
query, args := selector.Query()
|
||||||
|
if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
return sql.ScanSlice(rows, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// PromoCodeSelect is the builder for selecting fields of PromoCode entities.
|
||||||
|
type PromoCodeSelect struct {
|
||||||
|
*PromoCodeQuery
|
||||||
|
selector
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the selector query.
|
||||||
|
func (_s *PromoCodeSelect) Aggregate(fns ...AggregateFunc) *PromoCodeSelect {
|
||||||
|
_s.fns = append(_s.fns, fns...)
|
||||||
|
return _s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
|
||||||
|
func (_s *PromoCodeSelect) Scan(ctx context.Context, v any) error {
|
||||||
|
ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
|
||||||
|
if err := _s.prepareQuery(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return scanWithInterceptors[*PromoCodeQuery, *PromoCodeSelect](ctx, _s.PromoCodeQuery, _s, _s.inters, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_s *PromoCodeSelect) sqlScan(ctx context.Context, root *PromoCodeQuery, v any) error {
|
||||||
|
selector := root.sqlQuery(ctx)
|
||||||
|
aggregation := make([]string, 0, len(_s.fns))
|
||||||
|
for _, fn := range _s.fns {
|
||||||
|
aggregation = append(aggregation, fn(selector))
|
||||||
|
}
|
||||||
|
switch n := len(*_s.selector.flds); {
|
||||||
|
case n == 0 && len(aggregation) > 0:
|
||||||
|
selector.Select(aggregation...)
|
||||||
|
case n != 0 && len(aggregation) > 0:
|
||||||
|
selector.AppendSelect(aggregation...)
|
||||||
|
}
|
||||||
|
rows := &sql.Rows{}
|
||||||
|
query, args := selector.Query()
|
||||||
|
if err := _s.driver.Query(ctx, query, args, rows); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
return sql.ScanSlice(rows, v)
|
||||||
|
}
|
||||||
745
backend/ent/promocode_update.go
Normal file
745
backend/ent/promocode_update.go
Normal file
@@ -0,0 +1,745 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PromoCodeUpdate is the builder for updating PromoCode entities.
type PromoCodeUpdate struct {
	config
	hooks    []Hook
	mutation *PromoCodeMutation
}

// Where appends a list predicates to the PromoCodeUpdate builder.
func (_u *PromoCodeUpdate) Where(ps ...predicate.PromoCode) *PromoCodeUpdate {
	_u.mutation.Where(ps...)
	return _u
}
|
||||||
|
|
||||||
|
// SetCode sets the "code" field.
func (_u *PromoCodeUpdate) SetCode(v string) *PromoCodeUpdate {
	_u.mutation.SetCode(v)
	return _u
}

// SetNillableCode sets the "code" field if the given value is not nil.
func (_u *PromoCodeUpdate) SetNillableCode(v *string) *PromoCodeUpdate {
	if v != nil {
		_u.SetCode(*v)
	}
	return _u
}

// SetBonusAmount sets the "bonus_amount" field.
func (_u *PromoCodeUpdate) SetBonusAmount(v float64) *PromoCodeUpdate {
	// Reset first so a prior Add* on the same builder does not combine with
	// this absolute assignment.
	_u.mutation.ResetBonusAmount()
	_u.mutation.SetBonusAmount(v)
	return _u
}

// SetNillableBonusAmount sets the "bonus_amount" field if the given value is not nil.
func (_u *PromoCodeUpdate) SetNillableBonusAmount(v *float64) *PromoCodeUpdate {
	if v != nil {
		_u.SetBonusAmount(*v)
	}
	return _u
}

// AddBonusAmount adds value to the "bonus_amount" field.
func (_u *PromoCodeUpdate) AddBonusAmount(v float64) *PromoCodeUpdate {
	_u.mutation.AddBonusAmount(v)
	return _u
}

// SetMaxUses sets the "max_uses" field.
func (_u *PromoCodeUpdate) SetMaxUses(v int) *PromoCodeUpdate {
	_u.mutation.ResetMaxUses()
	_u.mutation.SetMaxUses(v)
	return _u
}

// SetNillableMaxUses sets the "max_uses" field if the given value is not nil.
func (_u *PromoCodeUpdate) SetNillableMaxUses(v *int) *PromoCodeUpdate {
	if v != nil {
		_u.SetMaxUses(*v)
	}
	return _u
}

// AddMaxUses adds value to the "max_uses" field.
func (_u *PromoCodeUpdate) AddMaxUses(v int) *PromoCodeUpdate {
	_u.mutation.AddMaxUses(v)
	return _u
}

// SetUsedCount sets the "used_count" field.
func (_u *PromoCodeUpdate) SetUsedCount(v int) *PromoCodeUpdate {
	_u.mutation.ResetUsedCount()
	_u.mutation.SetUsedCount(v)
	return _u
}

// SetNillableUsedCount sets the "used_count" field if the given value is not nil.
func (_u *PromoCodeUpdate) SetNillableUsedCount(v *int) *PromoCodeUpdate {
	if v != nil {
		_u.SetUsedCount(*v)
	}
	return _u
}

// AddUsedCount adds value to the "used_count" field.
func (_u *PromoCodeUpdate) AddUsedCount(v int) *PromoCodeUpdate {
	_u.mutation.AddUsedCount(v)
	return _u
}

// SetStatus sets the "status" field.
func (_u *PromoCodeUpdate) SetStatus(v string) *PromoCodeUpdate {
	_u.mutation.SetStatus(v)
	return _u
}

// SetNillableStatus sets the "status" field if the given value is not nil.
func (_u *PromoCodeUpdate) SetNillableStatus(v *string) *PromoCodeUpdate {
	if v != nil {
		_u.SetStatus(*v)
	}
	return _u
}

// SetExpiresAt sets the "expires_at" field.
func (_u *PromoCodeUpdate) SetExpiresAt(v time.Time) *PromoCodeUpdate {
	_u.mutation.SetExpiresAt(v)
	return _u
}

// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil.
func (_u *PromoCodeUpdate) SetNillableExpiresAt(v *time.Time) *PromoCodeUpdate {
	if v != nil {
		_u.SetExpiresAt(*v)
	}
	return _u
}

// ClearExpiresAt clears the value of the "expires_at" field.
func (_u *PromoCodeUpdate) ClearExpiresAt() *PromoCodeUpdate {
	_u.mutation.ClearExpiresAt()
	return _u
}

// SetNotes sets the "notes" field.
func (_u *PromoCodeUpdate) SetNotes(v string) *PromoCodeUpdate {
	_u.mutation.SetNotes(v)
	return _u
}

// SetNillableNotes sets the "notes" field if the given value is not nil.
func (_u *PromoCodeUpdate) SetNillableNotes(v *string) *PromoCodeUpdate {
	if v != nil {
		_u.SetNotes(*v)
	}
	return _u
}

// ClearNotes clears the value of the "notes" field.
func (_u *PromoCodeUpdate) ClearNotes() *PromoCodeUpdate {
	_u.mutation.ClearNotes()
	return _u
}

// SetUpdatedAt sets the "updated_at" field.
func (_u *PromoCodeUpdate) SetUpdatedAt(v time.Time) *PromoCodeUpdate {
	_u.mutation.SetUpdatedAt(v)
	return _u
}
|
||||||
|
|
||||||
|
// AddUsageRecordIDs adds the "usage_records" edge to the PromoCodeUsage entity by IDs.
func (_u *PromoCodeUpdate) AddUsageRecordIDs(ids ...int64) *PromoCodeUpdate {
	_u.mutation.AddUsageRecordIDs(ids...)
	return _u
}

// AddUsageRecords adds the "usage_records" edges to the PromoCodeUsage entity.
func (_u *PromoCodeUpdate) AddUsageRecords(v ...*PromoCodeUsage) *PromoCodeUpdate {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.AddUsageRecordIDs(ids...)
}

// Mutation returns the PromoCodeMutation object of the builder.
func (_u *PromoCodeUpdate) Mutation() *PromoCodeMutation {
	return _u.mutation
}

// ClearUsageRecords clears all "usage_records" edges to the PromoCodeUsage entity.
func (_u *PromoCodeUpdate) ClearUsageRecords() *PromoCodeUpdate {
	_u.mutation.ClearUsageRecords()
	return _u
}

// RemoveUsageRecordIDs removes the "usage_records" edge to PromoCodeUsage entities by IDs.
func (_u *PromoCodeUpdate) RemoveUsageRecordIDs(ids ...int64) *PromoCodeUpdate {
	_u.mutation.RemoveUsageRecordIDs(ids...)
	return _u
}

// RemoveUsageRecords removes "usage_records" edges to PromoCodeUsage entities.
func (_u *PromoCodeUpdate) RemoveUsageRecords(v ...*PromoCodeUsage) *PromoCodeUpdate {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.RemoveUsageRecordIDs(ids...)
}
|
||||||
|
|
||||||
|
// Save executes the query and returns the number of nodes affected by the update operation.
func (_u *PromoCodeUpdate) Save(ctx context.Context) (int, error) {
	_u.defaults()
	return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (_u *PromoCodeUpdate) SaveX(ctx context.Context) int {
	affected, err := _u.Save(ctx)
	if err != nil {
		panic(err)
	}
	return affected
}

// Exec executes the query.
func (_u *PromoCodeUpdate) Exec(ctx context.Context) error {
	_, err := _u.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (_u *PromoCodeUpdate) ExecX(ctx context.Context) {
	if err := _u.Exec(ctx); err != nil {
		panic(err)
	}
}

// defaults sets the default values of the builder before save.
func (_u *PromoCodeUpdate) defaults() {
	// Auto-populate "updated_at" when the caller did not set it explicitly.
	if _, ok := _u.mutation.UpdatedAt(); !ok {
		v := promocode.UpdateDefaultUpdatedAt()
		_u.mutation.SetUpdatedAt(v)
	}
}

// check runs all checks and user-defined validators on the builder.
func (_u *PromoCodeUpdate) check() error {
	if v, ok := _u.mutation.Code(); ok {
		if err := promocode.CodeValidator(v); err != nil {
			return &ValidationError{Name: "code", err: fmt.Errorf(`ent: validator failed for field "PromoCode.code": %w`, err)}
		}
	}
	if v, ok := _u.mutation.Status(); ok {
		if err := promocode.StatusValidator(v); err != nil {
			return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "PromoCode.status": %w`, err)}
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
// sqlSave validates the builder, translates the mutation into an update spec
// (fields, numeric increments, cleared fields and edge changes), and executes
// it, returning the number of affected nodes.
func (_u *PromoCodeUpdate) sqlSave(ctx context.Context) (_node int, err error) {
	if err := _u.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(promocode.Table, promocode.Columns, sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64))
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if value, ok := _u.mutation.Code(); ok {
		_spec.SetField(promocode.FieldCode, field.TypeString, value)
	}
	if value, ok := _u.mutation.BonusAmount(); ok {
		_spec.SetField(promocode.FieldBonusAmount, field.TypeFloat64, value)
	}
	if value, ok := _u.mutation.AddedBonusAmount(); ok {
		_spec.AddField(promocode.FieldBonusAmount, field.TypeFloat64, value)
	}
	if value, ok := _u.mutation.MaxUses(); ok {
		_spec.SetField(promocode.FieldMaxUses, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedMaxUses(); ok {
		_spec.AddField(promocode.FieldMaxUses, field.TypeInt, value)
	}
	if value, ok := _u.mutation.UsedCount(); ok {
		_spec.SetField(promocode.FieldUsedCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedUsedCount(); ok {
		_spec.AddField(promocode.FieldUsedCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.Status(); ok {
		_spec.SetField(promocode.FieldStatus, field.TypeString, value)
	}
	if value, ok := _u.mutation.ExpiresAt(); ok {
		_spec.SetField(promocode.FieldExpiresAt, field.TypeTime, value)
	}
	if _u.mutation.ExpiresAtCleared() {
		_spec.ClearField(promocode.FieldExpiresAt, field.TypeTime)
	}
	if value, ok := _u.mutation.Notes(); ok {
		_spec.SetField(promocode.FieldNotes, field.TypeString, value)
	}
	if _u.mutation.NotesCleared() {
		_spec.ClearField(promocode.FieldNotes, field.TypeString)
	}
	if value, ok := _u.mutation.UpdatedAt(); ok {
		_spec.SetField(promocode.FieldUpdatedAt, field.TypeTime, value)
	}
	// Edge handling: a full clear, a partial removal (only when not fully
	// cleared), and additions are translated into separate edge specs.
	if _u.mutation.UsageRecordsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   promocode.UsageRecordsTable,
			Columns: []string{promocode.UsageRecordsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.RemovedUsageRecordsIDs(); len(nodes) > 0 && !_u.mutation.UsageRecordsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   promocode.UsageRecordsTable,
			Columns: []string{promocode.UsageRecordsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.UsageRecordsIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   promocode.UsageRecordsTable,
			Columns: []string{promocode.UsageRecordsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
		// Map low-level sqlgraph errors to the package's public error types.
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{promocode.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return 0, err
	}
	_u.mutation.done = true
	return _node, nil
}
|
||||||
|
|
||||||
|
// PromoCodeUpdateOne is the builder for updating a single PromoCode entity.
type PromoCodeUpdateOne struct {
	config
	fields   []string
	hooks    []Hook
	mutation *PromoCodeMutation
}

// SetCode sets the "code" field.
func (_u *PromoCodeUpdateOne) SetCode(v string) *PromoCodeUpdateOne {
	_u.mutation.SetCode(v)
	return _u
}

// SetNillableCode sets the "code" field if the given value is not nil.
func (_u *PromoCodeUpdateOne) SetNillableCode(v *string) *PromoCodeUpdateOne {
	if v != nil {
		_u.SetCode(*v)
	}
	return _u
}

// SetBonusAmount sets the "bonus_amount" field.
func (_u *PromoCodeUpdateOne) SetBonusAmount(v float64) *PromoCodeUpdateOne {
	// Reset first so a prior Add* on the same builder does not combine with
	// this absolute assignment.
	_u.mutation.ResetBonusAmount()
	_u.mutation.SetBonusAmount(v)
	return _u
}

// SetNillableBonusAmount sets the "bonus_amount" field if the given value is not nil.
func (_u *PromoCodeUpdateOne) SetNillableBonusAmount(v *float64) *PromoCodeUpdateOne {
	if v != nil {
		_u.SetBonusAmount(*v)
	}
	return _u
}

// AddBonusAmount adds value to the "bonus_amount" field.
func (_u *PromoCodeUpdateOne) AddBonusAmount(v float64) *PromoCodeUpdateOne {
	_u.mutation.AddBonusAmount(v)
	return _u
}

// SetMaxUses sets the "max_uses" field.
func (_u *PromoCodeUpdateOne) SetMaxUses(v int) *PromoCodeUpdateOne {
	_u.mutation.ResetMaxUses()
	_u.mutation.SetMaxUses(v)
	return _u
}

// SetNillableMaxUses sets the "max_uses" field if the given value is not nil.
func (_u *PromoCodeUpdateOne) SetNillableMaxUses(v *int) *PromoCodeUpdateOne {
	if v != nil {
		_u.SetMaxUses(*v)
	}
	return _u
}

// AddMaxUses adds value to the "max_uses" field.
func (_u *PromoCodeUpdateOne) AddMaxUses(v int) *PromoCodeUpdateOne {
	_u.mutation.AddMaxUses(v)
	return _u
}

// SetUsedCount sets the "used_count" field.
func (_u *PromoCodeUpdateOne) SetUsedCount(v int) *PromoCodeUpdateOne {
	_u.mutation.ResetUsedCount()
	_u.mutation.SetUsedCount(v)
	return _u
}

// SetNillableUsedCount sets the "used_count" field if the given value is not nil.
func (_u *PromoCodeUpdateOne) SetNillableUsedCount(v *int) *PromoCodeUpdateOne {
	if v != nil {
		_u.SetUsedCount(*v)
	}
	return _u
}

// AddUsedCount adds value to the "used_count" field.
func (_u *PromoCodeUpdateOne) AddUsedCount(v int) *PromoCodeUpdateOne {
	_u.mutation.AddUsedCount(v)
	return _u
}

// SetStatus sets the "status" field.
func (_u *PromoCodeUpdateOne) SetStatus(v string) *PromoCodeUpdateOne {
	_u.mutation.SetStatus(v)
	return _u
}

// SetNillableStatus sets the "status" field if the given value is not nil.
func (_u *PromoCodeUpdateOne) SetNillableStatus(v *string) *PromoCodeUpdateOne {
	if v != nil {
		_u.SetStatus(*v)
	}
	return _u
}

// SetExpiresAt sets the "expires_at" field.
func (_u *PromoCodeUpdateOne) SetExpiresAt(v time.Time) *PromoCodeUpdateOne {
	_u.mutation.SetExpiresAt(v)
	return _u
}

// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil.
func (_u *PromoCodeUpdateOne) SetNillableExpiresAt(v *time.Time) *PromoCodeUpdateOne {
	if v != nil {
		_u.SetExpiresAt(*v)
	}
	return _u
}

// ClearExpiresAt clears the value of the "expires_at" field.
func (_u *PromoCodeUpdateOne) ClearExpiresAt() *PromoCodeUpdateOne {
	_u.mutation.ClearExpiresAt()
	return _u
}

// SetNotes sets the "notes" field.
func (_u *PromoCodeUpdateOne) SetNotes(v string) *PromoCodeUpdateOne {
	_u.mutation.SetNotes(v)
	return _u
}

// SetNillableNotes sets the "notes" field if the given value is not nil.
func (_u *PromoCodeUpdateOne) SetNillableNotes(v *string) *PromoCodeUpdateOne {
	if v != nil {
		_u.SetNotes(*v)
	}
	return _u
}

// ClearNotes clears the value of the "notes" field.
func (_u *PromoCodeUpdateOne) ClearNotes() *PromoCodeUpdateOne {
	_u.mutation.ClearNotes()
	return _u
}

// SetUpdatedAt sets the "updated_at" field.
func (_u *PromoCodeUpdateOne) SetUpdatedAt(v time.Time) *PromoCodeUpdateOne {
	_u.mutation.SetUpdatedAt(v)
	return _u
}
|
||||||
|
|
||||||
|
// AddUsageRecordIDs adds the "usage_records" edge to the PromoCodeUsage entity by IDs.
func (_u *PromoCodeUpdateOne) AddUsageRecordIDs(ids ...int64) *PromoCodeUpdateOne {
	_u.mutation.AddUsageRecordIDs(ids...)
	return _u
}

// AddUsageRecords adds the "usage_records" edges to the PromoCodeUsage entity.
func (_u *PromoCodeUpdateOne) AddUsageRecords(v ...*PromoCodeUsage) *PromoCodeUpdateOne {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.AddUsageRecordIDs(ids...)
}

// Mutation returns the PromoCodeMutation object of the builder.
func (_u *PromoCodeUpdateOne) Mutation() *PromoCodeMutation {
	return _u.mutation
}

// ClearUsageRecords clears all "usage_records" edges to the PromoCodeUsage entity.
func (_u *PromoCodeUpdateOne) ClearUsageRecords() *PromoCodeUpdateOne {
	_u.mutation.ClearUsageRecords()
	return _u
}

// RemoveUsageRecordIDs removes the "usage_records" edge to PromoCodeUsage entities by IDs.
func (_u *PromoCodeUpdateOne) RemoveUsageRecordIDs(ids ...int64) *PromoCodeUpdateOne {
	_u.mutation.RemoveUsageRecordIDs(ids...)
	return _u
}

// RemoveUsageRecords removes "usage_records" edges to PromoCodeUsage entities.
func (_u *PromoCodeUpdateOne) RemoveUsageRecords(v ...*PromoCodeUsage) *PromoCodeUpdateOne {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.RemoveUsageRecordIDs(ids...)
}

// Where appends a list predicates to the PromoCodeUpdate builder.
func (_u *PromoCodeUpdateOne) Where(ps ...predicate.PromoCode) *PromoCodeUpdateOne {
	_u.mutation.Where(ps...)
	return _u
}

// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (_u *PromoCodeUpdateOne) Select(field string, fields ...string) *PromoCodeUpdateOne {
	_u.fields = append([]string{field}, fields...)
	return _u
}
|
||||||
|
|
||||||
|
// Save executes the query and returns the updated PromoCode entity.
func (_u *PromoCodeUpdateOne) Save(ctx context.Context) (*PromoCode, error) {
	_u.defaults()
	return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (_u *PromoCodeUpdateOne) SaveX(ctx context.Context) *PromoCode {
	node, err := _u.Save(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// Exec executes the query on the entity.
func (_u *PromoCodeUpdateOne) Exec(ctx context.Context) error {
	_, err := _u.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (_u *PromoCodeUpdateOne) ExecX(ctx context.Context) {
	if err := _u.Exec(ctx); err != nil {
		panic(err)
	}
}

// defaults sets the default values of the builder before save.
func (_u *PromoCodeUpdateOne) defaults() {
	// Auto-populate "updated_at" when the caller did not set it explicitly.
	if _, ok := _u.mutation.UpdatedAt(); !ok {
		v := promocode.UpdateDefaultUpdatedAt()
		_u.mutation.SetUpdatedAt(v)
	}
}

// check runs all checks and user-defined validators on the builder.
func (_u *PromoCodeUpdateOne) check() error {
	if v, ok := _u.mutation.Code(); ok {
		if err := promocode.CodeValidator(v); err != nil {
			return &ValidationError{Name: "code", err: fmt.Errorf(`ent: validator failed for field "PromoCode.code": %w`, err)}
		}
	}
	if v, ok := _u.mutation.Status(); ok {
		if err := promocode.StatusValidator(v); err != nil {
			return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "PromoCode.status": %w`, err)}
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
// sqlSave validates the builder, translates the mutation into an update spec
// targeting a single node by ID, executes it, and returns the updated entity.
func (_u *PromoCodeUpdateOne) sqlSave(ctx context.Context) (_node *PromoCode, err error) {
	if err := _u.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(promocode.Table, promocode.Columns, sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64))
	id, ok := _u.mutation.ID()
	if !ok {
		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "PromoCode.id" for update`)}
	}
	_spec.Node.ID.Value = id
	// When Select was used, restrict the returned columns; the ID column is
	// always included first.
	if fields := _u.fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, promocode.FieldID)
		for _, f := range fields {
			if !promocode.ValidColumn(f) {
				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
			}
			if f != promocode.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, f)
			}
		}
	}
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if value, ok := _u.mutation.Code(); ok {
		_spec.SetField(promocode.FieldCode, field.TypeString, value)
	}
	if value, ok := _u.mutation.BonusAmount(); ok {
		_spec.SetField(promocode.FieldBonusAmount, field.TypeFloat64, value)
	}
	if value, ok := _u.mutation.AddedBonusAmount(); ok {
		_spec.AddField(promocode.FieldBonusAmount, field.TypeFloat64, value)
	}
	if value, ok := _u.mutation.MaxUses(); ok {
		_spec.SetField(promocode.FieldMaxUses, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedMaxUses(); ok {
		_spec.AddField(promocode.FieldMaxUses, field.TypeInt, value)
	}
	if value, ok := _u.mutation.UsedCount(); ok {
		_spec.SetField(promocode.FieldUsedCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedUsedCount(); ok {
		_spec.AddField(promocode.FieldUsedCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.Status(); ok {
		_spec.SetField(promocode.FieldStatus, field.TypeString, value)
	}
	if value, ok := _u.mutation.ExpiresAt(); ok {
		_spec.SetField(promocode.FieldExpiresAt, field.TypeTime, value)
	}
	if _u.mutation.ExpiresAtCleared() {
		_spec.ClearField(promocode.FieldExpiresAt, field.TypeTime)
	}
	if value, ok := _u.mutation.Notes(); ok {
		_spec.SetField(promocode.FieldNotes, field.TypeString, value)
	}
	if _u.mutation.NotesCleared() {
		_spec.ClearField(promocode.FieldNotes, field.TypeString)
	}
	if value, ok := _u.mutation.UpdatedAt(); ok {
		_spec.SetField(promocode.FieldUpdatedAt, field.TypeTime, value)
	}
	// Edge handling mirrors PromoCodeUpdate.sqlSave: full clear, partial
	// removal, then additions.
	if _u.mutation.UsageRecordsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   promocode.UsageRecordsTable,
			Columns: []string{promocode.UsageRecordsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.RemovedUsageRecordsIDs(); len(nodes) > 0 && !_u.mutation.UsageRecordsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   promocode.UsageRecordsTable,
			Columns: []string{promocode.UsageRecordsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.UsageRecordsIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   promocode.UsageRecordsTable,
			Columns: []string{promocode.UsageRecordsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	_node = &PromoCode{config: _u.config}
	_spec.Assign = _node.assignValues
	_spec.ScanValues = _node.scanValues
	if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil {
		// Map low-level sqlgraph errors to the package's public error types.
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{promocode.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	_u.mutation.done = true
	return _node, nil
}
|
||||||
187
backend/ent/promocodeusage.go
Normal file
187
backend/ent/promocodeusage.go
Normal file
@@ -0,0 +1,187 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PromoCodeUsage is the model entity for the PromoCodeUsage schema.
type PromoCodeUsage struct {
	config `json:"-"`
	// ID of the ent.
	ID int64 `json:"id,omitempty"`
	// ID of the promo code that was used.
	PromoCodeID int64 `json:"promo_code_id,omitempty"`
	// ID of the user who used the code.
	UserID int64 `json:"user_id,omitempty"`
	// Actual bonus amount granted.
	BonusAmount float64 `json:"bonus_amount,omitempty"`
	// Time at which the code was used.
	UsedAt time.Time `json:"used_at,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the PromoCodeUsageQuery when eager-loading is set.
	Edges        PromoCodeUsageEdges `json:"edges"`
	selectValues sql.SelectValues
}

// PromoCodeUsageEdges holds the relations/edges for other nodes in the graph.
type PromoCodeUsageEdges struct {
	// PromoCode holds the value of the promo_code edge.
	PromoCode *PromoCode `json:"promo_code,omitempty"`
	// User holds the value of the user edge.
	User *User `json:"user,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	loadedTypes [2]bool
}

// PromoCodeOrErr returns the PromoCode value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e PromoCodeUsageEdges) PromoCodeOrErr() (*PromoCode, error) {
	if e.PromoCode != nil {
		return e.PromoCode, nil
	} else if e.loadedTypes[0] {
		// Edge was requested but the related node does not exist.
		return nil, &NotFoundError{label: promocode.Label}
	}
	return nil, &NotLoadedError{edge: "promo_code"}
}

// UserOrErr returns the User value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e PromoCodeUsageEdges) UserOrErr() (*User, error) {
	if e.User != nil {
		return e.User, nil
	} else if e.loadedTypes[1] {
		// Edge was requested but the related node does not exist.
		return nil, &NotFoundError{label: user.Label}
	}
	return nil, &NotLoadedError{edge: "user"}
}

// scanValues returns the types for scanning values from sql.Rows.
func (*PromoCodeUsage) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i := range columns {
		switch columns[i] {
		case promocodeusage.FieldBonusAmount:
			values[i] = new(sql.NullFloat64)
		case promocodeusage.FieldID, promocodeusage.FieldPromoCodeID, promocodeusage.FieldUserID:
			values[i] = new(sql.NullInt64)
		case promocodeusage.FieldUsedAt:
			values[i] = new(sql.NullTime)
		default:
			// Unknown columns (e.g. modifier-selected values) are captured as-is.
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}
|
||||||
|
|
||||||
|
// assignValues assigns the values that were returned from sql.Rows (after scanning)
|
||||||
|
// to the PromoCodeUsage fields.
|
||||||
|
func (_m *PromoCodeUsage) assignValues(columns []string, values []any) error {
|
||||||
|
if m, n := len(values), len(columns); m < n {
|
||||||
|
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
|
||||||
|
}
|
||||||
|
for i := range columns {
|
||||||
|
switch columns[i] {
|
||||||
|
case promocodeusage.FieldID:
|
||||||
|
value, ok := values[i].(*sql.NullInt64)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field id", value)
|
||||||
|
}
|
||||||
|
_m.ID = int64(value.Int64)
|
||||||
|
case promocodeusage.FieldPromoCodeID:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field promo_code_id", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.PromoCodeID = value.Int64
|
||||||
|
}
|
||||||
|
case promocodeusage.FieldUserID:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field user_id", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.UserID = value.Int64
|
||||||
|
}
|
||||||
|
case promocodeusage.FieldBonusAmount:
|
||||||
|
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field bonus_amount", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.BonusAmount = value.Float64
|
||||||
|
}
|
||||||
|
case promocodeusage.FieldUsedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field used_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.UsedAt = value.Time
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
_m.selectValues.Set(columns[i], values[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value returns the ent.Value that was dynamically selected and assigned to the PromoCodeUsage.
// This includes values selected through modifiers, order, etc.
func (_m *PromoCodeUsage) Value(name string) (ent.Value, error) {
	return _m.selectValues.Get(name)
}
|
||||||
|
|
||||||
|
// QueryPromoCode queries the "promo_code" edge of the PromoCodeUsage entity.
|
||||||
|
func (_m *PromoCodeUsage) QueryPromoCode() *PromoCodeQuery {
|
||||||
|
return NewPromoCodeUsageClient(_m.config).QueryPromoCode(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryUser queries the "user" edge of the PromoCodeUsage entity.
|
||||||
|
func (_m *PromoCodeUsage) QueryUser() *UserQuery {
|
||||||
|
return NewPromoCodeUsageClient(_m.config).QueryUser(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update returns a builder for updating this PromoCodeUsage.
|
||||||
|
// Note that you need to call PromoCodeUsage.Unwrap() before calling this method if this PromoCodeUsage
|
||||||
|
// was returned from a transaction, and the transaction was committed or rolled back.
|
||||||
|
func (_m *PromoCodeUsage) Update() *PromoCodeUsageUpdateOne {
|
||||||
|
return NewPromoCodeUsageClient(_m.config).UpdateOne(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unwrap unwraps the PromoCodeUsage entity that was returned from a transaction after it was closed,
|
||||||
|
// so that all future queries will be executed through the driver which created the transaction.
|
||||||
|
func (_m *PromoCodeUsage) Unwrap() *PromoCodeUsage {
|
||||||
|
_tx, ok := _m.config.driver.(*txDriver)
|
||||||
|
if !ok {
|
||||||
|
panic("ent: PromoCodeUsage is not a transactional entity")
|
||||||
|
}
|
||||||
|
_m.config.driver = _tx.drv
|
||||||
|
return _m
|
||||||
|
}
|
||||||
|
|
||||||
|
// String implements the fmt.Stringer.
|
||||||
|
func (_m *PromoCodeUsage) String() string {
|
||||||
|
var builder strings.Builder
|
||||||
|
builder.WriteString("PromoCodeUsage(")
|
||||||
|
builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
|
||||||
|
builder.WriteString("promo_code_id=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.PromoCodeID))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("user_id=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.UserID))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("bonus_amount=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.BonusAmount))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("used_at=")
|
||||||
|
builder.WriteString(_m.UsedAt.Format(time.ANSIC))
|
||||||
|
builder.WriteByte(')')
|
||||||
|
return builder.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// PromoCodeUsages is a parsable slice of PromoCodeUsage.
type PromoCodeUsages []*PromoCodeUsage
|
||||||
125
backend/ent/promocodeusage/promocodeusage.go
Normal file
125
backend/ent/promocodeusage/promocodeusage.go
Normal file
@@ -0,0 +1,125 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package promocodeusage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// Label holds the string label denoting the promocodeusage type in the database.
	Label = "promo_code_usage"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldPromoCodeID holds the string denoting the promo_code_id field in the database.
	FieldPromoCodeID = "promo_code_id"
	// FieldUserID holds the string denoting the user_id field in the database.
	FieldUserID = "user_id"
	// FieldBonusAmount holds the string denoting the bonus_amount field in the database.
	FieldBonusAmount = "bonus_amount"
	// FieldUsedAt holds the string denoting the used_at field in the database.
	FieldUsedAt = "used_at"
	// EdgePromoCode holds the string denoting the promo_code edge name in mutations.
	EdgePromoCode = "promo_code"
	// EdgeUser holds the string denoting the user edge name in mutations.
	EdgeUser = "user"
	// Table holds the table name of the promocodeusage in the database.
	Table = "promo_code_usages"
	// PromoCodeTable is the table that holds the promo_code relation/edge.
	// It equals Table because the foreign key lives on promo_code_usages.
	PromoCodeTable = "promo_code_usages"
	// PromoCodeInverseTable is the table name for the PromoCode entity.
	// It exists in this package in order to avoid circular dependency with the "promocode" package.
	PromoCodeInverseTable = "promo_codes"
	// PromoCodeColumn is the table column denoting the promo_code relation/edge.
	PromoCodeColumn = "promo_code_id"
	// UserTable is the table that holds the user relation/edge.
	// It equals Table because the foreign key lives on promo_code_usages.
	UserTable = "promo_code_usages"
	// UserInverseTable is the table name for the User entity.
	// It exists in this package in order to avoid circular dependency with the "user" package.
	UserInverseTable = "users"
	// UserColumn is the table column denoting the user relation/edge.
	UserColumn = "user_id"
)
|
||||||
|
|
||||||
|
// Columns holds all SQL columns for promocodeusage fields, in schema order.
var Columns = []string{
	FieldID,
	FieldPromoCodeID,
	FieldUserID,
	FieldBonusAmount,
	FieldUsedAt,
}
|
||||||
|
|
||||||
|
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||||
|
func ValidColumn(column string) bool {
|
||||||
|
for i := range Columns {
|
||||||
|
if column == Columns[i] {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
	// DefaultUsedAt holds the default value on creation for the "used_at" field.
	// NOTE(review): assigned from outside this file (nil until then) — confirm it
	// is set by the package's runtime initialization before first use.
	DefaultUsedAt func() time.Time
)
|
||||||
|
|
||||||
|
// OrderOption defines the ordering options for the PromoCodeUsage queries.
type OrderOption func(*sql.Selector)

// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByPromoCodeID orders the results by the promo_code_id field.
func ByPromoCodeID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldPromoCodeID, opts...).ToFunc()
}

// ByUserID orders the results by the user_id field.
func ByUserID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUserID, opts...).ToFunc()
}

// ByBonusAmount orders the results by the bonus_amount field.
func ByBonusAmount(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldBonusAmount, opts...).ToFunc()
}

// ByUsedAt orders the results by the used_at field.
func ByUsedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUsedAt, opts...).ToFunc()
}

// ByPromoCodeField orders the results by promo_code field.
// The ordering term is applied to the neighbor (PromoCode) row via a join.
func ByPromoCodeField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newPromoCodeStep(), sql.OrderByField(field, opts...))
	}
}

// ByUserField orders the results by user field.
// The ordering term is applied to the neighbor (User) row via a join.
func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...))
	}
}
|
||||||
|
func newPromoCodeStep() *sqlgraph.Step {
|
||||||
|
return sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.To(PromoCodeInverseTable, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, true, PromoCodeTable, PromoCodeColumn),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func newUserStep() *sqlgraph.Step {
|
||||||
|
return sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.To(UserInverseTable, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
|
||||||
|
)
|
||||||
|
}
|
||||||
257
backend/ent/promocodeusage/where.go
Normal file
257
backend/ent/promocodeusage/where.go
Normal file
@@ -0,0 +1,257 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package promocodeusage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ID filters vertices based on their ID field.
func ID(id int64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldEQ(FieldID, id))
}

// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldEQ(FieldID, id))
}

// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldNEQ(FieldID, id))
}

// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldIn(FieldID, ids...))
}

// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldNotIn(FieldID, ids...))
}

// IDGT applies the GT predicate on the ID field.
func IDGT(id int64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldGT(FieldID, id))
}

// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldGTE(FieldID, id))
}

// IDLT applies the LT predicate on the ID field.
func IDLT(id int64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldLT(FieldID, id))
}

// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldLTE(FieldID, id))
}

// PromoCodeID applies equality check predicate on the "promo_code_id" field. It's identical to PromoCodeIDEQ.
func PromoCodeID(v int64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldEQ(FieldPromoCodeID, v))
}

// UserID applies equality check predicate on the "user_id" field. It's identical to UserIDEQ.
func UserID(v int64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldEQ(FieldUserID, v))
}

// BonusAmount applies equality check predicate on the "bonus_amount" field. It's identical to BonusAmountEQ.
func BonusAmount(v float64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldEQ(FieldBonusAmount, v))
}

// UsedAt applies equality check predicate on the "used_at" field. It's identical to UsedAtEQ.
func UsedAt(v time.Time) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldEQ(FieldUsedAt, v))
}

// PromoCodeIDEQ applies the EQ predicate on the "promo_code_id" field.
func PromoCodeIDEQ(v int64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldEQ(FieldPromoCodeID, v))
}

// PromoCodeIDNEQ applies the NEQ predicate on the "promo_code_id" field.
func PromoCodeIDNEQ(v int64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldNEQ(FieldPromoCodeID, v))
}

// PromoCodeIDIn applies the In predicate on the "promo_code_id" field.
func PromoCodeIDIn(vs ...int64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldIn(FieldPromoCodeID, vs...))
}

// PromoCodeIDNotIn applies the NotIn predicate on the "promo_code_id" field.
func PromoCodeIDNotIn(vs ...int64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldNotIn(FieldPromoCodeID, vs...))
}

// UserIDEQ applies the EQ predicate on the "user_id" field.
func UserIDEQ(v int64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldEQ(FieldUserID, v))
}

// UserIDNEQ applies the NEQ predicate on the "user_id" field.
func UserIDNEQ(v int64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldNEQ(FieldUserID, v))
}

// UserIDIn applies the In predicate on the "user_id" field.
func UserIDIn(vs ...int64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldIn(FieldUserID, vs...))
}

// UserIDNotIn applies the NotIn predicate on the "user_id" field.
func UserIDNotIn(vs ...int64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldNotIn(FieldUserID, vs...))
}

// BonusAmountEQ applies the EQ predicate on the "bonus_amount" field.
func BonusAmountEQ(v float64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldEQ(FieldBonusAmount, v))
}

// BonusAmountNEQ applies the NEQ predicate on the "bonus_amount" field.
func BonusAmountNEQ(v float64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldNEQ(FieldBonusAmount, v))
}

// BonusAmountIn applies the In predicate on the "bonus_amount" field.
func BonusAmountIn(vs ...float64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldIn(FieldBonusAmount, vs...))
}

// BonusAmountNotIn applies the NotIn predicate on the "bonus_amount" field.
func BonusAmountNotIn(vs ...float64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldNotIn(FieldBonusAmount, vs...))
}

// BonusAmountGT applies the GT predicate on the "bonus_amount" field.
func BonusAmountGT(v float64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldGT(FieldBonusAmount, v))
}

// BonusAmountGTE applies the GTE predicate on the "bonus_amount" field.
func BonusAmountGTE(v float64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldGTE(FieldBonusAmount, v))
}

// BonusAmountLT applies the LT predicate on the "bonus_amount" field.
func BonusAmountLT(v float64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldLT(FieldBonusAmount, v))
}

// BonusAmountLTE applies the LTE predicate on the "bonus_amount" field.
func BonusAmountLTE(v float64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldLTE(FieldBonusAmount, v))
}

// UsedAtEQ applies the EQ predicate on the "used_at" field.
func UsedAtEQ(v time.Time) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldEQ(FieldUsedAt, v))
}

// UsedAtNEQ applies the NEQ predicate on the "used_at" field.
func UsedAtNEQ(v time.Time) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldNEQ(FieldUsedAt, v))
}

// UsedAtIn applies the In predicate on the "used_at" field.
func UsedAtIn(vs ...time.Time) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldIn(FieldUsedAt, vs...))
}

// UsedAtNotIn applies the NotIn predicate on the "used_at" field.
func UsedAtNotIn(vs ...time.Time) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldNotIn(FieldUsedAt, vs...))
}

// UsedAtGT applies the GT predicate on the "used_at" field.
func UsedAtGT(v time.Time) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldGT(FieldUsedAt, v))
}

// UsedAtGTE applies the GTE predicate on the "used_at" field.
func UsedAtGTE(v time.Time) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldGTE(FieldUsedAt, v))
}

// UsedAtLT applies the LT predicate on the "used_at" field.
func UsedAtLT(v time.Time) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldLT(FieldUsedAt, v))
}

// UsedAtLTE applies the LTE predicate on the "used_at" field.
func UsedAtLTE(v time.Time) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldLTE(FieldUsedAt, v))
}
|
||||||
|
|
||||||
|
// HasPromoCode applies the HasEdge predicate on the "promo_code" edge.
func HasPromoCode() predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(func(s *sql.Selector) {
		// No To() target is needed here: HasNeighbors only tests edge existence.
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, PromoCodeTable, PromoCodeColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasPromoCodeWith applies the HasEdge predicate on the "promo_code" edge with a given conditions (other predicates).
func HasPromoCodeWith(preds ...predicate.PromoCode) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(func(s *sql.Selector) {
		step := newPromoCodeStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			// All given predicates are AND-ed on the neighbor selector.
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// HasUser applies the HasEdge predicate on the "user" edge.
func HasUser() predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(func(s *sql.Selector) {
		// No To() target is needed here: HasNeighbors only tests edge existence.
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasUserWith applies the HasEdge predicate on the "user" edge with a given conditions (other predicates).
func HasUserWith(preds ...predicate.User) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(func(s *sql.Selector) {
		step := newUserStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			// All given predicates are AND-ed on the neighbor selector.
			for _, p := range preds {
				p(s)
			}
		})
	})
}
|
||||||
|
|
||||||
|
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.PromoCodeUsage) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.AndPredicates(predicates...))
}

// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.PromoCodeUsage) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.OrPredicates(predicates...))
}

// Not applies the not operator on the given predicate.
func Not(p predicate.PromoCodeUsage) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.NotPredicates(p))
}
|
||||||
696
backend/ent/promocodeusage_create.go
Normal file
696
backend/ent/promocodeusage_create.go
Normal file
@@ -0,0 +1,696 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PromoCodeUsageCreate is the builder for creating a PromoCodeUsage entity.
type PromoCodeUsageCreate struct {
	config
	// mutation accumulates the staged field/edge changes for this create.
	mutation *PromoCodeUsageMutation
	// hooks run around the save operation.
	hooks []Hook
	// conflict holds ON CONFLICT options configured via OnConflict/OnConflictColumns.
	conflict []sql.ConflictOption
}
|
||||||
|
|
||||||
|
// SetPromoCodeID sets the "promo_code_id" field.
func (_c *PromoCodeUsageCreate) SetPromoCodeID(v int64) *PromoCodeUsageCreate {
	_c.mutation.SetPromoCodeID(v)
	return _c
}

// SetUserID sets the "user_id" field.
func (_c *PromoCodeUsageCreate) SetUserID(v int64) *PromoCodeUsageCreate {
	_c.mutation.SetUserID(v)
	return _c
}

// SetBonusAmount sets the "bonus_amount" field.
func (_c *PromoCodeUsageCreate) SetBonusAmount(v float64) *PromoCodeUsageCreate {
	_c.mutation.SetBonusAmount(v)
	return _c
}

// SetUsedAt sets the "used_at" field.
func (_c *PromoCodeUsageCreate) SetUsedAt(v time.Time) *PromoCodeUsageCreate {
	_c.mutation.SetUsedAt(v)
	return _c
}

// SetNillableUsedAt sets the "used_at" field if the given value is not nil.
func (_c *PromoCodeUsageCreate) SetNillableUsedAt(v *time.Time) *PromoCodeUsageCreate {
	if v != nil {
		_c.SetUsedAt(*v)
	}
	return _c
}
|
||||||
|
|
||||||
|
// SetPromoCode sets the "promo_code" edge to the PromoCode entity.
// It is a convenience wrapper around SetPromoCodeID.
func (_c *PromoCodeUsageCreate) SetPromoCode(v *PromoCode) *PromoCodeUsageCreate {
	return _c.SetPromoCodeID(v.ID)
}

// SetUser sets the "user" edge to the User entity.
// It is a convenience wrapper around SetUserID.
func (_c *PromoCodeUsageCreate) SetUser(v *User) *PromoCodeUsageCreate {
	return _c.SetUserID(v.ID)
}

// Mutation returns the PromoCodeUsageMutation object of the builder.
func (_c *PromoCodeUsageCreate) Mutation() *PromoCodeUsageMutation {
	return _c.mutation
}
|
||||||
|
|
||||||
|
// Save creates the PromoCodeUsage in the database.
// Defaults are applied first, then the save runs inside the registered hooks.
func (_c *PromoCodeUsageCreate) Save(ctx context.Context) (*PromoCodeUsage, error) {
	_c.defaults()
	return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks)
}

// SaveX calls Save and panics if Save returns an error.
func (_c *PromoCodeUsageCreate) SaveX(ctx context.Context) *PromoCodeUsage {
	v, err := _c.Save(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Exec executes the query.
func (_c *PromoCodeUsageCreate) Exec(ctx context.Context) error {
	_, err := _c.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (_c *PromoCodeUsageCreate) ExecX(ctx context.Context) {
	if err := _c.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||||
|
|
||||||
|
// defaults sets the default values of the builder before save.
// Currently only "used_at" has a default (promocodeusage.DefaultUsedAt).
func (_c *PromoCodeUsageCreate) defaults() {
	if _, ok := _c.mutation.UsedAt(); !ok {
		v := promocodeusage.DefaultUsedAt()
		_c.mutation.SetUsedAt(v)
	}
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
// It returns a *ValidationError for the first missing required field or edge.
func (_c *PromoCodeUsageCreate) check() error {
	if _, ok := _c.mutation.PromoCodeID(); !ok {
		return &ValidationError{Name: "promo_code_id", err: errors.New(`ent: missing required field "PromoCodeUsage.promo_code_id"`)}
	}
	if _, ok := _c.mutation.UserID(); !ok {
		return &ValidationError{Name: "user_id", err: errors.New(`ent: missing required field "PromoCodeUsage.user_id"`)}
	}
	if _, ok := _c.mutation.BonusAmount(); !ok {
		return &ValidationError{Name: "bonus_amount", err: errors.New(`ent: missing required field "PromoCodeUsage.bonus_amount"`)}
	}
	if _, ok := _c.mutation.UsedAt(); !ok {
		return &ValidationError{Name: "used_at", err: errors.New(`ent: missing required field "PromoCodeUsage.used_at"`)}
	}
	// Both edges are required: the FK fields above imply the edges must resolve.
	if len(_c.mutation.PromoCodeIDs()) == 0 {
		return &ValidationError{Name: "promo_code", err: errors.New(`ent: missing required edge "PromoCodeUsage.promo_code"`)}
	}
	if len(_c.mutation.UserIDs()) == 0 {
		return &ValidationError{Name: "user", err: errors.New(`ent: missing required edge "PromoCodeUsage.user"`)}
	}
	return nil
}
|
||||||
|
|
||||||
|
// sqlSave validates the builder, inserts the row, and back-fills the
// database-generated ID into both the returned entity and the mutation.
func (_c *PromoCodeUsageCreate) sqlSave(ctx context.Context) (*PromoCodeUsage, error) {
	if err := _c.check(); err != nil {
		return nil, err
	}
	_node, _spec := _c.createSpec()
	if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil {
		// Surface driver constraint violations as ent's ConstraintError.
		if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	id := _spec.ID.Value.(int64)
	_node.ID = int64(id)
	_c.mutation.id = &_node.ID
	_c.mutation.done = true
	return _node, nil
}
|
||||||
|
|
||||||
|
// createSpec translates the staged mutation into a sqlgraph.CreateSpec plus the
// entity that will be returned to the caller. Edge FKs (promo_code_id, user_id)
// are written through edge specs rather than as plain fields.
func (_c *PromoCodeUsageCreate) createSpec() (*PromoCodeUsage, *sqlgraph.CreateSpec) {
	var (
		_node = &PromoCodeUsage{config: _c.config}
		_spec = sqlgraph.NewCreateSpec(promocodeusage.Table, sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64))
	)
	_spec.OnConflict = _c.conflict
	if value, ok := _c.mutation.BonusAmount(); ok {
		_spec.SetField(promocodeusage.FieldBonusAmount, field.TypeFloat64, value)
		_node.BonusAmount = value
	}
	if value, ok := _c.mutation.UsedAt(); ok {
		_spec.SetField(promocodeusage.FieldUsedAt, field.TypeTime, value)
		_node.UsedAt = value
	}
	if nodes := _c.mutation.PromoCodeIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   promocodeusage.PromoCodeTable,
			Columns: []string{promocodeusage.PromoCodeColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		// M2O: exactly one target; mirror it onto the returned entity's FK field.
		_node.PromoCodeID = nodes[0]
		_spec.Edges = append(_spec.Edges, edge)
	}
	if nodes := _c.mutation.UserIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   promocodeusage.UserTable,
			Columns: []string{promocodeusage.UserColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		// M2O: exactly one target; mirror it onto the returned entity's FK field.
		_node.UserID = nodes[0]
		_spec.Edges = append(_spec.Edges, edge)
	}
	return _node, _spec
}
|
||||||
|
|
||||||
|
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
|
||||||
|
// of the `INSERT` statement. For example:
|
||||||
|
//
|
||||||
|
// client.PromoCodeUsage.Create().
|
||||||
|
// SetPromoCodeID(v).
|
||||||
|
// OnConflict(
|
||||||
|
// // Update the row with the new values
|
||||||
|
// // the was proposed for insertion.
|
||||||
|
// sql.ResolveWithNewValues(),
|
||||||
|
// ).
|
||||||
|
// // Override some of the fields with custom
|
||||||
|
// // update values.
|
||||||
|
// Update(func(u *ent.PromoCodeUsageUpsert) {
|
||||||
|
// SetPromoCodeID(v+v).
|
||||||
|
// }).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (_c *PromoCodeUsageCreate) OnConflict(opts ...sql.ConflictOption) *PromoCodeUsageUpsertOne {
|
||||||
|
_c.conflict = opts
|
||||||
|
return &PromoCodeUsageUpsertOne{
|
||||||
|
create: _c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnConflictColumns calls `OnConflict` and configures the columns
|
||||||
|
// as conflict target. Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.PromoCodeUsage.Create().
|
||||||
|
// OnConflict(sql.ConflictColumns(columns...)).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (_c *PromoCodeUsageCreate) OnConflictColumns(columns ...string) *PromoCodeUsageUpsertOne {
|
||||||
|
_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
|
||||||
|
return &PromoCodeUsageUpsertOne{
|
||||||
|
create: _c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type (
|
||||||
|
// PromoCodeUsageUpsertOne is the builder for "upsert"-ing
|
||||||
|
// one PromoCodeUsage node.
|
||||||
|
PromoCodeUsageUpsertOne struct {
|
||||||
|
create *PromoCodeUsageCreate
|
||||||
|
}
|
||||||
|
|
||||||
|
// PromoCodeUsageUpsert is the "OnConflict" setter.
|
||||||
|
PromoCodeUsageUpsert struct {
|
||||||
|
*sql.UpdateSet
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// SetPromoCodeID sets the "promo_code_id" field.
|
||||||
|
func (u *PromoCodeUsageUpsert) SetPromoCodeID(v int64) *PromoCodeUsageUpsert {
|
||||||
|
u.Set(promocodeusage.FieldPromoCodeID, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatePromoCodeID sets the "promo_code_id" field to the value that was provided on create.
|
||||||
|
func (u *PromoCodeUsageUpsert) UpdatePromoCodeID() *PromoCodeUsageUpsert {
|
||||||
|
u.SetExcluded(promocodeusage.FieldPromoCodeID)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUserID sets the "user_id" field.
|
||||||
|
func (u *PromoCodeUsageUpsert) SetUserID(v int64) *PromoCodeUsageUpsert {
|
||||||
|
u.Set(promocodeusage.FieldUserID, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUserID sets the "user_id" field to the value that was provided on create.
|
||||||
|
func (u *PromoCodeUsageUpsert) UpdateUserID() *PromoCodeUsageUpsert {
|
||||||
|
u.SetExcluded(promocodeusage.FieldUserID)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetBonusAmount sets the "bonus_amount" field.
|
||||||
|
func (u *PromoCodeUsageUpsert) SetBonusAmount(v float64) *PromoCodeUsageUpsert {
|
||||||
|
u.Set(promocodeusage.FieldBonusAmount, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateBonusAmount sets the "bonus_amount" field to the value that was provided on create.
|
||||||
|
func (u *PromoCodeUsageUpsert) UpdateBonusAmount() *PromoCodeUsageUpsert {
|
||||||
|
u.SetExcluded(promocodeusage.FieldBonusAmount)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddBonusAmount adds v to the "bonus_amount" field.
|
||||||
|
func (u *PromoCodeUsageUpsert) AddBonusAmount(v float64) *PromoCodeUsageUpsert {
|
||||||
|
u.Add(promocodeusage.FieldBonusAmount, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsedAt sets the "used_at" field.
|
||||||
|
func (u *PromoCodeUsageUpsert) SetUsedAt(v time.Time) *PromoCodeUsageUpsert {
|
||||||
|
u.Set(promocodeusage.FieldUsedAt, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUsedAt sets the "used_at" field to the value that was provided on create.
|
||||||
|
func (u *PromoCodeUsageUpsert) UpdateUsedAt() *PromoCodeUsageUpsert {
|
||||||
|
u.SetExcluded(promocodeusage.FieldUsedAt)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateNewValues updates the mutable fields using the new values that were set on create.
|
||||||
|
// Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.PromoCodeUsage.Create().
|
||||||
|
// OnConflict(
|
||||||
|
// sql.ResolveWithNewValues(),
|
||||||
|
// ).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (u *PromoCodeUsageUpsertOne) UpdateNewValues() *PromoCodeUsageUpsertOne {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ignore sets each column to itself in case of conflict.
|
||||||
|
// Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.PromoCodeUsage.Create().
|
||||||
|
// OnConflict(sql.ResolveWithIgnore()).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (u *PromoCodeUsageUpsertOne) Ignore() *PromoCodeUsageUpsertOne {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// DoNothing configures the conflict_action to `DO NOTHING`.
|
||||||
|
// Supported only by SQLite and PostgreSQL.
|
||||||
|
func (u *PromoCodeUsageUpsertOne) DoNothing() *PromoCodeUsageUpsertOne {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.DoNothing())
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update allows overriding fields `UPDATE` values. See the PromoCodeUsageCreate.OnConflict
|
||||||
|
// documentation for more info.
|
||||||
|
func (u *PromoCodeUsageUpsertOne) Update(set func(*PromoCodeUsageUpsert)) *PromoCodeUsageUpsertOne {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
|
||||||
|
set(&PromoCodeUsageUpsert{UpdateSet: update})
|
||||||
|
}))
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetPromoCodeID sets the "promo_code_id" field.
|
||||||
|
func (u *PromoCodeUsageUpsertOne) SetPromoCodeID(v int64) *PromoCodeUsageUpsertOne {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.SetPromoCodeID(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatePromoCodeID sets the "promo_code_id" field to the value that was provided on create.
|
||||||
|
func (u *PromoCodeUsageUpsertOne) UpdatePromoCodeID() *PromoCodeUsageUpsertOne {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.UpdatePromoCodeID()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUserID sets the "user_id" field.
|
||||||
|
func (u *PromoCodeUsageUpsertOne) SetUserID(v int64) *PromoCodeUsageUpsertOne {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.SetUserID(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUserID sets the "user_id" field to the value that was provided on create.
|
||||||
|
func (u *PromoCodeUsageUpsertOne) UpdateUserID() *PromoCodeUsageUpsertOne {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.UpdateUserID()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetBonusAmount sets the "bonus_amount" field.
|
||||||
|
func (u *PromoCodeUsageUpsertOne) SetBonusAmount(v float64) *PromoCodeUsageUpsertOne {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.SetBonusAmount(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddBonusAmount adds v to the "bonus_amount" field.
|
||||||
|
func (u *PromoCodeUsageUpsertOne) AddBonusAmount(v float64) *PromoCodeUsageUpsertOne {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.AddBonusAmount(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateBonusAmount sets the "bonus_amount" field to the value that was provided on create.
|
||||||
|
func (u *PromoCodeUsageUpsertOne) UpdateBonusAmount() *PromoCodeUsageUpsertOne {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.UpdateBonusAmount()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsedAt sets the "used_at" field.
|
||||||
|
func (u *PromoCodeUsageUpsertOne) SetUsedAt(v time.Time) *PromoCodeUsageUpsertOne {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.SetUsedAt(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUsedAt sets the "used_at" field to the value that was provided on create.
|
||||||
|
func (u *PromoCodeUsageUpsertOne) UpdateUsedAt() *PromoCodeUsageUpsertOne {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.UpdateUsedAt()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (u *PromoCodeUsageUpsertOne) Exec(ctx context.Context) error {
|
||||||
|
if len(u.create.conflict) == 0 {
|
||||||
|
return errors.New("ent: missing options for PromoCodeUsageCreate.OnConflict")
|
||||||
|
}
|
||||||
|
return u.create.Exec(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (u *PromoCodeUsageUpsertOne) ExecX(ctx context.Context) {
|
||||||
|
if err := u.create.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the UPSERT query and returns the inserted/updated ID.
|
||||||
|
func (u *PromoCodeUsageUpsertOne) ID(ctx context.Context) (id int64, err error) {
|
||||||
|
node, err := u.create.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return id, err
|
||||||
|
}
|
||||||
|
return node.ID, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDX is like ID, but panics if an error occurs.
|
||||||
|
func (u *PromoCodeUsageUpsertOne) IDX(ctx context.Context) int64 {
|
||||||
|
id, err := u.ID(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// PromoCodeUsageCreateBulk is the builder for creating many PromoCodeUsage entities in bulk.
|
||||||
|
type PromoCodeUsageCreateBulk struct {
|
||||||
|
config
|
||||||
|
err error
|
||||||
|
builders []*PromoCodeUsageCreate
|
||||||
|
conflict []sql.ConflictOption
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save creates the PromoCodeUsage entities in the database.
|
||||||
|
func (_c *PromoCodeUsageCreateBulk) Save(ctx context.Context) ([]*PromoCodeUsage, error) {
|
||||||
|
if _c.err != nil {
|
||||||
|
return nil, _c.err
|
||||||
|
}
|
||||||
|
specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
|
||||||
|
nodes := make([]*PromoCodeUsage, len(_c.builders))
|
||||||
|
mutators := make([]Mutator, len(_c.builders))
|
||||||
|
for i := range _c.builders {
|
||||||
|
func(i int, root context.Context) {
|
||||||
|
builder := _c.builders[i]
|
||||||
|
builder.defaults()
|
||||||
|
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
|
||||||
|
mutation, ok := m.(*PromoCodeUsageMutation)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("unexpected mutation type %T", m)
|
||||||
|
}
|
||||||
|
if err := builder.check(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
builder.mutation = mutation
|
||||||
|
var err error
|
||||||
|
nodes[i], specs[i] = builder.createSpec()
|
||||||
|
if i < len(mutators)-1 {
|
||||||
|
_, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
|
||||||
|
} else {
|
||||||
|
spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
|
||||||
|
spec.OnConflict = _c.conflict
|
||||||
|
// Invoke the actual operation on the latest mutation in the chain.
|
||||||
|
if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
|
||||||
|
if sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
mutation.id = &nodes[i].ID
|
||||||
|
if specs[i].ID.Value != nil {
|
||||||
|
id := specs[i].ID.Value.(int64)
|
||||||
|
nodes[i].ID = int64(id)
|
||||||
|
}
|
||||||
|
mutation.done = true
|
||||||
|
return nodes[i], nil
|
||||||
|
})
|
||||||
|
for i := len(builder.hooks) - 1; i >= 0; i-- {
|
||||||
|
mut = builder.hooks[i](mut)
|
||||||
|
}
|
||||||
|
mutators[i] = mut
|
||||||
|
}(i, ctx)
|
||||||
|
}
|
||||||
|
if len(mutators) > 0 {
|
||||||
|
if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX is like Save, but panics if an error occurs.
|
||||||
|
func (_c *PromoCodeUsageCreateBulk) SaveX(ctx context.Context) []*PromoCodeUsage {
|
||||||
|
v, err := _c.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (_c *PromoCodeUsageCreateBulk) Exec(ctx context.Context) error {
|
||||||
|
_, err := _c.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_c *PromoCodeUsageCreateBulk) ExecX(ctx context.Context) {
|
||||||
|
if err := _c.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
|
||||||
|
// of the `INSERT` statement. For example:
|
||||||
|
//
|
||||||
|
// client.PromoCodeUsage.CreateBulk(builders...).
|
||||||
|
// OnConflict(
|
||||||
|
// // Update the row with the new values
|
||||||
|
// // the was proposed for insertion.
|
||||||
|
// sql.ResolveWithNewValues(),
|
||||||
|
// ).
|
||||||
|
// // Override some of the fields with custom
|
||||||
|
// // update values.
|
||||||
|
// Update(func(u *ent.PromoCodeUsageUpsert) {
|
||||||
|
// SetPromoCodeID(v+v).
|
||||||
|
// }).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (_c *PromoCodeUsageCreateBulk) OnConflict(opts ...sql.ConflictOption) *PromoCodeUsageUpsertBulk {
|
||||||
|
_c.conflict = opts
|
||||||
|
return &PromoCodeUsageUpsertBulk{
|
||||||
|
create: _c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnConflictColumns calls `OnConflict` and configures the columns
|
||||||
|
// as conflict target. Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.PromoCodeUsage.Create().
|
||||||
|
// OnConflict(sql.ConflictColumns(columns...)).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (_c *PromoCodeUsageCreateBulk) OnConflictColumns(columns ...string) *PromoCodeUsageUpsertBulk {
|
||||||
|
_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
|
||||||
|
return &PromoCodeUsageUpsertBulk{
|
||||||
|
create: _c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// PromoCodeUsageUpsertBulk is the builder for "upsert"-ing
|
||||||
|
// a bulk of PromoCodeUsage nodes.
|
||||||
|
type PromoCodeUsageUpsertBulk struct {
|
||||||
|
create *PromoCodeUsageCreateBulk
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateNewValues updates the mutable fields using the new values that
|
||||||
|
// were set on create. Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.PromoCodeUsage.Create().
|
||||||
|
// OnConflict(
|
||||||
|
// sql.ResolveWithNewValues(),
|
||||||
|
// ).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (u *PromoCodeUsageUpsertBulk) UpdateNewValues() *PromoCodeUsageUpsertBulk {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ignore sets each column to itself in case of conflict.
|
||||||
|
// Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.PromoCodeUsage.Create().
|
||||||
|
// OnConflict(sql.ResolveWithIgnore()).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (u *PromoCodeUsageUpsertBulk) Ignore() *PromoCodeUsageUpsertBulk {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// DoNothing configures the conflict_action to `DO NOTHING`.
|
||||||
|
// Supported only by SQLite and PostgreSQL.
|
||||||
|
func (u *PromoCodeUsageUpsertBulk) DoNothing() *PromoCodeUsageUpsertBulk {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.DoNothing())
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update allows overriding fields `UPDATE` values. See the PromoCodeUsageCreateBulk.OnConflict
|
||||||
|
// documentation for more info.
|
||||||
|
func (u *PromoCodeUsageUpsertBulk) Update(set func(*PromoCodeUsageUpsert)) *PromoCodeUsageUpsertBulk {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
|
||||||
|
set(&PromoCodeUsageUpsert{UpdateSet: update})
|
||||||
|
}))
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetPromoCodeID sets the "promo_code_id" field.
|
||||||
|
func (u *PromoCodeUsageUpsertBulk) SetPromoCodeID(v int64) *PromoCodeUsageUpsertBulk {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.SetPromoCodeID(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatePromoCodeID sets the "promo_code_id" field to the value that was provided on create.
|
||||||
|
func (u *PromoCodeUsageUpsertBulk) UpdatePromoCodeID() *PromoCodeUsageUpsertBulk {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.UpdatePromoCodeID()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUserID sets the "user_id" field.
|
||||||
|
func (u *PromoCodeUsageUpsertBulk) SetUserID(v int64) *PromoCodeUsageUpsertBulk {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.SetUserID(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUserID sets the "user_id" field to the value that was provided on create.
|
||||||
|
func (u *PromoCodeUsageUpsertBulk) UpdateUserID() *PromoCodeUsageUpsertBulk {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.UpdateUserID()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetBonusAmount sets the "bonus_amount" field.
|
||||||
|
func (u *PromoCodeUsageUpsertBulk) SetBonusAmount(v float64) *PromoCodeUsageUpsertBulk {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.SetBonusAmount(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddBonusAmount adds v to the "bonus_amount" field.
|
||||||
|
func (u *PromoCodeUsageUpsertBulk) AddBonusAmount(v float64) *PromoCodeUsageUpsertBulk {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.AddBonusAmount(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateBonusAmount sets the "bonus_amount" field to the value that was provided on create.
|
||||||
|
func (u *PromoCodeUsageUpsertBulk) UpdateBonusAmount() *PromoCodeUsageUpsertBulk {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.UpdateBonusAmount()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsedAt sets the "used_at" field.
|
||||||
|
func (u *PromoCodeUsageUpsertBulk) SetUsedAt(v time.Time) *PromoCodeUsageUpsertBulk {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.SetUsedAt(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUsedAt sets the "used_at" field to the value that was provided on create.
|
||||||
|
func (u *PromoCodeUsageUpsertBulk) UpdateUsedAt() *PromoCodeUsageUpsertBulk {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.UpdateUsedAt()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (u *PromoCodeUsageUpsertBulk) Exec(ctx context.Context) error {
|
||||||
|
if u.create.err != nil {
|
||||||
|
return u.create.err
|
||||||
|
}
|
||||||
|
for i, b := range u.create.builders {
|
||||||
|
if len(b.conflict) != 0 {
|
||||||
|
return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the PromoCodeUsageCreateBulk instead", i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(u.create.conflict) == 0 {
|
||||||
|
return errors.New("ent: missing options for PromoCodeUsageCreateBulk.OnConflict")
|
||||||
|
}
|
||||||
|
return u.create.Exec(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (u *PromoCodeUsageUpsertBulk) ExecX(ctx context.Context) {
|
||||||
|
if err := u.create.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
88
backend/ent/promocodeusage_delete.go
Normal file
88
backend/ent/promocodeusage_delete.go
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PromoCodeUsageDelete is the builder for deleting a PromoCodeUsage entity.
|
||||||
|
type PromoCodeUsageDelete struct {
|
||||||
|
config
|
||||||
|
hooks []Hook
|
||||||
|
mutation *PromoCodeUsageMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the PromoCodeUsageDelete builder.
|
||||||
|
func (_d *PromoCodeUsageDelete) Where(ps ...predicate.PromoCodeUsage) *PromoCodeUsageDelete {
|
||||||
|
_d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||||
|
func (_d *PromoCodeUsageDelete) Exec(ctx context.Context) (int, error) {
|
||||||
|
return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *PromoCodeUsageDelete) ExecX(ctx context.Context) int {
|
||||||
|
n, err := _d.Exec(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_d *PromoCodeUsageDelete) sqlExec(ctx context.Context) (int, error) {
|
||||||
|
_spec := sqlgraph.NewDeleteSpec(promocodeusage.Table, sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64))
|
||||||
|
if ps := _d.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
|
||||||
|
if err != nil && sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
_d.mutation.done = true
|
||||||
|
return affected, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// PromoCodeUsageDeleteOne is the builder for deleting a single PromoCodeUsage entity.
|
||||||
|
type PromoCodeUsageDeleteOne struct {
|
||||||
|
_d *PromoCodeUsageDelete
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the PromoCodeUsageDelete builder.
|
||||||
|
func (_d *PromoCodeUsageDeleteOne) Where(ps ...predicate.PromoCodeUsage) *PromoCodeUsageDeleteOne {
|
||||||
|
_d._d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query.
|
||||||
|
func (_d *PromoCodeUsageDeleteOne) Exec(ctx context.Context) error {
|
||||||
|
n, err := _d._d.Exec(ctx)
|
||||||
|
switch {
|
||||||
|
case err != nil:
|
||||||
|
return err
|
||||||
|
case n == 0:
|
||||||
|
return &NotFoundError{promocodeusage.Label}
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *PromoCodeUsageDeleteOne) ExecX(ctx context.Context) {
|
||||||
|
if err := _d.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
718
backend/ent/promocodeusage_query.go
Normal file
718
backend/ent/promocodeusage_query.go
Normal file
@@ -0,0 +1,718 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PromoCodeUsageQuery is the builder for querying PromoCodeUsage entities.
|
||||||
|
type PromoCodeUsageQuery struct {
|
||||||
|
config
|
||||||
|
ctx *QueryContext
|
||||||
|
order []promocodeusage.OrderOption
|
||||||
|
inters []Interceptor
|
||||||
|
predicates []predicate.PromoCodeUsage
|
||||||
|
withPromoCode *PromoCodeQuery
|
||||||
|
withUser *UserQuery
|
||||||
|
modifiers []func(*sql.Selector)
|
||||||
|
// intermediate query (i.e. traversal path).
|
||||||
|
sql *sql.Selector
|
||||||
|
path func(context.Context) (*sql.Selector, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where adds a new predicate for the PromoCodeUsageQuery builder.
|
||||||
|
func (_q *PromoCodeUsageQuery) Where(ps ...predicate.PromoCodeUsage) *PromoCodeUsageQuery {
|
||||||
|
_q.predicates = append(_q.predicates, ps...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Limit the number of records to be returned by this query.
|
||||||
|
func (_q *PromoCodeUsageQuery) Limit(limit int) *PromoCodeUsageQuery {
|
||||||
|
_q.ctx.Limit = &limit
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Offset to start from.
|
||||||
|
func (_q *PromoCodeUsageQuery) Offset(offset int) *PromoCodeUsageQuery {
|
||||||
|
_q.ctx.Offset = &offset
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unique configures the query builder to filter duplicate records on query.
|
||||||
|
// By default, unique is set to true, and can be disabled using this method.
|
||||||
|
func (_q *PromoCodeUsageQuery) Unique(unique bool) *PromoCodeUsageQuery {
|
||||||
|
_q.ctx.Unique = &unique
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Order specifies how the records should be ordered.
|
||||||
|
func (_q *PromoCodeUsageQuery) Order(o ...promocodeusage.OrderOption) *PromoCodeUsageQuery {
|
||||||
|
_q.order = append(_q.order, o...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryPromoCode chains the current query on the "promo_code" edge.
|
||||||
|
func (_q *PromoCodeUsageQuery) QueryPromoCode() *PromoCodeQuery {
|
||||||
|
query := (&PromoCodeClient{config: _q.config}).Query()
|
||||||
|
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
selector := _q.sqlQuery(ctx)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(promocodeusage.Table, promocodeusage.FieldID, selector),
|
||||||
|
sqlgraph.To(promocode.Table, promocode.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, true, promocodeusage.PromoCodeTable, promocodeusage.PromoCodeColumn),
|
||||||
|
)
|
||||||
|
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||||
|
return fromU, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryUser chains the current query on the "user" edge.
|
||||||
|
func (_q *PromoCodeUsageQuery) QueryUser() *UserQuery {
|
||||||
|
query := (&UserClient{config: _q.config}).Query()
|
||||||
|
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
selector := _q.sqlQuery(ctx)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(promocodeusage.Table, promocodeusage.FieldID, selector),
|
||||||
|
sqlgraph.To(user.Table, user.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, true, promocodeusage.UserTable, promocodeusage.UserColumn),
|
||||||
|
)
|
||||||
|
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||||
|
return fromU, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
|
// First returns the first PromoCodeUsage entity from the query.
|
||||||
|
// Returns a *NotFoundError when no PromoCodeUsage was found.
|
||||||
|
func (_q *PromoCodeUsageQuery) First(ctx context.Context) (*PromoCodeUsage, error) {
|
||||||
|
nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return nil, &NotFoundError{promocodeusage.Label}
|
||||||
|
}
|
||||||
|
return nodes[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstX is like First, but panics if an error occurs.
|
||||||
|
func (_q *PromoCodeUsageQuery) FirstX(ctx context.Context) *PromoCodeUsage {
|
||||||
|
node, err := _q.First(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstID returns the first PromoCodeUsage ID from the query.
|
||||||
|
// Returns a *NotFoundError when no PromoCodeUsage ID was found.
|
||||||
|
func (_q *PromoCodeUsageQuery) FirstID(ctx context.Context) (id int64, err error) {
|
||||||
|
var ids []int64
|
||||||
|
if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(ids) == 0 {
|
||||||
|
err = &NotFoundError{promocodeusage.Label}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return ids[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstIDX is like FirstID, but panics if an error occurs.
|
||||||
|
func (_q *PromoCodeUsageQuery) FirstIDX(ctx context.Context) int64 {
|
||||||
|
id, err := _q.FirstID(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only returns a single PromoCodeUsage entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one PromoCodeUsage entity is found.
// Returns a *NotFoundError when no PromoCodeUsage entities are found.
func (_q *PromoCodeUsageQuery) Only(ctx context.Context) (*PromoCodeUsage, error) {
	// Limit(2) fetches at most two rows: enough to distinguish "none",
	// "exactly one", and "more than one" without scanning the full result set.
	nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
	if err != nil {
		return nil, err
	}
	switch len(nodes) {
	case 1:
		return nodes[0], nil
	case 0:
		return nil, &NotFoundError{promocodeusage.Label}
	default:
		return nil, &NotSingularError{promocodeusage.Label}
	}
}

// OnlyX is like Only, but panics if an error occurs.
func (_q *PromoCodeUsageQuery) OnlyX(ctx context.Context) *PromoCodeUsage {
	node, err := _q.Only(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// OnlyID is like Only, but returns the only PromoCodeUsage ID in the query.
// Returns a *NotSingularError when more than one PromoCodeUsage ID is found.
// Returns a *NotFoundError when no entities are found.
func (_q *PromoCodeUsageQuery) OnlyID(ctx context.Context) (id int64, err error) {
	var ids []int64
	// Same Limit(2) trick as Only, but selecting IDs only.
	if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
		return
	}
	switch len(ids) {
	case 1:
		id = ids[0]
	case 0:
		err = &NotFoundError{promocodeusage.Label}
	default:
		err = &NotSingularError{promocodeusage.Label}
	}
	return
}

// OnlyIDX is like OnlyID, but panics if an error occurs.
func (_q *PromoCodeUsageQuery) OnlyIDX(ctx context.Context) int64 {
	id, err := _q.OnlyID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}
|
||||||
|
|
||||||
|
// All executes the query and returns a list of PromoCodeUsages.
func (_q *PromoCodeUsageQuery) All(ctx context.Context) ([]*PromoCodeUsage, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
	if err := _q.prepareQuery(ctx); err != nil {
		return nil, err
	}
	qr := querierAll[[]*PromoCodeUsage, *PromoCodeUsageQuery]()
	// Registered interceptors run around the underlying querier.
	return withInterceptors[[]*PromoCodeUsage](ctx, _q, qr, _q.inters)
}

// AllX is like All, but panics if an error occurs.
func (_q *PromoCodeUsageQuery) AllX(ctx context.Context) []*PromoCodeUsage {
	nodes, err := _q.All(ctx)
	if err != nil {
		panic(err)
	}
	return nodes
}

// IDs executes the query and returns a list of PromoCodeUsage IDs.
func (_q *PromoCodeUsageQuery) IDs(ctx context.Context) (ids []int64, err error) {
	// Force unique IDs for path-composed (traversal) queries when
	// uniqueness was not set explicitly by the caller.
	if _q.ctx.Unique == nil && _q.path != nil {
		_q.Unique(true)
	}
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
	if err = _q.Select(promocodeusage.FieldID).Scan(ctx, &ids); err != nil {
		return nil, err
	}
	return ids, nil
}

// IDsX is like IDs, but panics if an error occurs.
func (_q *PromoCodeUsageQuery) IDsX(ctx context.Context) []int64 {
	ids, err := _q.IDs(ctx)
	if err != nil {
		panic(err)
	}
	return ids
}
|
||||||
|
|
||||||
|
// Count returns the count of the given query.
func (_q *PromoCodeUsageQuery) Count(ctx context.Context) (int, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
	if err := _q.prepareQuery(ctx); err != nil {
		return 0, err
	}
	return withInterceptors[int](ctx, _q, querierCount[*PromoCodeUsageQuery](), _q.inters)
}

// CountX is like Count, but panics if an error occurs.
func (_q *PromoCodeUsageQuery) CountX(ctx context.Context) int {
	count, err := _q.Count(ctx)
	if err != nil {
		panic(err)
	}
	return count
}

// Exist returns true if the query has elements in the graph.
func (_q *PromoCodeUsageQuery) Exist(ctx context.Context) (bool, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
	// Fetching a single ID is cheaper than counting all matching rows.
	switch _, err := _q.FirstID(ctx); {
	case IsNotFound(err):
		return false, nil
	case err != nil:
		return false, fmt.Errorf("ent: check existence: %w", err)
	default:
		return true, nil
	}
}

// ExistX is like Exist, but panics if an error occurs.
func (_q *PromoCodeUsageQuery) ExistX(ctx context.Context) bool {
	exist, err := _q.Exist(ctx)
	if err != nil {
		panic(err)
	}
	return exist
}
|
||||||
|
|
||||||
|
// Clone returns a duplicate of the PromoCodeUsageQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
func (_q *PromoCodeUsageQuery) Clone() *PromoCodeUsageQuery {
	// Cloning a nil query is a no-op; callers may chain on a nil builder.
	if _q == nil {
		return nil
	}
	return &PromoCodeUsageQuery{
		config:        _q.config,
		ctx:           _q.ctx.Clone(),
		order:         append([]promocodeusage.OrderOption{}, _q.order...),
		inters:        append([]Interceptor{}, _q.inters...),
		predicates:    append([]predicate.PromoCodeUsage{}, _q.predicates...),
		withPromoCode: _q.withPromoCode.Clone(),
		withUser:      _q.withUser.Clone(),
		// clone intermediate query.
		sql:  _q.sql.Clone(),
		path: _q.path,
	}
}
|
||||||
|
|
||||||
|
// WithPromoCode tells the query-builder to eager-load the nodes that are connected to
// the "promo_code" edge. The optional arguments are used to configure the query builder of the edge.
func (_q *PromoCodeUsageQuery) WithPromoCode(opts ...func(*PromoCodeQuery)) *PromoCodeUsageQuery {
	query := (&PromoCodeClient{config: _q.config}).Query()
	// Each option customizes the eager-load query (filters, ordering, etc.).
	for _, opt := range opts {
		opt(query)
	}
	_q.withPromoCode = query
	return _q
}

// WithUser tells the query-builder to eager-load the nodes that are connected to
// the "user" edge. The optional arguments are used to configure the query builder of the edge.
func (_q *PromoCodeUsageQuery) WithUser(opts ...func(*UserQuery)) *PromoCodeUsageQuery {
	query := (&UserClient{config: _q.config}).Query()
	for _, opt := range opts {
		opt(query)
	}
	_q.withUser = query
	return _q
}
|
||||||
|
|
||||||
|
// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
//	var v []struct {
//		PromoCodeID int64 `json:"promo_code_id,omitempty"`
//		Count int `json:"count,omitempty"`
//	}
//
//	client.PromoCodeUsage.Query().
//		GroupBy(promocodeusage.FieldPromoCodeID).
//		Aggregate(ent.Count()).
//		Scan(ctx, &v)
func (_q *PromoCodeUsageQuery) GroupBy(field string, fields ...string) *PromoCodeUsageGroupBy {
	_q.ctx.Fields = append([]string{field}, fields...)
	grbuild := &PromoCodeUsageGroupBy{build: _q}
	// The group-by builder shares the query's field list by pointer, so
	// later changes to either are visible to both.
	grbuild.flds = &_q.ctx.Fields
	grbuild.label = promocodeusage.Label
	grbuild.scan = grbuild.Scan
	return grbuild
}

// Select allows the selection one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
//	var v []struct {
//		PromoCodeID int64 `json:"promo_code_id,omitempty"`
//	}
//
//	client.PromoCodeUsage.Query().
//		Select(promocodeusage.FieldPromoCodeID).
//		Scan(ctx, &v)
func (_q *PromoCodeUsageQuery) Select(fields ...string) *PromoCodeUsageSelect {
	_q.ctx.Fields = append(_q.ctx.Fields, fields...)
	sbuild := &PromoCodeUsageSelect{PromoCodeUsageQuery: _q}
	sbuild.label = promocodeusage.Label
	sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
	return sbuild
}

// Aggregate returns a PromoCodeUsageSelect configured with the given aggregations.
func (_q *PromoCodeUsageQuery) Aggregate(fns ...AggregateFunc) *PromoCodeUsageSelect {
	return _q.Select().Aggregate(fns...)
}
|
||||||
|
|
||||||
|
// prepareQuery validates the query before execution: it runs Traverser
// interceptors, checks selected columns, and resolves a path-composed
// intermediate query into _q.sql.
func (_q *PromoCodeUsageQuery) prepareQuery(ctx context.Context) error {
	for _, inter := range _q.inters {
		if inter == nil {
			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
		}
		// Traversers get a chance to inspect/mutate the query before it runs.
		if trv, ok := inter.(Traverser); ok {
			if err := trv.Traverse(ctx, _q); err != nil {
				return err
			}
		}
	}
	// Reject unknown column names early, before building SQL.
	for _, f := range _q.ctx.Fields {
		if !promocodeusage.ValidColumn(f) {
			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
		}
	}
	if _q.path != nil {
		prev, err := _q.path(ctx)
		if err != nil {
			return err
		}
		_q.sql = prev
	}
	return nil
}
|
||||||
|
|
||||||
|
// sqlAll executes the query against the database, scans all rows into
// PromoCodeUsage entities, and eager-loads any requested edges.
func (_q *PromoCodeUsageQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*PromoCodeUsage, error) {
	var (
		nodes = []*PromoCodeUsage{}
		_spec = _q.querySpec()
		// loadedTypes records which edges were eager-loaded, in edge order:
		// [0] promo_code, [1] user.
		loadedTypes = [2]bool{
			_q.withPromoCode != nil,
			_q.withUser != nil,
		}
	)
	_spec.ScanValues = func(columns []string) ([]any, error) {
		return (*PromoCodeUsage).scanValues(nil, columns)
	}
	_spec.Assign = func(columns []string, values []any) error {
		node := &PromoCodeUsage{config: _q.config}
		nodes = append(nodes, node)
		node.Edges.loadedTypes = loadedTypes
		return node.assignValues(columns, values)
	}
	if len(_q.modifiers) > 0 {
		_spec.Modifiers = _q.modifiers
	}
	// Query hooks may adjust the spec just before execution.
	for i := range hooks {
		hooks[i](ctx, _spec)
	}
	if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nodes, nil
	}
	if query := _q.withPromoCode; query != nil {
		if err := _q.loadPromoCode(ctx, query, nodes, nil,
			func(n *PromoCodeUsage, e *PromoCode) { n.Edges.PromoCode = e }); err != nil {
			return nil, err
		}
	}
	if query := _q.withUser; query != nil {
		if err := _q.loadUser(ctx, query, nodes, nil,
			func(n *PromoCodeUsage, e *User) { n.Edges.User = e }); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
|
||||||
|
|
||||||
|
// loadPromoCode eager-loads the "promo_code" M2O edge for the given nodes:
// it collects distinct promo_code_id foreign keys, fetches the referenced
// PromoCode rows in one query, and assigns each to its owning usages.
func (_q *PromoCodeUsageQuery) loadPromoCode(ctx context.Context, query *PromoCodeQuery, nodes []*PromoCodeUsage, init func(*PromoCodeUsage), assign func(*PromoCodeUsage, *PromoCode)) error {
	ids := make([]int64, 0, len(nodes))
	// nodeids maps each FK to all usages sharing it, so one neighbor can be
	// assigned to many nodes.
	nodeids := make(map[int64][]*PromoCodeUsage)
	for i := range nodes {
		fk := nodes[i].PromoCodeID
		if _, ok := nodeids[fk]; !ok {
			ids = append(ids, fk)
		}
		nodeids[fk] = append(nodeids[fk], nodes[i])
	}
	if len(ids) == 0 {
		return nil
	}
	query.Where(promocode.IDIn(ids...))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		nodes, ok := nodeids[n.ID]
		if !ok {
			return fmt.Errorf(`unexpected foreign-key "promo_code_id" returned %v`, n.ID)
		}
		for i := range nodes {
			assign(nodes[i], n)
		}
	}
	return nil
}
// loadUser eager-loads the "user" M2O edge for the given nodes, using the
// same FK-grouping strategy as loadPromoCode.
func (_q *PromoCodeUsageQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*PromoCodeUsage, init func(*PromoCodeUsage), assign func(*PromoCodeUsage, *User)) error {
	ids := make([]int64, 0, len(nodes))
	nodeids := make(map[int64][]*PromoCodeUsage)
	for i := range nodes {
		fk := nodes[i].UserID
		if _, ok := nodeids[fk]; !ok {
			ids = append(ids, fk)
		}
		nodeids[fk] = append(nodeids[fk], nodes[i])
	}
	if len(ids) == 0 {
		return nil
	}
	query.Where(user.IDIn(ids...))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		nodes, ok := nodeids[n.ID]
		if !ok {
			return fmt.Errorf(`unexpected foreign-key "user_id" returned %v`, n.ID)
		}
		for i := range nodes {
			assign(nodes[i], n)
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
// sqlCount executes a COUNT over the query's spec.
func (_q *PromoCodeUsageQuery) sqlCount(ctx context.Context) (int, error) {
	_spec := _q.querySpec()
	if len(_q.modifiers) > 0 {
		_spec.Modifiers = _q.modifiers
	}
	_spec.Node.Columns = _q.ctx.Fields
	// With an explicit field selection, only honor DISTINCT if the caller
	// requested uniqueness explicitly.
	if len(_q.ctx.Fields) > 0 {
		_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
	}
	return sqlgraph.CountNodes(ctx, _q.driver, _spec)
}
|
||||||
|
|
||||||
|
// querySpec translates the builder state (fields, predicates, ordering,
// limit/offset, uniqueness) into a sqlgraph.QuerySpec.
func (_q *PromoCodeUsageQuery) querySpec() *sqlgraph.QuerySpec {
	_spec := sqlgraph.NewQuerySpec(promocodeusage.Table, promocodeusage.Columns, sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64))
	_spec.From = _q.sql
	if unique := _q.ctx.Unique; unique != nil {
		_spec.Unique = *unique
	} else if _q.path != nil {
		// Path-composed (traversal) queries default to unique results.
		_spec.Unique = true
	}
	if fields := _q.ctx.Fields; len(fields) > 0 {
		// The ID column is always selected first; user fields follow,
		// skipping a duplicate ID if the caller selected it too.
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, promocodeusage.FieldID)
		for i := range fields {
			if fields[i] != promocodeusage.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
			}
		}
		// Eager-loaded edges need their FK columns present in the result.
		if _q.withPromoCode != nil {
			_spec.Node.AddColumnOnce(promocodeusage.FieldPromoCodeID)
		}
		if _q.withUser != nil {
			_spec.Node.AddColumnOnce(promocodeusage.FieldUserID)
		}
	}
	if ps := _q.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if limit := _q.ctx.Limit; limit != nil {
		_spec.Limit = *limit
	}
	if offset := _q.ctx.Offset; offset != nil {
		_spec.Offset = *offset
	}
	if ps := _q.order; len(ps) > 0 {
		_spec.Order = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	return _spec
}
|
||||||
|
|
||||||
|
// sqlQuery builds the raw SQL selector for this query, applying modifiers,
// predicates, ordering, and pagination.
func (_q *PromoCodeUsageQuery) sqlQuery(ctx context.Context) *sql.Selector {
	builder := sql.Dialect(_q.driver.Dialect())
	t1 := builder.Table(promocodeusage.Table)
	columns := _q.ctx.Fields
	if len(columns) == 0 {
		columns = promocodeusage.Columns
	}
	selector := builder.Select(t1.Columns(columns...)...).From(t1)
	// A path-composed intermediate query replaces the fresh selector.
	if _q.sql != nil {
		selector = _q.sql
		selector.Select(selector.Columns(columns...)...)
	}
	if _q.ctx.Unique != nil && *_q.ctx.Unique {
		selector.Distinct()
	}
	for _, m := range _q.modifiers {
		m(selector)
	}
	for _, p := range _q.predicates {
		p(selector)
	}
	for _, p := range _q.order {
		p(selector)
	}
	if offset := _q.ctx.Offset; offset != nil {
		// limit is mandatory for offset clause. We start
		// with default value, and override it below if needed.
		selector.Offset(*offset).Limit(math.MaxInt32)
	}
	if limit := _q.ctx.Limit; limit != nil {
		selector.Limit(*limit)
	}
	return selector
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
// either committed or rolled-back.
func (_q *PromoCodeUsageQuery) ForUpdate(opts ...sql.LockOption) *PromoCodeUsageQuery {
	// Postgres disallows SELECT DISTINCT ... FOR UPDATE, so drop uniqueness.
	if _q.driver.Dialect() == dialect.Postgres {
		_q.Unique(false)
	}
	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
		s.ForUpdate(opts...)
	})
	return _q
}

// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
// on any rows that are read. Other sessions can read the rows, but cannot modify them
// until your transaction commits.
func (_q *PromoCodeUsageQuery) ForShare(opts ...sql.LockOption) *PromoCodeUsageQuery {
	if _q.driver.Dialect() == dialect.Postgres {
		_q.Unique(false)
	}
	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
		s.ForShare(opts...)
	})
	return _q
}
|
||||||
|
|
||||||
|
// PromoCodeUsageGroupBy is the group-by builder for PromoCodeUsage entities.
type PromoCodeUsageGroupBy struct {
	selector
	// build is the underlying query the GROUP BY runs over.
	build *PromoCodeUsageQuery
}

// Aggregate adds the given aggregation functions to the group-by query.
func (_g *PromoCodeUsageGroupBy) Aggregate(fns ...AggregateFunc) *PromoCodeUsageGroupBy {
	_g.fns = append(_g.fns, fns...)
	return _g
}

// Scan applies the selector query and scans the result into the given value.
func (_g *PromoCodeUsageGroupBy) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
	if err := _g.build.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*PromoCodeUsageQuery, *PromoCodeUsageGroupBy](ctx, _g.build, _g, _g.build.inters, v)
}

// sqlScan builds and executes the GROUP BY statement, scanning rows into v.
func (_g *PromoCodeUsageGroupBy) sqlScan(ctx context.Context, root *PromoCodeUsageQuery, v any) error {
	selector := root.sqlQuery(ctx).Select()
	aggregation := make([]string, 0, len(_g.fns))
	for _, fn := range _g.fns {
		aggregation = append(aggregation, fn(selector))
	}
	// If no columns were selected yet, select the group-by fields followed
	// by the aggregation expressions.
	if len(selector.SelectedColumns()) == 0 {
		columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
		for _, f := range *_g.flds {
			columns = append(columns, selector.C(f))
		}
		columns = append(columns, aggregation...)
		selector.Select(columns...)
	}
	selector.GroupBy(selector.Columns(*_g.flds...)...)
	if err := selector.Err(); err != nil {
		return err
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||||
|
|
||||||
|
// PromoCodeUsageSelect is the builder for selecting fields of PromoCodeUsage entities.
type PromoCodeUsageSelect struct {
	*PromoCodeUsageQuery
	selector
}

// Aggregate adds the given aggregation functions to the selector query.
func (_s *PromoCodeUsageSelect) Aggregate(fns ...AggregateFunc) *PromoCodeUsageSelect {
	_s.fns = append(_s.fns, fns...)
	return _s
}

// Scan applies the selector query and scans the result into the given value.
func (_s *PromoCodeUsageSelect) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
	if err := _s.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*PromoCodeUsageQuery, *PromoCodeUsageSelect](ctx, _s.PromoCodeUsageQuery, _s, _s.inters, v)
}

// sqlScan builds and executes the SELECT statement, scanning rows into v.
func (_s *PromoCodeUsageSelect) sqlScan(ctx context.Context, root *PromoCodeUsageQuery, v any) error {
	selector := root.sqlQuery(ctx)
	aggregation := make([]string, 0, len(_s.fns))
	for _, fn := range _s.fns {
		aggregation = append(aggregation, fn(selector))
	}
	// Aggregations either replace the selection (no fields chosen) or are
	// appended to the chosen fields.
	switch n := len(*_s.selector.flds); {
	case n == 0 && len(aggregation) > 0:
		selector.Select(aggregation...)
	case n != 0 && len(aggregation) > 0:
		selector.AppendSelect(aggregation...)
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _s.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||||
510
backend/ent/promocodeusage_update.go
Normal file
510
backend/ent/promocodeusage_update.go
Normal file
@@ -0,0 +1,510 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PromoCodeUsageUpdate is the builder for updating PromoCodeUsage entities.
type PromoCodeUsageUpdate struct {
	config
	hooks    []Hook
	mutation *PromoCodeUsageMutation
}

// Where appends a list predicates to the PromoCodeUsageUpdate builder.
func (_u *PromoCodeUsageUpdate) Where(ps ...predicate.PromoCodeUsage) *PromoCodeUsageUpdate {
	_u.mutation.Where(ps...)
	return _u
}

// SetPromoCodeID sets the "promo_code_id" field.
func (_u *PromoCodeUsageUpdate) SetPromoCodeID(v int64) *PromoCodeUsageUpdate {
	_u.mutation.SetPromoCodeID(v)
	return _u
}

// SetNillablePromoCodeID sets the "promo_code_id" field if the given value is not nil.
func (_u *PromoCodeUsageUpdate) SetNillablePromoCodeID(v *int64) *PromoCodeUsageUpdate {
	if v != nil {
		_u.SetPromoCodeID(*v)
	}
	return _u
}

// SetUserID sets the "user_id" field.
func (_u *PromoCodeUsageUpdate) SetUserID(v int64) *PromoCodeUsageUpdate {
	_u.mutation.SetUserID(v)
	return _u
}

// SetNillableUserID sets the "user_id" field if the given value is not nil.
func (_u *PromoCodeUsageUpdate) SetNillableUserID(v *int64) *PromoCodeUsageUpdate {
	if v != nil {
		_u.SetUserID(*v)
	}
	return _u
}

// SetBonusAmount sets the "bonus_amount" field.
func (_u *PromoCodeUsageUpdate) SetBonusAmount(v float64) *PromoCodeUsageUpdate {
	// Reset clears any pending additive change so Set is an absolute write.
	_u.mutation.ResetBonusAmount()
	_u.mutation.SetBonusAmount(v)
	return _u
}

// SetNillableBonusAmount sets the "bonus_amount" field if the given value is not nil.
func (_u *PromoCodeUsageUpdate) SetNillableBonusAmount(v *float64) *PromoCodeUsageUpdate {
	if v != nil {
		_u.SetBonusAmount(*v)
	}
	return _u
}

// AddBonusAmount adds value to the "bonus_amount" field.
func (_u *PromoCodeUsageUpdate) AddBonusAmount(v float64) *PromoCodeUsageUpdate {
	_u.mutation.AddBonusAmount(v)
	return _u
}

// SetUsedAt sets the "used_at" field.
func (_u *PromoCodeUsageUpdate) SetUsedAt(v time.Time) *PromoCodeUsageUpdate {
	_u.mutation.SetUsedAt(v)
	return _u
}

// SetNillableUsedAt sets the "used_at" field if the given value is not nil.
func (_u *PromoCodeUsageUpdate) SetNillableUsedAt(v *time.Time) *PromoCodeUsageUpdate {
	if v != nil {
		_u.SetUsedAt(*v)
	}
	return _u
}

// SetPromoCode sets the "promo_code" edge to the PromoCode entity.
func (_u *PromoCodeUsageUpdate) SetPromoCode(v *PromoCode) *PromoCodeUsageUpdate {
	return _u.SetPromoCodeID(v.ID)
}

// SetUser sets the "user" edge to the User entity.
func (_u *PromoCodeUsageUpdate) SetUser(v *User) *PromoCodeUsageUpdate {
	return _u.SetUserID(v.ID)
}

// Mutation returns the PromoCodeUsageMutation object of the builder.
func (_u *PromoCodeUsageUpdate) Mutation() *PromoCodeUsageMutation {
	return _u.mutation
}

// ClearPromoCode clears the "promo_code" edge to the PromoCode entity.
func (_u *PromoCodeUsageUpdate) ClearPromoCode() *PromoCodeUsageUpdate {
	_u.mutation.ClearPromoCode()
	return _u
}

// ClearUser clears the "user" edge to the User entity.
func (_u *PromoCodeUsageUpdate) ClearUser() *PromoCodeUsageUpdate {
	_u.mutation.ClearUser()
	return _u
}
|
||||||
|
|
||||||
|
// Save executes the query and returns the number of nodes affected by the update operation.
func (_u *PromoCodeUsageUpdate) Save(ctx context.Context) (int, error) {
	return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (_u *PromoCodeUsageUpdate) SaveX(ctx context.Context) int {
	affected, err := _u.Save(ctx)
	if err != nil {
		panic(err)
	}
	return affected
}

// Exec executes the query.
func (_u *PromoCodeUsageUpdate) Exec(ctx context.Context) error {
	_, err := _u.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (_u *PromoCodeUsageUpdate) ExecX(ctx context.Context) {
	if err := _u.Exec(ctx); err != nil {
		panic(err)
	}
}

// check runs all checks and user-defined validators on the builder.
func (_u *PromoCodeUsageUpdate) check() error {
	// Both edges are required: an update may not clear them while also
	// assigning new IDs, and may not leave them cleared.
	if _u.mutation.PromoCodeCleared() && len(_u.mutation.PromoCodeIDs()) > 0 {
		return errors.New(`ent: clearing a required unique edge "PromoCodeUsage.promo_code"`)
	}
	if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 {
		return errors.New(`ent: clearing a required unique edge "PromoCodeUsage.user"`)
	}
	return nil
}
|
||||||
|
|
||||||
|
// sqlSave validates the builder, translates the mutation into an update
// spec (field writes plus edge clear/add operations), and executes it,
// returning the number of affected rows.
func (_u *PromoCodeUsageUpdate) sqlSave(ctx context.Context) (_node int, err error) {
	if err := _u.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(promocodeusage.Table, promocodeusage.Columns, sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64))
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if value, ok := _u.mutation.BonusAmount(); ok {
		_spec.SetField(promocodeusage.FieldBonusAmount, field.TypeFloat64, value)
	}
	if value, ok := _u.mutation.AddedBonusAmount(); ok {
		_spec.AddField(promocodeusage.FieldBonusAmount, field.TypeFloat64, value)
	}
	if value, ok := _u.mutation.UsedAt(); ok {
		_spec.SetField(promocodeusage.FieldUsedAt, field.TypeTime, value)
	}
	// promo_code edge: clear the old FK first, then add the new target.
	if _u.mutation.PromoCodeCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   promocodeusage.PromoCodeTable,
			Columns: []string{promocodeusage.PromoCodeColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.PromoCodeIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   promocodeusage.PromoCodeTable,
			Columns: []string{promocodeusage.PromoCodeColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// user edge: same clear-then-add sequence.
	if _u.mutation.UserCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   promocodeusage.UserTable,
			Columns: []string{promocodeusage.UserColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.UserIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   promocodeusage.UserTable,
			Columns: []string{promocodeusage.UserColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
		// Normalize sqlgraph errors into the package's error types.
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{promocodeusage.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return 0, err
	}
	_u.mutation.done = true
	return _node, nil
}
|
||||||
|
|
||||||
|
// PromoCodeUsageUpdateOne is the builder for updating a single PromoCodeUsage entity.
type PromoCodeUsageUpdateOne struct {
	config
	// fields optionally restricts which columns are returned after the update.
	fields   []string
	hooks    []Hook
	mutation *PromoCodeUsageMutation
}

// SetPromoCodeID sets the "promo_code_id" field.
func (_u *PromoCodeUsageUpdateOne) SetPromoCodeID(v int64) *PromoCodeUsageUpdateOne {
	_u.mutation.SetPromoCodeID(v)
	return _u
}

// SetNillablePromoCodeID sets the "promo_code_id" field if the given value is not nil.
func (_u *PromoCodeUsageUpdateOne) SetNillablePromoCodeID(v *int64) *PromoCodeUsageUpdateOne {
	if v != nil {
		_u.SetPromoCodeID(*v)
	}
	return _u
}

// SetUserID sets the "user_id" field.
func (_u *PromoCodeUsageUpdateOne) SetUserID(v int64) *PromoCodeUsageUpdateOne {
	_u.mutation.SetUserID(v)
	return _u
}

// SetNillableUserID sets the "user_id" field if the given value is not nil.
func (_u *PromoCodeUsageUpdateOne) SetNillableUserID(v *int64) *PromoCodeUsageUpdateOne {
	if v != nil {
		_u.SetUserID(*v)
	}
	return _u
}

// SetBonusAmount sets the "bonus_amount" field.
func (_u *PromoCodeUsageUpdateOne) SetBonusAmount(v float64) *PromoCodeUsageUpdateOne {
	// Reset clears any pending additive change so Set is an absolute write.
	_u.mutation.ResetBonusAmount()
	_u.mutation.SetBonusAmount(v)
	return _u
}

// SetNillableBonusAmount sets the "bonus_amount" field if the given value is not nil.
func (_u *PromoCodeUsageUpdateOne) SetNillableBonusAmount(v *float64) *PromoCodeUsageUpdateOne {
	if v != nil {
		_u.SetBonusAmount(*v)
	}
	return _u
}

// AddBonusAmount adds value to the "bonus_amount" field.
func (_u *PromoCodeUsageUpdateOne) AddBonusAmount(v float64) *PromoCodeUsageUpdateOne {
	_u.mutation.AddBonusAmount(v)
	return _u
}

// SetUsedAt sets the "used_at" field.
func (_u *PromoCodeUsageUpdateOne) SetUsedAt(v time.Time) *PromoCodeUsageUpdateOne {
	_u.mutation.SetUsedAt(v)
	return _u
}

// SetNillableUsedAt sets the "used_at" field if the given value is not nil.
func (_u *PromoCodeUsageUpdateOne) SetNillableUsedAt(v *time.Time) *PromoCodeUsageUpdateOne {
	if v != nil {
		_u.SetUsedAt(*v)
	}
	return _u
}

// SetPromoCode sets the "promo_code" edge to the PromoCode entity.
func (_u *PromoCodeUsageUpdateOne) SetPromoCode(v *PromoCode) *PromoCodeUsageUpdateOne {
	return _u.SetPromoCodeID(v.ID)
}

// SetUser sets the "user" edge to the User entity.
func (_u *PromoCodeUsageUpdateOne) SetUser(v *User) *PromoCodeUsageUpdateOne {
	return _u.SetUserID(v.ID)
}
|
||||||
|
|
||||||
|
// Mutation returns the PromoCodeUsageMutation object of the builder.
|
||||||
|
func (_u *PromoCodeUsageUpdateOne) Mutation() *PromoCodeUsageMutation {
|
||||||
|
return _u.mutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearPromoCode clears the "promo_code" edge to the PromoCode entity.
|
||||||
|
func (_u *PromoCodeUsageUpdateOne) ClearPromoCode() *PromoCodeUsageUpdateOne {
|
||||||
|
_u.mutation.ClearPromoCode()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearUser clears the "user" edge to the User entity.
|
||||||
|
func (_u *PromoCodeUsageUpdateOne) ClearUser() *PromoCodeUsageUpdateOne {
|
||||||
|
_u.mutation.ClearUser()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the PromoCodeUsageUpdate builder.
|
||||||
|
func (_u *PromoCodeUsageUpdateOne) Where(ps ...predicate.PromoCodeUsage) *PromoCodeUsageUpdateOne {
|
||||||
|
_u.mutation.Where(ps...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select allows selecting one or more fields (columns) of the returned entity.
|
||||||
|
// The default is selecting all fields defined in the entity schema.
|
||||||
|
func (_u *PromoCodeUsageUpdateOne) Select(field string, fields ...string) *PromoCodeUsageUpdateOne {
|
||||||
|
_u.fields = append([]string{field}, fields...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save executes the query and returns the updated PromoCodeUsage entity.
|
||||||
|
func (_u *PromoCodeUsageUpdateOne) Save(ctx context.Context) (*PromoCodeUsage, error) {
|
||||||
|
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX is like Save, but panics if an error occurs.
|
||||||
|
func (_u *PromoCodeUsageUpdateOne) SaveX(ctx context.Context) *PromoCodeUsage {
|
||||||
|
node, err := _u.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query on the entity.
|
||||||
|
func (_u *PromoCodeUsageUpdateOne) Exec(ctx context.Context) error {
|
||||||
|
_, err := _u.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_u *PromoCodeUsageUpdateOne) ExecX(ctx context.Context) {
|
||||||
|
if err := _u.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
|
||||||
|
func (_u *PromoCodeUsageUpdateOne) check() error {
|
||||||
|
if _u.mutation.PromoCodeCleared() && len(_u.mutation.PromoCodeIDs()) > 0 {
|
||||||
|
return errors.New(`ent: clearing a required unique edge "PromoCodeUsage.promo_code"`)
|
||||||
|
}
|
||||||
|
if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 {
|
||||||
|
return errors.New(`ent: clearing a required unique edge "PromoCodeUsage.user"`)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_u *PromoCodeUsageUpdateOne) sqlSave(ctx context.Context) (_node *PromoCodeUsage, err error) {
|
||||||
|
if err := _u.check(); err != nil {
|
||||||
|
return _node, err
|
||||||
|
}
|
||||||
|
_spec := sqlgraph.NewUpdateSpec(promocodeusage.Table, promocodeusage.Columns, sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64))
|
||||||
|
id, ok := _u.mutation.ID()
|
||||||
|
if !ok {
|
||||||
|
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "PromoCodeUsage.id" for update`)}
|
||||||
|
}
|
||||||
|
_spec.Node.ID.Value = id
|
||||||
|
if fields := _u.fields; len(fields) > 0 {
|
||||||
|
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, promocodeusage.FieldID)
|
||||||
|
for _, f := range fields {
|
||||||
|
if !promocodeusage.ValidColumn(f) {
|
||||||
|
return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||||
|
}
|
||||||
|
if f != promocodeusage.FieldID {
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, f)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ps := _u.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.BonusAmount(); ok {
|
||||||
|
_spec.SetField(promocodeusage.FieldBonusAmount, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedBonusAmount(); ok {
|
||||||
|
_spec.AddField(promocodeusage.FieldBonusAmount, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.UsedAt(); ok {
|
||||||
|
_spec.SetField(promocodeusage.FieldUsedAt, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.PromoCodeCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: promocodeusage.PromoCodeTable,
|
||||||
|
Columns: []string{promocodeusage.PromoCodeColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.PromoCodeIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: promocodeusage.PromoCodeTable,
|
||||||
|
Columns: []string{promocodeusage.PromoCodeColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||||
|
}
|
||||||
|
if _u.mutation.UserCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: promocodeusage.UserTable,
|
||||||
|
Columns: []string{promocodeusage.UserColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.UserIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: promocodeusage.UserTable,
|
||||||
|
Columns: []string{promocodeusage.UserColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||||
|
}
|
||||||
|
_node = &PromoCodeUsage{config: _u.config}
|
||||||
|
_spec.Assign = _node.assignValues
|
||||||
|
_spec.ScanValues = _node.scanValues
|
||||||
|
if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil {
|
||||||
|
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||||
|
err = &NotFoundError{promocodeusage.Label}
|
||||||
|
} else if sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
_u.mutation.done = true
|
||||||
|
return _node, nil
|
||||||
|
}
|
||||||
@@ -9,6 +9,7 @@ import (
|
|||||||
"math"
|
"math"
|
||||||
|
|
||||||
"entgo.io/ent"
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
"entgo.io/ent/dialect/sql"
|
"entgo.io/ent/dialect/sql"
|
||||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
"entgo.io/ent/schema/field"
|
"entgo.io/ent/schema/field"
|
||||||
@@ -25,6 +26,7 @@ type ProxyQuery struct {
|
|||||||
inters []Interceptor
|
inters []Interceptor
|
||||||
predicates []predicate.Proxy
|
predicates []predicate.Proxy
|
||||||
withAccounts *AccountQuery
|
withAccounts *AccountQuery
|
||||||
|
modifiers []func(*sql.Selector)
|
||||||
// intermediate query (i.e. traversal path).
|
// intermediate query (i.e. traversal path).
|
||||||
sql *sql.Selector
|
sql *sql.Selector
|
||||||
path func(context.Context) (*sql.Selector, error)
|
path func(context.Context) (*sql.Selector, error)
|
||||||
@@ -384,6 +386,9 @@ func (_q *ProxyQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Proxy,
|
|||||||
node.Edges.loadedTypes = loadedTypes
|
node.Edges.loadedTypes = loadedTypes
|
||||||
return node.assignValues(columns, values)
|
return node.assignValues(columns, values)
|
||||||
}
|
}
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
for i := range hooks {
|
for i := range hooks {
|
||||||
hooks[i](ctx, _spec)
|
hooks[i](ctx, _spec)
|
||||||
}
|
}
|
||||||
@@ -439,6 +444,9 @@ func (_q *ProxyQuery) loadAccounts(ctx context.Context, query *AccountQuery, nod
|
|||||||
|
|
||||||
func (_q *ProxyQuery) sqlCount(ctx context.Context) (int, error) {
|
func (_q *ProxyQuery) sqlCount(ctx context.Context) (int, error) {
|
||||||
_spec := _q.querySpec()
|
_spec := _q.querySpec()
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
_spec.Node.Columns = _q.ctx.Fields
|
_spec.Node.Columns = _q.ctx.Fields
|
||||||
if len(_q.ctx.Fields) > 0 {
|
if len(_q.ctx.Fields) > 0 {
|
||||||
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
||||||
@@ -501,6 +509,9 @@ func (_q *ProxyQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
|||||||
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
||||||
selector.Distinct()
|
selector.Distinct()
|
||||||
}
|
}
|
||||||
|
for _, m := range _q.modifiers {
|
||||||
|
m(selector)
|
||||||
|
}
|
||||||
for _, p := range _q.predicates {
|
for _, p := range _q.predicates {
|
||||||
p(selector)
|
p(selector)
|
||||||
}
|
}
|
||||||
@@ -518,6 +529,32 @@ func (_q *ProxyQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
|||||||
return selector
|
return selector
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||||
|
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||||
|
// either committed or rolled-back.
|
||||||
|
func (_q *ProxyQuery) ForUpdate(opts ...sql.LockOption) *ProxyQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForUpdate(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||||
|
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||||
|
// until your transaction commits.
|
||||||
|
func (_q *ProxyQuery) ForShare(opts ...sql.LockOption) *ProxyQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForShare(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
// ProxyGroupBy is the group-by builder for Proxy entities.
|
// ProxyGroupBy is the group-by builder for Proxy entities.
|
||||||
type ProxyGroupBy struct {
|
type ProxyGroupBy struct {
|
||||||
selector
|
selector
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ import (
|
|||||||
"math"
|
"math"
|
||||||
|
|
||||||
"entgo.io/ent"
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
"entgo.io/ent/dialect/sql"
|
"entgo.io/ent/dialect/sql"
|
||||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
"entgo.io/ent/schema/field"
|
"entgo.io/ent/schema/field"
|
||||||
@@ -26,6 +27,7 @@ type RedeemCodeQuery struct {
|
|||||||
predicates []predicate.RedeemCode
|
predicates []predicate.RedeemCode
|
||||||
withUser *UserQuery
|
withUser *UserQuery
|
||||||
withGroup *GroupQuery
|
withGroup *GroupQuery
|
||||||
|
modifiers []func(*sql.Selector)
|
||||||
// intermediate query (i.e. traversal path).
|
// intermediate query (i.e. traversal path).
|
||||||
sql *sql.Selector
|
sql *sql.Selector
|
||||||
path func(context.Context) (*sql.Selector, error)
|
path func(context.Context) (*sql.Selector, error)
|
||||||
@@ -420,6 +422,9 @@ func (_q *RedeemCodeQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*R
|
|||||||
node.Edges.loadedTypes = loadedTypes
|
node.Edges.loadedTypes = loadedTypes
|
||||||
return node.assignValues(columns, values)
|
return node.assignValues(columns, values)
|
||||||
}
|
}
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
for i := range hooks {
|
for i := range hooks {
|
||||||
hooks[i](ctx, _spec)
|
hooks[i](ctx, _spec)
|
||||||
}
|
}
|
||||||
@@ -511,6 +516,9 @@ func (_q *RedeemCodeQuery) loadGroup(ctx context.Context, query *GroupQuery, nod
|
|||||||
|
|
||||||
func (_q *RedeemCodeQuery) sqlCount(ctx context.Context) (int, error) {
|
func (_q *RedeemCodeQuery) sqlCount(ctx context.Context) (int, error) {
|
||||||
_spec := _q.querySpec()
|
_spec := _q.querySpec()
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
_spec.Node.Columns = _q.ctx.Fields
|
_spec.Node.Columns = _q.ctx.Fields
|
||||||
if len(_q.ctx.Fields) > 0 {
|
if len(_q.ctx.Fields) > 0 {
|
||||||
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
||||||
@@ -579,6 +587,9 @@ func (_q *RedeemCodeQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
|||||||
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
||||||
selector.Distinct()
|
selector.Distinct()
|
||||||
}
|
}
|
||||||
|
for _, m := range _q.modifiers {
|
||||||
|
m(selector)
|
||||||
|
}
|
||||||
for _, p := range _q.predicates {
|
for _, p := range _q.predicates {
|
||||||
p(selector)
|
p(selector)
|
||||||
}
|
}
|
||||||
@@ -596,6 +607,32 @@ func (_q *RedeemCodeQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
|||||||
return selector
|
return selector
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||||
|
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||||
|
// either committed or rolled-back.
|
||||||
|
func (_q *RedeemCodeQuery) ForUpdate(opts ...sql.LockOption) *RedeemCodeQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForUpdate(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||||
|
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||||
|
// until your transaction commits.
|
||||||
|
func (_q *RedeemCodeQuery) ForShare(opts ...sql.LockOption) *RedeemCodeQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForShare(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
// RedeemCodeGroupBy is the group-by builder for RedeemCode entities.
|
// RedeemCodeGroupBy is the group-by builder for RedeemCode entities.
|
||||||
type RedeemCodeGroupBy struct {
|
type RedeemCodeGroupBy struct {
|
||||||
selector
|
selector
|
||||||
|
|||||||
@@ -9,6 +9,8 @@ import (
|
|||||||
"github.com/Wei-Shaw/sub2api/ent/accountgroup"
|
"github.com/Wei-Shaw/sub2api/ent/accountgroup"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/group"
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/proxy"
|
"github.com/Wei-Shaw/sub2api/ent/proxy"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/schema"
|
"github.com/Wei-Shaw/sub2api/ent/schema"
|
||||||
@@ -274,6 +276,60 @@ func init() {
|
|||||||
groupDescClaudeCodeOnly := groupFields[14].Descriptor()
|
groupDescClaudeCodeOnly := groupFields[14].Descriptor()
|
||||||
// group.DefaultClaudeCodeOnly holds the default value on creation for the claude_code_only field.
|
// group.DefaultClaudeCodeOnly holds the default value on creation for the claude_code_only field.
|
||||||
group.DefaultClaudeCodeOnly = groupDescClaudeCodeOnly.Default.(bool)
|
group.DefaultClaudeCodeOnly = groupDescClaudeCodeOnly.Default.(bool)
|
||||||
|
promocodeFields := schema.PromoCode{}.Fields()
|
||||||
|
_ = promocodeFields
|
||||||
|
// promocodeDescCode is the schema descriptor for code field.
|
||||||
|
promocodeDescCode := promocodeFields[0].Descriptor()
|
||||||
|
// promocode.CodeValidator is a validator for the "code" field. It is called by the builders before save.
|
||||||
|
promocode.CodeValidator = func() func(string) error {
|
||||||
|
validators := promocodeDescCode.Validators
|
||||||
|
fns := [...]func(string) error{
|
||||||
|
validators[0].(func(string) error),
|
||||||
|
validators[1].(func(string) error),
|
||||||
|
}
|
||||||
|
return func(code string) error {
|
||||||
|
for _, fn := range fns {
|
||||||
|
if err := fn(code); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
// promocodeDescBonusAmount is the schema descriptor for bonus_amount field.
|
||||||
|
promocodeDescBonusAmount := promocodeFields[1].Descriptor()
|
||||||
|
// promocode.DefaultBonusAmount holds the default value on creation for the bonus_amount field.
|
||||||
|
promocode.DefaultBonusAmount = promocodeDescBonusAmount.Default.(float64)
|
||||||
|
// promocodeDescMaxUses is the schema descriptor for max_uses field.
|
||||||
|
promocodeDescMaxUses := promocodeFields[2].Descriptor()
|
||||||
|
// promocode.DefaultMaxUses holds the default value on creation for the max_uses field.
|
||||||
|
promocode.DefaultMaxUses = promocodeDescMaxUses.Default.(int)
|
||||||
|
// promocodeDescUsedCount is the schema descriptor for used_count field.
|
||||||
|
promocodeDescUsedCount := promocodeFields[3].Descriptor()
|
||||||
|
// promocode.DefaultUsedCount holds the default value on creation for the used_count field.
|
||||||
|
promocode.DefaultUsedCount = promocodeDescUsedCount.Default.(int)
|
||||||
|
// promocodeDescStatus is the schema descriptor for status field.
|
||||||
|
promocodeDescStatus := promocodeFields[4].Descriptor()
|
||||||
|
// promocode.DefaultStatus holds the default value on creation for the status field.
|
||||||
|
promocode.DefaultStatus = promocodeDescStatus.Default.(string)
|
||||||
|
// promocode.StatusValidator is a validator for the "status" field. It is called by the builders before save.
|
||||||
|
promocode.StatusValidator = promocodeDescStatus.Validators[0].(func(string) error)
|
||||||
|
// promocodeDescCreatedAt is the schema descriptor for created_at field.
|
||||||
|
promocodeDescCreatedAt := promocodeFields[7].Descriptor()
|
||||||
|
// promocode.DefaultCreatedAt holds the default value on creation for the created_at field.
|
||||||
|
promocode.DefaultCreatedAt = promocodeDescCreatedAt.Default.(func() time.Time)
|
||||||
|
// promocodeDescUpdatedAt is the schema descriptor for updated_at field.
|
||||||
|
promocodeDescUpdatedAt := promocodeFields[8].Descriptor()
|
||||||
|
// promocode.DefaultUpdatedAt holds the default value on creation for the updated_at field.
|
||||||
|
promocode.DefaultUpdatedAt = promocodeDescUpdatedAt.Default.(func() time.Time)
|
||||||
|
// promocode.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
|
||||||
|
promocode.UpdateDefaultUpdatedAt = promocodeDescUpdatedAt.UpdateDefault.(func() time.Time)
|
||||||
|
promocodeusageFields := schema.PromoCodeUsage{}.Fields()
|
||||||
|
_ = promocodeusageFields
|
||||||
|
// promocodeusageDescUsedAt is the schema descriptor for used_at field.
|
||||||
|
promocodeusageDescUsedAt := promocodeusageFields[3].Descriptor()
|
||||||
|
// promocodeusage.DefaultUsedAt holds the default value on creation for the used_at field.
|
||||||
|
promocodeusage.DefaultUsedAt = promocodeusageDescUsedAt.Default.(func() time.Time)
|
||||||
proxyMixin := schema.Proxy{}.Mixin()
|
proxyMixin := schema.Proxy{}.Mixin()
|
||||||
proxyMixinHooks1 := proxyMixin[1].Hooks()
|
proxyMixinHooks1 := proxyMixin[1].Hooks()
|
||||||
proxy.Hooks[0] = proxyMixinHooks1[0]
|
proxy.Hooks[0] = proxyMixinHooks1[0]
|
||||||
@@ -533,16 +589,20 @@ func init() {
|
|||||||
usagelogDescUserAgent := usagelogFields[24].Descriptor()
|
usagelogDescUserAgent := usagelogFields[24].Descriptor()
|
||||||
// usagelog.UserAgentValidator is a validator for the "user_agent" field. It is called by the builders before save.
|
// usagelog.UserAgentValidator is a validator for the "user_agent" field. It is called by the builders before save.
|
||||||
usagelog.UserAgentValidator = usagelogDescUserAgent.Validators[0].(func(string) error)
|
usagelog.UserAgentValidator = usagelogDescUserAgent.Validators[0].(func(string) error)
|
||||||
|
// usagelogDescIPAddress is the schema descriptor for ip_address field.
|
||||||
|
usagelogDescIPAddress := usagelogFields[25].Descriptor()
|
||||||
|
// usagelog.IPAddressValidator is a validator for the "ip_address" field. It is called by the builders before save.
|
||||||
|
usagelog.IPAddressValidator = usagelogDescIPAddress.Validators[0].(func(string) error)
|
||||||
// usagelogDescImageCount is the schema descriptor for image_count field.
|
// usagelogDescImageCount is the schema descriptor for image_count field.
|
||||||
usagelogDescImageCount := usagelogFields[25].Descriptor()
|
usagelogDescImageCount := usagelogFields[26].Descriptor()
|
||||||
// usagelog.DefaultImageCount holds the default value on creation for the image_count field.
|
// usagelog.DefaultImageCount holds the default value on creation for the image_count field.
|
||||||
usagelog.DefaultImageCount = usagelogDescImageCount.Default.(int)
|
usagelog.DefaultImageCount = usagelogDescImageCount.Default.(int)
|
||||||
// usagelogDescImageSize is the schema descriptor for image_size field.
|
// usagelogDescImageSize is the schema descriptor for image_size field.
|
||||||
usagelogDescImageSize := usagelogFields[26].Descriptor()
|
usagelogDescImageSize := usagelogFields[27].Descriptor()
|
||||||
// usagelog.ImageSizeValidator is a validator for the "image_size" field. It is called by the builders before save.
|
// usagelog.ImageSizeValidator is a validator for the "image_size" field. It is called by the builders before save.
|
||||||
usagelog.ImageSizeValidator = usagelogDescImageSize.Validators[0].(func(string) error)
|
usagelog.ImageSizeValidator = usagelogDescImageSize.Validators[0].(func(string) error)
|
||||||
// usagelogDescCreatedAt is the schema descriptor for created_at field.
|
// usagelogDescCreatedAt is the schema descriptor for created_at field.
|
||||||
usagelogDescCreatedAt := usagelogFields[27].Descriptor()
|
usagelogDescCreatedAt := usagelogFields[28].Descriptor()
|
||||||
// usagelog.DefaultCreatedAt holds the default value on creation for the created_at field.
|
// usagelog.DefaultCreatedAt holds the default value on creation for the created_at field.
|
||||||
usagelog.DefaultCreatedAt = usagelogDescCreatedAt.Default.(func() time.Time)
|
usagelog.DefaultCreatedAt = usagelogDescCreatedAt.Default.(func() time.Time)
|
||||||
userMixin := schema.User{}.Mixin()
|
userMixin := schema.User{}.Mixin()
|
||||||
|
|||||||
@@ -46,6 +46,12 @@ func (APIKey) Fields() []ent.Field {
|
|||||||
field.String("status").
|
field.String("status").
|
||||||
MaxLen(20).
|
MaxLen(20).
|
||||||
Default(service.StatusActive),
|
Default(service.StatusActive),
|
||||||
|
field.JSON("ip_whitelist", []string{}).
|
||||||
|
Optional().
|
||||||
|
Comment("Allowed IPs/CIDRs, e.g. [\"192.168.1.100\", \"10.0.0.0/8\"]"),
|
||||||
|
field.JSON("ip_blacklist", []string{}).
|
||||||
|
Optional().
|
||||||
|
Comment("Blocked IPs/CIDRs"),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
87
backend/ent/schema/promo_code.go
Normal file
87
backend/ent/schema/promo_code.go
Normal file
@@ -0,0 +1,87 @@
|
|||||||
|
package schema
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
|
"entgo.io/ent/dialect/entsql"
|
||||||
|
"entgo.io/ent/schema"
|
||||||
|
"entgo.io/ent/schema/edge"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"entgo.io/ent/schema/index"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PromoCode holds the schema definition for the PromoCode entity.
|
||||||
|
//
|
||||||
|
// 注册优惠码:用户注册时使用,可获得赠送余额
|
||||||
|
// 与 RedeemCode 不同,PromoCode 支持多次使用(有使用次数限制)
|
||||||
|
//
|
||||||
|
// 删除策略:硬删除
|
||||||
|
type PromoCode struct {
|
||||||
|
ent.Schema
|
||||||
|
}
|
||||||
|
|
||||||
|
func (PromoCode) Annotations() []schema.Annotation {
|
||||||
|
return []schema.Annotation{
|
||||||
|
entsql.Annotation{Table: "promo_codes"},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (PromoCode) Fields() []ent.Field {
|
||||||
|
return []ent.Field{
|
||||||
|
field.String("code").
|
||||||
|
MaxLen(32).
|
||||||
|
NotEmpty().
|
||||||
|
Unique().
|
||||||
|
Comment("优惠码"),
|
||||||
|
field.Float("bonus_amount").
|
||||||
|
SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}).
|
||||||
|
Default(0).
|
||||||
|
Comment("赠送余额金额"),
|
||||||
|
field.Int("max_uses").
|
||||||
|
Default(0).
|
||||||
|
Comment("最大使用次数,0表示无限制"),
|
||||||
|
field.Int("used_count").
|
||||||
|
Default(0).
|
||||||
|
Comment("已使用次数"),
|
||||||
|
field.String("status").
|
||||||
|
MaxLen(20).
|
||||||
|
Default(service.PromoCodeStatusActive).
|
||||||
|
Comment("状态: active, disabled"),
|
||||||
|
field.Time("expires_at").
|
||||||
|
Optional().
|
||||||
|
Nillable().
|
||||||
|
SchemaType(map[string]string{dialect.Postgres: "timestamptz"}).
|
||||||
|
Comment("过期时间,null表示永不过期"),
|
||||||
|
field.String("notes").
|
||||||
|
Optional().
|
||||||
|
Nillable().
|
||||||
|
SchemaType(map[string]string{dialect.Postgres: "text"}).
|
||||||
|
Comment("备注"),
|
||||||
|
field.Time("created_at").
|
||||||
|
Immutable().
|
||||||
|
Default(time.Now).
|
||||||
|
SchemaType(map[string]string{dialect.Postgres: "timestamptz"}),
|
||||||
|
field.Time("updated_at").
|
||||||
|
Default(time.Now).
|
||||||
|
UpdateDefault(time.Now).
|
||||||
|
SchemaType(map[string]string{dialect.Postgres: "timestamptz"}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (PromoCode) Edges() []ent.Edge {
|
||||||
|
return []ent.Edge{
|
||||||
|
edge.To("usage_records", PromoCodeUsage.Type),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (PromoCode) Indexes() []ent.Index {
|
||||||
|
return []ent.Index{
|
||||||
|
// code 字段已在 Fields() 中声明 Unique(),无需重复索引
|
||||||
|
index.Fields("status"),
|
||||||
|
index.Fields("expires_at"),
|
||||||
|
}
|
||||||
|
}
|
||||||
66
backend/ent/schema/promo_code_usage.go
Normal file
66
backend/ent/schema/promo_code_usage.go
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
package schema
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
|
"entgo.io/ent/dialect/entsql"
|
||||||
|
"entgo.io/ent/schema"
|
||||||
|
"entgo.io/ent/schema/edge"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"entgo.io/ent/schema/index"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PromoCodeUsage holds the schema definition for the PromoCodeUsage entity.
|
||||||
|
//
|
||||||
|
// 优惠码使用记录:记录每个用户使用优惠码的情况
|
||||||
|
type PromoCodeUsage struct {
|
||||||
|
ent.Schema
|
||||||
|
}
|
||||||
|
|
||||||
|
func (PromoCodeUsage) Annotations() []schema.Annotation {
|
||||||
|
return []schema.Annotation{
|
||||||
|
entsql.Annotation{Table: "promo_code_usages"},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (PromoCodeUsage) Fields() []ent.Field {
|
||||||
|
return []ent.Field{
|
||||||
|
field.Int64("promo_code_id").
|
||||||
|
Comment("优惠码ID"),
|
||||||
|
field.Int64("user_id").
|
||||||
|
Comment("使用用户ID"),
|
||||||
|
field.Float("bonus_amount").
|
||||||
|
SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}).
|
||||||
|
Comment("实际赠送金额"),
|
||||||
|
field.Time("used_at").
|
||||||
|
Default(time.Now).
|
||||||
|
SchemaType(map[string]string{dialect.Postgres: "timestamptz"}).
|
||||||
|
Comment("使用时间"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (PromoCodeUsage) Edges() []ent.Edge {
|
||||||
|
return []ent.Edge{
|
||||||
|
edge.From("promo_code", PromoCode.Type).
|
||||||
|
Ref("usage_records").
|
||||||
|
Field("promo_code_id").
|
||||||
|
Required().
|
||||||
|
Unique(),
|
||||||
|
edge.From("user", User.Type).
|
||||||
|
Ref("promo_code_usages").
|
||||||
|
Field("user_id").
|
||||||
|
Required().
|
||||||
|
Unique(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (PromoCodeUsage) Indexes() []ent.Index {
|
||||||
|
return []ent.Index{
|
||||||
|
index.Fields("promo_code_id"),
|
||||||
|
index.Fields("user_id"),
|
||||||
|
// 每个用户每个优惠码只能使用一次
|
||||||
|
index.Fields("promo_code_id", "user_id").Unique(),
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -100,6 +100,10 @@ func (UsageLog) Fields() []ent.Field {
|
|||||||
MaxLen(512).
|
MaxLen(512).
|
||||||
Optional().
|
Optional().
|
||||||
Nillable(),
|
Nillable(),
|
||||||
|
field.String("ip_address").
|
||||||
|
MaxLen(45). // 支持 IPv6
|
||||||
|
Optional().
|
||||||
|
Nillable(),
|
||||||
|
|
||||||
// 图片生成字段(仅 gemini-3-pro-image 等图片模型使用)
|
// 图片生成字段(仅 gemini-3-pro-image 等图片模型使用)
|
||||||
field.Int("image_count").
|
field.Int("image_count").
|
||||||
|
|||||||
@@ -74,6 +74,7 @@ func (User) Edges() []ent.Edge {
|
|||||||
Through("user_allowed_groups", UserAllowedGroup.Type),
|
Through("user_allowed_groups", UserAllowedGroup.Type),
|
||||||
edge.To("usage_logs", UsageLog.Type),
|
edge.To("usage_logs", UsageLog.Type),
|
||||||
edge.To("attribute_values", UserAttributeValue.Type),
|
edge.To("attribute_values", UserAttributeValue.Type),
|
||||||
|
edge.To("promo_code_usages", PromoCodeUsage.Type),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ import (
|
|||||||
"math"
|
"math"
|
||||||
|
|
||||||
"entgo.io/ent"
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
"entgo.io/ent/dialect/sql"
|
"entgo.io/ent/dialect/sql"
|
||||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
"entgo.io/ent/schema/field"
|
"entgo.io/ent/schema/field"
|
||||||
@@ -22,6 +23,7 @@ type SettingQuery struct {
|
|||||||
order []setting.OrderOption
|
order []setting.OrderOption
|
||||||
inters []Interceptor
|
inters []Interceptor
|
||||||
predicates []predicate.Setting
|
predicates []predicate.Setting
|
||||||
|
modifiers []func(*sql.Selector)
|
||||||
// intermediate query (i.e. traversal path).
|
// intermediate query (i.e. traversal path).
|
||||||
sql *sql.Selector
|
sql *sql.Selector
|
||||||
path func(context.Context) (*sql.Selector, error)
|
path func(context.Context) (*sql.Selector, error)
|
||||||
@@ -343,6 +345,9 @@ func (_q *SettingQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Sett
|
|||||||
nodes = append(nodes, node)
|
nodes = append(nodes, node)
|
||||||
return node.assignValues(columns, values)
|
return node.assignValues(columns, values)
|
||||||
}
|
}
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
for i := range hooks {
|
for i := range hooks {
|
||||||
hooks[i](ctx, _spec)
|
hooks[i](ctx, _spec)
|
||||||
}
|
}
|
||||||
@@ -357,6 +362,9 @@ func (_q *SettingQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Sett
|
|||||||
|
|
||||||
func (_q *SettingQuery) sqlCount(ctx context.Context) (int, error) {
|
func (_q *SettingQuery) sqlCount(ctx context.Context) (int, error) {
|
||||||
_spec := _q.querySpec()
|
_spec := _q.querySpec()
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
_spec.Node.Columns = _q.ctx.Fields
|
_spec.Node.Columns = _q.ctx.Fields
|
||||||
if len(_q.ctx.Fields) > 0 {
|
if len(_q.ctx.Fields) > 0 {
|
||||||
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
||||||
@@ -419,6 +427,9 @@ func (_q *SettingQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
|||||||
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
||||||
selector.Distinct()
|
selector.Distinct()
|
||||||
}
|
}
|
||||||
|
for _, m := range _q.modifiers {
|
||||||
|
m(selector)
|
||||||
|
}
|
||||||
for _, p := range _q.predicates {
|
for _, p := range _q.predicates {
|
||||||
p(selector)
|
p(selector)
|
||||||
}
|
}
|
||||||
@@ -436,6 +447,32 @@ func (_q *SettingQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
|||||||
return selector
|
return selector
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||||
|
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||||
|
// either committed or rolled-back.
|
||||||
|
func (_q *SettingQuery) ForUpdate(opts ...sql.LockOption) *SettingQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForUpdate(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||||
|
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||||
|
// until your transaction commits.
|
||||||
|
func (_q *SettingQuery) ForShare(opts ...sql.LockOption) *SettingQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForShare(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
// SettingGroupBy is the group-by builder for Setting entities.
|
// SettingGroupBy is the group-by builder for Setting entities.
|
||||||
type SettingGroupBy struct {
|
type SettingGroupBy struct {
|
||||||
selector
|
selector
|
||||||
|
|||||||
@@ -22,6 +22,10 @@ type Tx struct {
|
|||||||
AccountGroup *AccountGroupClient
|
AccountGroup *AccountGroupClient
|
||||||
// Group is the client for interacting with the Group builders.
|
// Group is the client for interacting with the Group builders.
|
||||||
Group *GroupClient
|
Group *GroupClient
|
||||||
|
// PromoCode is the client for interacting with the PromoCode builders.
|
||||||
|
PromoCode *PromoCodeClient
|
||||||
|
// PromoCodeUsage is the client for interacting with the PromoCodeUsage builders.
|
||||||
|
PromoCodeUsage *PromoCodeUsageClient
|
||||||
// Proxy is the client for interacting with the Proxy builders.
|
// Proxy is the client for interacting with the Proxy builders.
|
||||||
Proxy *ProxyClient
|
Proxy *ProxyClient
|
||||||
// RedeemCode is the client for interacting with the RedeemCode builders.
|
// RedeemCode is the client for interacting with the RedeemCode builders.
|
||||||
@@ -175,6 +179,8 @@ func (tx *Tx) init() {
|
|||||||
tx.Account = NewAccountClient(tx.config)
|
tx.Account = NewAccountClient(tx.config)
|
||||||
tx.AccountGroup = NewAccountGroupClient(tx.config)
|
tx.AccountGroup = NewAccountGroupClient(tx.config)
|
||||||
tx.Group = NewGroupClient(tx.config)
|
tx.Group = NewGroupClient(tx.config)
|
||||||
|
tx.PromoCode = NewPromoCodeClient(tx.config)
|
||||||
|
tx.PromoCodeUsage = NewPromoCodeUsageClient(tx.config)
|
||||||
tx.Proxy = NewProxyClient(tx.config)
|
tx.Proxy = NewProxyClient(tx.config)
|
||||||
tx.RedeemCode = NewRedeemCodeClient(tx.config)
|
tx.RedeemCode = NewRedeemCodeClient(tx.config)
|
||||||
tx.Setting = NewSettingClient(tx.config)
|
tx.Setting = NewSettingClient(tx.config)
|
||||||
|
|||||||
@@ -72,6 +72,8 @@ type UsageLog struct {
|
|||||||
FirstTokenMs *int `json:"first_token_ms,omitempty"`
|
FirstTokenMs *int `json:"first_token_ms,omitempty"`
|
||||||
// UserAgent holds the value of the "user_agent" field.
|
// UserAgent holds the value of the "user_agent" field.
|
||||||
UserAgent *string `json:"user_agent,omitempty"`
|
UserAgent *string `json:"user_agent,omitempty"`
|
||||||
|
// IPAddress holds the value of the "ip_address" field.
|
||||||
|
IPAddress *string `json:"ip_address,omitempty"`
|
||||||
// ImageCount holds the value of the "image_count" field.
|
// ImageCount holds the value of the "image_count" field.
|
||||||
ImageCount int `json:"image_count,omitempty"`
|
ImageCount int `json:"image_count,omitempty"`
|
||||||
// ImageSize holds the value of the "image_size" field.
|
// ImageSize holds the value of the "image_size" field.
|
||||||
@@ -167,7 +169,7 @@ func (*UsageLog) scanValues(columns []string) ([]any, error) {
|
|||||||
values[i] = new(sql.NullFloat64)
|
values[i] = new(sql.NullFloat64)
|
||||||
case usagelog.FieldID, usagelog.FieldUserID, usagelog.FieldAPIKeyID, usagelog.FieldAccountID, usagelog.FieldGroupID, usagelog.FieldSubscriptionID, usagelog.FieldInputTokens, usagelog.FieldOutputTokens, usagelog.FieldCacheCreationTokens, usagelog.FieldCacheReadTokens, usagelog.FieldCacheCreation5mTokens, usagelog.FieldCacheCreation1hTokens, usagelog.FieldBillingType, usagelog.FieldDurationMs, usagelog.FieldFirstTokenMs, usagelog.FieldImageCount:
|
case usagelog.FieldID, usagelog.FieldUserID, usagelog.FieldAPIKeyID, usagelog.FieldAccountID, usagelog.FieldGroupID, usagelog.FieldSubscriptionID, usagelog.FieldInputTokens, usagelog.FieldOutputTokens, usagelog.FieldCacheCreationTokens, usagelog.FieldCacheReadTokens, usagelog.FieldCacheCreation5mTokens, usagelog.FieldCacheCreation1hTokens, usagelog.FieldBillingType, usagelog.FieldDurationMs, usagelog.FieldFirstTokenMs, usagelog.FieldImageCount:
|
||||||
values[i] = new(sql.NullInt64)
|
values[i] = new(sql.NullInt64)
|
||||||
case usagelog.FieldRequestID, usagelog.FieldModel, usagelog.FieldUserAgent, usagelog.FieldImageSize:
|
case usagelog.FieldRequestID, usagelog.FieldModel, usagelog.FieldUserAgent, usagelog.FieldIPAddress, usagelog.FieldImageSize:
|
||||||
values[i] = new(sql.NullString)
|
values[i] = new(sql.NullString)
|
||||||
case usagelog.FieldCreatedAt:
|
case usagelog.FieldCreatedAt:
|
||||||
values[i] = new(sql.NullTime)
|
values[i] = new(sql.NullTime)
|
||||||
@@ -347,6 +349,13 @@ func (_m *UsageLog) assignValues(columns []string, values []any) error {
|
|||||||
_m.UserAgent = new(string)
|
_m.UserAgent = new(string)
|
||||||
*_m.UserAgent = value.String
|
*_m.UserAgent = value.String
|
||||||
}
|
}
|
||||||
|
case usagelog.FieldIPAddress:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field ip_address", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.IPAddress = new(string)
|
||||||
|
*_m.IPAddress = value.String
|
||||||
|
}
|
||||||
case usagelog.FieldImageCount:
|
case usagelog.FieldImageCount:
|
||||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
return fmt.Errorf("unexpected type %T for field image_count", values[i])
|
return fmt.Errorf("unexpected type %T for field image_count", values[i])
|
||||||
@@ -512,6 +521,11 @@ func (_m *UsageLog) String() string {
|
|||||||
builder.WriteString(*v)
|
builder.WriteString(*v)
|
||||||
}
|
}
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
|
if v := _m.IPAddress; v != nil {
|
||||||
|
builder.WriteString("ip_address=")
|
||||||
|
builder.WriteString(*v)
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
builder.WriteString("image_count=")
|
builder.WriteString("image_count=")
|
||||||
builder.WriteString(fmt.Sprintf("%v", _m.ImageCount))
|
builder.WriteString(fmt.Sprintf("%v", _m.ImageCount))
|
||||||
builder.WriteString(", ")
|
builder.WriteString(", ")
|
||||||
|
|||||||
@@ -64,6 +64,8 @@ const (
|
|||||||
FieldFirstTokenMs = "first_token_ms"
|
FieldFirstTokenMs = "first_token_ms"
|
||||||
// FieldUserAgent holds the string denoting the user_agent field in the database.
|
// FieldUserAgent holds the string denoting the user_agent field in the database.
|
||||||
FieldUserAgent = "user_agent"
|
FieldUserAgent = "user_agent"
|
||||||
|
// FieldIPAddress holds the string denoting the ip_address field in the database.
|
||||||
|
FieldIPAddress = "ip_address"
|
||||||
// FieldImageCount holds the string denoting the image_count field in the database.
|
// FieldImageCount holds the string denoting the image_count field in the database.
|
||||||
FieldImageCount = "image_count"
|
FieldImageCount = "image_count"
|
||||||
// FieldImageSize holds the string denoting the image_size field in the database.
|
// FieldImageSize holds the string denoting the image_size field in the database.
|
||||||
@@ -147,6 +149,7 @@ var Columns = []string{
|
|||||||
FieldDurationMs,
|
FieldDurationMs,
|
||||||
FieldFirstTokenMs,
|
FieldFirstTokenMs,
|
||||||
FieldUserAgent,
|
FieldUserAgent,
|
||||||
|
FieldIPAddress,
|
||||||
FieldImageCount,
|
FieldImageCount,
|
||||||
FieldImageSize,
|
FieldImageSize,
|
||||||
FieldCreatedAt,
|
FieldCreatedAt,
|
||||||
@@ -199,6 +202,8 @@ var (
|
|||||||
DefaultStream bool
|
DefaultStream bool
|
||||||
// UserAgentValidator is a validator for the "user_agent" field. It is called by the builders before save.
|
// UserAgentValidator is a validator for the "user_agent" field. It is called by the builders before save.
|
||||||
UserAgentValidator func(string) error
|
UserAgentValidator func(string) error
|
||||||
|
// IPAddressValidator is a validator for the "ip_address" field. It is called by the builders before save.
|
||||||
|
IPAddressValidator func(string) error
|
||||||
// DefaultImageCount holds the default value on creation for the "image_count" field.
|
// DefaultImageCount holds the default value on creation for the "image_count" field.
|
||||||
DefaultImageCount int
|
DefaultImageCount int
|
||||||
// ImageSizeValidator is a validator for the "image_size" field. It is called by the builders before save.
|
// ImageSizeValidator is a validator for the "image_size" field. It is called by the builders before save.
|
||||||
@@ -340,6 +345,11 @@ func ByUserAgent(opts ...sql.OrderTermOption) OrderOption {
|
|||||||
return sql.OrderByField(FieldUserAgent, opts...).ToFunc()
|
return sql.OrderByField(FieldUserAgent, opts...).ToFunc()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ByIPAddress orders the results by the ip_address field.
|
||||||
|
func ByIPAddress(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldIPAddress, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
// ByImageCount orders the results by the image_count field.
|
// ByImageCount orders the results by the image_count field.
|
||||||
func ByImageCount(opts ...sql.OrderTermOption) OrderOption {
|
func ByImageCount(opts ...sql.OrderTermOption) OrderOption {
|
||||||
return sql.OrderByField(FieldImageCount, opts...).ToFunc()
|
return sql.OrderByField(FieldImageCount, opts...).ToFunc()
|
||||||
|
|||||||
@@ -180,6 +180,11 @@ func UserAgent(v string) predicate.UsageLog {
|
|||||||
return predicate.UsageLog(sql.FieldEQ(FieldUserAgent, v))
|
return predicate.UsageLog(sql.FieldEQ(FieldUserAgent, v))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IPAddress applies equality check predicate on the "ip_address" field. It's identical to IPAddressEQ.
|
||||||
|
func IPAddress(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldEQ(FieldIPAddress, v))
|
||||||
|
}
|
||||||
|
|
||||||
// ImageCount applies equality check predicate on the "image_count" field. It's identical to ImageCountEQ.
|
// ImageCount applies equality check predicate on the "image_count" field. It's identical to ImageCountEQ.
|
||||||
func ImageCount(v int) predicate.UsageLog {
|
func ImageCount(v int) predicate.UsageLog {
|
||||||
return predicate.UsageLog(sql.FieldEQ(FieldImageCount, v))
|
return predicate.UsageLog(sql.FieldEQ(FieldImageCount, v))
|
||||||
@@ -1190,6 +1195,81 @@ func UserAgentContainsFold(v string) predicate.UsageLog {
|
|||||||
return predicate.UsageLog(sql.FieldContainsFold(FieldUserAgent, v))
|
return predicate.UsageLog(sql.FieldContainsFold(FieldUserAgent, v))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IPAddressEQ applies the EQ predicate on the "ip_address" field.
|
||||||
|
func IPAddressEQ(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldEQ(FieldIPAddress, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPAddressNEQ applies the NEQ predicate on the "ip_address" field.
|
||||||
|
func IPAddressNEQ(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldNEQ(FieldIPAddress, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPAddressIn applies the In predicate on the "ip_address" field.
|
||||||
|
func IPAddressIn(vs ...string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldIn(FieldIPAddress, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPAddressNotIn applies the NotIn predicate on the "ip_address" field.
|
||||||
|
func IPAddressNotIn(vs ...string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldNotIn(FieldIPAddress, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPAddressGT applies the GT predicate on the "ip_address" field.
|
||||||
|
func IPAddressGT(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldGT(FieldIPAddress, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPAddressGTE applies the GTE predicate on the "ip_address" field.
|
||||||
|
func IPAddressGTE(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldGTE(FieldIPAddress, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPAddressLT applies the LT predicate on the "ip_address" field.
|
||||||
|
func IPAddressLT(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldLT(FieldIPAddress, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPAddressLTE applies the LTE predicate on the "ip_address" field.
|
||||||
|
func IPAddressLTE(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldLTE(FieldIPAddress, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPAddressContains applies the Contains predicate on the "ip_address" field.
|
||||||
|
func IPAddressContains(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldContains(FieldIPAddress, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPAddressHasPrefix applies the HasPrefix predicate on the "ip_address" field.
|
||||||
|
func IPAddressHasPrefix(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldHasPrefix(FieldIPAddress, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPAddressHasSuffix applies the HasSuffix predicate on the "ip_address" field.
|
||||||
|
func IPAddressHasSuffix(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldHasSuffix(FieldIPAddress, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPAddressIsNil applies the IsNil predicate on the "ip_address" field.
|
||||||
|
func IPAddressIsNil() predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldIsNull(FieldIPAddress))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPAddressNotNil applies the NotNil predicate on the "ip_address" field.
|
||||||
|
func IPAddressNotNil() predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldNotNull(FieldIPAddress))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPAddressEqualFold applies the EqualFold predicate on the "ip_address" field.
|
||||||
|
func IPAddressEqualFold(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldEqualFold(FieldIPAddress, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPAddressContainsFold applies the ContainsFold predicate on the "ip_address" field.
|
||||||
|
func IPAddressContainsFold(v string) predicate.UsageLog {
|
||||||
|
return predicate.UsageLog(sql.FieldContainsFold(FieldIPAddress, v))
|
||||||
|
}
|
||||||
|
|
||||||
// ImageCountEQ applies the EQ predicate on the "image_count" field.
|
// ImageCountEQ applies the EQ predicate on the "image_count" field.
|
||||||
func ImageCountEQ(v int) predicate.UsageLog {
|
func ImageCountEQ(v int) predicate.UsageLog {
|
||||||
return predicate.UsageLog(sql.FieldEQ(FieldImageCount, v))
|
return predicate.UsageLog(sql.FieldEQ(FieldImageCount, v))
|
||||||
|
|||||||
@@ -337,6 +337,20 @@ func (_c *UsageLogCreate) SetNillableUserAgent(v *string) *UsageLogCreate {
|
|||||||
return _c
|
return _c
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetIPAddress sets the "ip_address" field.
|
||||||
|
func (_c *UsageLogCreate) SetIPAddress(v string) *UsageLogCreate {
|
||||||
|
_c.mutation.SetIPAddress(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableIPAddress sets the "ip_address" field if the given value is not nil.
|
||||||
|
func (_c *UsageLogCreate) SetNillableIPAddress(v *string) *UsageLogCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetIPAddress(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
// SetImageCount sets the "image_count" field.
|
// SetImageCount sets the "image_count" field.
|
||||||
func (_c *UsageLogCreate) SetImageCount(v int) *UsageLogCreate {
|
func (_c *UsageLogCreate) SetImageCount(v int) *UsageLogCreate {
|
||||||
_c.mutation.SetImageCount(v)
|
_c.mutation.SetImageCount(v)
|
||||||
@@ -586,6 +600,11 @@ func (_c *UsageLogCreate) check() error {
|
|||||||
return &ValidationError{Name: "user_agent", err: fmt.Errorf(`ent: validator failed for field "UsageLog.user_agent": %w`, err)}
|
return &ValidationError{Name: "user_agent", err: fmt.Errorf(`ent: validator failed for field "UsageLog.user_agent": %w`, err)}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if v, ok := _c.mutation.IPAddress(); ok {
|
||||||
|
if err := usagelog.IPAddressValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "ip_address", err: fmt.Errorf(`ent: validator failed for field "UsageLog.ip_address": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
if _, ok := _c.mutation.ImageCount(); !ok {
|
if _, ok := _c.mutation.ImageCount(); !ok {
|
||||||
return &ValidationError{Name: "image_count", err: errors.New(`ent: missing required field "UsageLog.image_count"`)}
|
return &ValidationError{Name: "image_count", err: errors.New(`ent: missing required field "UsageLog.image_count"`)}
|
||||||
}
|
}
|
||||||
@@ -713,6 +732,10 @@ func (_c *UsageLogCreate) createSpec() (*UsageLog, *sqlgraph.CreateSpec) {
|
|||||||
_spec.SetField(usagelog.FieldUserAgent, field.TypeString, value)
|
_spec.SetField(usagelog.FieldUserAgent, field.TypeString, value)
|
||||||
_node.UserAgent = &value
|
_node.UserAgent = &value
|
||||||
}
|
}
|
||||||
|
if value, ok := _c.mutation.IPAddress(); ok {
|
||||||
|
_spec.SetField(usagelog.FieldIPAddress, field.TypeString, value)
|
||||||
|
_node.IPAddress = &value
|
||||||
|
}
|
||||||
if value, ok := _c.mutation.ImageCount(); ok {
|
if value, ok := _c.mutation.ImageCount(); ok {
|
||||||
_spec.SetField(usagelog.FieldImageCount, field.TypeInt, value)
|
_spec.SetField(usagelog.FieldImageCount, field.TypeInt, value)
|
||||||
_node.ImageCount = value
|
_node.ImageCount = value
|
||||||
@@ -1288,6 +1311,24 @@ func (u *UsageLogUpsert) ClearUserAgent() *UsageLogUpsert {
|
|||||||
return u
|
return u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetIPAddress sets the "ip_address" field.
|
||||||
|
func (u *UsageLogUpsert) SetIPAddress(v string) *UsageLogUpsert {
|
||||||
|
u.Set(usagelog.FieldIPAddress, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateIPAddress sets the "ip_address" field to the value that was provided on create.
|
||||||
|
func (u *UsageLogUpsert) UpdateIPAddress() *UsageLogUpsert {
|
||||||
|
u.SetExcluded(usagelog.FieldIPAddress)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearIPAddress clears the value of the "ip_address" field.
|
||||||
|
func (u *UsageLogUpsert) ClearIPAddress() *UsageLogUpsert {
|
||||||
|
u.SetNull(usagelog.FieldIPAddress)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
// SetImageCount sets the "image_count" field.
|
// SetImageCount sets the "image_count" field.
|
||||||
func (u *UsageLogUpsert) SetImageCount(v int) *UsageLogUpsert {
|
func (u *UsageLogUpsert) SetImageCount(v int) *UsageLogUpsert {
|
||||||
u.Set(usagelog.FieldImageCount, v)
|
u.Set(usagelog.FieldImageCount, v)
|
||||||
@@ -1866,6 +1907,27 @@ func (u *UsageLogUpsertOne) ClearUserAgent() *UsageLogUpsertOne {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetIPAddress sets the "ip_address" field.
|
||||||
|
func (u *UsageLogUpsertOne) SetIPAddress(v string) *UsageLogUpsertOne {
|
||||||
|
return u.Update(func(s *UsageLogUpsert) {
|
||||||
|
s.SetIPAddress(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateIPAddress sets the "ip_address" field to the value that was provided on create.
|
||||||
|
func (u *UsageLogUpsertOne) UpdateIPAddress() *UsageLogUpsertOne {
|
||||||
|
return u.Update(func(s *UsageLogUpsert) {
|
||||||
|
s.UpdateIPAddress()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearIPAddress clears the value of the "ip_address" field.
|
||||||
|
func (u *UsageLogUpsertOne) ClearIPAddress() *UsageLogUpsertOne {
|
||||||
|
return u.Update(func(s *UsageLogUpsert) {
|
||||||
|
s.ClearIPAddress()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// SetImageCount sets the "image_count" field.
|
// SetImageCount sets the "image_count" field.
|
||||||
func (u *UsageLogUpsertOne) SetImageCount(v int) *UsageLogUpsertOne {
|
func (u *UsageLogUpsertOne) SetImageCount(v int) *UsageLogUpsertOne {
|
||||||
return u.Update(func(s *UsageLogUpsert) {
|
return u.Update(func(s *UsageLogUpsert) {
|
||||||
@@ -2616,6 +2678,27 @@ func (u *UsageLogUpsertBulk) ClearUserAgent() *UsageLogUpsertBulk {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetIPAddress sets the "ip_address" field.
|
||||||
|
func (u *UsageLogUpsertBulk) SetIPAddress(v string) *UsageLogUpsertBulk {
|
||||||
|
return u.Update(func(s *UsageLogUpsert) {
|
||||||
|
s.SetIPAddress(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateIPAddress sets the "ip_address" field to the value that was provided on create.
|
||||||
|
func (u *UsageLogUpsertBulk) UpdateIPAddress() *UsageLogUpsertBulk {
|
||||||
|
return u.Update(func(s *UsageLogUpsert) {
|
||||||
|
s.UpdateIPAddress()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearIPAddress clears the value of the "ip_address" field.
|
||||||
|
func (u *UsageLogUpsertBulk) ClearIPAddress() *UsageLogUpsertBulk {
|
||||||
|
return u.Update(func(s *UsageLogUpsert) {
|
||||||
|
s.ClearIPAddress()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// SetImageCount sets the "image_count" field.
|
// SetImageCount sets the "image_count" field.
|
||||||
func (u *UsageLogUpsertBulk) SetImageCount(v int) *UsageLogUpsertBulk {
|
func (u *UsageLogUpsertBulk) SetImageCount(v int) *UsageLogUpsertBulk {
|
||||||
return u.Update(func(s *UsageLogUpsert) {
|
return u.Update(func(s *UsageLogUpsert) {
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ import (
|
|||||||
"math"
|
"math"
|
||||||
|
|
||||||
"entgo.io/ent"
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
"entgo.io/ent/dialect/sql"
|
"entgo.io/ent/dialect/sql"
|
||||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
"entgo.io/ent/schema/field"
|
"entgo.io/ent/schema/field"
|
||||||
@@ -32,6 +33,7 @@ type UsageLogQuery struct {
|
|||||||
withAccount *AccountQuery
|
withAccount *AccountQuery
|
||||||
withGroup *GroupQuery
|
withGroup *GroupQuery
|
||||||
withSubscription *UserSubscriptionQuery
|
withSubscription *UserSubscriptionQuery
|
||||||
|
modifiers []func(*sql.Selector)
|
||||||
// intermediate query (i.e. traversal path).
|
// intermediate query (i.e. traversal path).
|
||||||
sql *sql.Selector
|
sql *sql.Selector
|
||||||
path func(context.Context) (*sql.Selector, error)
|
path func(context.Context) (*sql.Selector, error)
|
||||||
@@ -531,6 +533,9 @@ func (_q *UsageLogQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Usa
|
|||||||
node.Edges.loadedTypes = loadedTypes
|
node.Edges.loadedTypes = loadedTypes
|
||||||
return node.assignValues(columns, values)
|
return node.assignValues(columns, values)
|
||||||
}
|
}
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
for i := range hooks {
|
for i := range hooks {
|
||||||
hooks[i](ctx, _spec)
|
hooks[i](ctx, _spec)
|
||||||
}
|
}
|
||||||
@@ -727,6 +732,9 @@ func (_q *UsageLogQuery) loadSubscription(ctx context.Context, query *UserSubscr
|
|||||||
|
|
||||||
func (_q *UsageLogQuery) sqlCount(ctx context.Context) (int, error) {
|
func (_q *UsageLogQuery) sqlCount(ctx context.Context) (int, error) {
|
||||||
_spec := _q.querySpec()
|
_spec := _q.querySpec()
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
_spec.Node.Columns = _q.ctx.Fields
|
_spec.Node.Columns = _q.ctx.Fields
|
||||||
if len(_q.ctx.Fields) > 0 {
|
if len(_q.ctx.Fields) > 0 {
|
||||||
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
||||||
@@ -804,6 +812,9 @@ func (_q *UsageLogQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
|||||||
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
||||||
selector.Distinct()
|
selector.Distinct()
|
||||||
}
|
}
|
||||||
|
for _, m := range _q.modifiers {
|
||||||
|
m(selector)
|
||||||
|
}
|
||||||
for _, p := range _q.predicates {
|
for _, p := range _q.predicates {
|
||||||
p(selector)
|
p(selector)
|
||||||
}
|
}
|
||||||
@@ -821,6 +832,32 @@ func (_q *UsageLogQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
|||||||
return selector
|
return selector
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||||
|
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||||
|
// either committed or rolled-back.
|
||||||
|
func (_q *UsageLogQuery) ForUpdate(opts ...sql.LockOption) *UsageLogQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForUpdate(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||||
|
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||||
|
// until your transaction commits.
|
||||||
|
func (_q *UsageLogQuery) ForShare(opts ...sql.LockOption) *UsageLogQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForShare(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
// UsageLogGroupBy is the group-by builder for UsageLog entities.
|
// UsageLogGroupBy is the group-by builder for UsageLog entities.
|
||||||
type UsageLogGroupBy struct {
|
type UsageLogGroupBy struct {
|
||||||
selector
|
selector
|
||||||
|
|||||||
@@ -524,6 +524,26 @@ func (_u *UsageLogUpdate) ClearUserAgent() *UsageLogUpdate {
|
|||||||
return _u
|
return _u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetIPAddress sets the "ip_address" field.
|
||||||
|
func (_u *UsageLogUpdate) SetIPAddress(v string) *UsageLogUpdate {
|
||||||
|
_u.mutation.SetIPAddress(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableIPAddress sets the "ip_address" field if the given value is not nil.
|
||||||
|
func (_u *UsageLogUpdate) SetNillableIPAddress(v *string) *UsageLogUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetIPAddress(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearIPAddress clears the value of the "ip_address" field.
|
||||||
|
func (_u *UsageLogUpdate) ClearIPAddress() *UsageLogUpdate {
|
||||||
|
_u.mutation.ClearIPAddress()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
// SetImageCount sets the "image_count" field.
|
// SetImageCount sets the "image_count" field.
|
||||||
func (_u *UsageLogUpdate) SetImageCount(v int) *UsageLogUpdate {
|
func (_u *UsageLogUpdate) SetImageCount(v int) *UsageLogUpdate {
|
||||||
_u.mutation.ResetImageCount()
|
_u.mutation.ResetImageCount()
|
||||||
@@ -669,6 +689,11 @@ func (_u *UsageLogUpdate) check() error {
|
|||||||
return &ValidationError{Name: "user_agent", err: fmt.Errorf(`ent: validator failed for field "UsageLog.user_agent": %w`, err)}
|
return &ValidationError{Name: "user_agent", err: fmt.Errorf(`ent: validator failed for field "UsageLog.user_agent": %w`, err)}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if v, ok := _u.mutation.IPAddress(); ok {
|
||||||
|
if err := usagelog.IPAddressValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "ip_address", err: fmt.Errorf(`ent: validator failed for field "UsageLog.ip_address": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
if v, ok := _u.mutation.ImageSize(); ok {
|
if v, ok := _u.mutation.ImageSize(); ok {
|
||||||
if err := usagelog.ImageSizeValidator(v); err != nil {
|
if err := usagelog.ImageSizeValidator(v); err != nil {
|
||||||
return &ValidationError{Name: "image_size", err: fmt.Errorf(`ent: validator failed for field "UsageLog.image_size": %w`, err)}
|
return &ValidationError{Name: "image_size", err: fmt.Errorf(`ent: validator failed for field "UsageLog.image_size": %w`, err)}
|
||||||
@@ -815,6 +840,12 @@ func (_u *UsageLogUpdate) sqlSave(ctx context.Context) (_node int, err error) {
|
|||||||
if _u.mutation.UserAgentCleared() {
|
if _u.mutation.UserAgentCleared() {
|
||||||
_spec.ClearField(usagelog.FieldUserAgent, field.TypeString)
|
_spec.ClearField(usagelog.FieldUserAgent, field.TypeString)
|
||||||
}
|
}
|
||||||
|
if value, ok := _u.mutation.IPAddress(); ok {
|
||||||
|
_spec.SetField(usagelog.FieldIPAddress, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.IPAddressCleared() {
|
||||||
|
_spec.ClearField(usagelog.FieldIPAddress, field.TypeString)
|
||||||
|
}
|
||||||
if value, ok := _u.mutation.ImageCount(); ok {
|
if value, ok := _u.mutation.ImageCount(); ok {
|
||||||
_spec.SetField(usagelog.FieldImageCount, field.TypeInt, value)
|
_spec.SetField(usagelog.FieldImageCount, field.TypeInt, value)
|
||||||
}
|
}
|
||||||
@@ -1484,6 +1515,26 @@ func (_u *UsageLogUpdateOne) ClearUserAgent() *UsageLogUpdateOne {
|
|||||||
return _u
|
return _u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetIPAddress sets the "ip_address" field.
|
||||||
|
func (_u *UsageLogUpdateOne) SetIPAddress(v string) *UsageLogUpdateOne {
|
||||||
|
_u.mutation.SetIPAddress(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableIPAddress sets the "ip_address" field if the given value is not nil.
|
||||||
|
func (_u *UsageLogUpdateOne) SetNillableIPAddress(v *string) *UsageLogUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetIPAddress(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearIPAddress clears the value of the "ip_address" field.
|
||||||
|
func (_u *UsageLogUpdateOne) ClearIPAddress() *UsageLogUpdateOne {
|
||||||
|
_u.mutation.ClearIPAddress()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
// SetImageCount sets the "image_count" field.
|
// SetImageCount sets the "image_count" field.
|
||||||
func (_u *UsageLogUpdateOne) SetImageCount(v int) *UsageLogUpdateOne {
|
func (_u *UsageLogUpdateOne) SetImageCount(v int) *UsageLogUpdateOne {
|
||||||
_u.mutation.ResetImageCount()
|
_u.mutation.ResetImageCount()
|
||||||
@@ -1642,6 +1693,11 @@ func (_u *UsageLogUpdateOne) check() error {
|
|||||||
return &ValidationError{Name: "user_agent", err: fmt.Errorf(`ent: validator failed for field "UsageLog.user_agent": %w`, err)}
|
return &ValidationError{Name: "user_agent", err: fmt.Errorf(`ent: validator failed for field "UsageLog.user_agent": %w`, err)}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if v, ok := _u.mutation.IPAddress(); ok {
|
||||||
|
if err := usagelog.IPAddressValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "ip_address", err: fmt.Errorf(`ent: validator failed for field "UsageLog.ip_address": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
if v, ok := _u.mutation.ImageSize(); ok {
|
if v, ok := _u.mutation.ImageSize(); ok {
|
||||||
if err := usagelog.ImageSizeValidator(v); err != nil {
|
if err := usagelog.ImageSizeValidator(v); err != nil {
|
||||||
return &ValidationError{Name: "image_size", err: fmt.Errorf(`ent: validator failed for field "UsageLog.image_size": %w`, err)}
|
return &ValidationError{Name: "image_size", err: fmt.Errorf(`ent: validator failed for field "UsageLog.image_size": %w`, err)}
|
||||||
@@ -1805,6 +1861,12 @@ func (_u *UsageLogUpdateOne) sqlSave(ctx context.Context) (_node *UsageLog, err
|
|||||||
if _u.mutation.UserAgentCleared() {
|
if _u.mutation.UserAgentCleared() {
|
||||||
_spec.ClearField(usagelog.FieldUserAgent, field.TypeString)
|
_spec.ClearField(usagelog.FieldUserAgent, field.TypeString)
|
||||||
}
|
}
|
||||||
|
if value, ok := _u.mutation.IPAddress(); ok {
|
||||||
|
_spec.SetField(usagelog.FieldIPAddress, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.IPAddressCleared() {
|
||||||
|
_spec.ClearField(usagelog.FieldIPAddress, field.TypeString)
|
||||||
|
}
|
||||||
if value, ok := _u.mutation.ImageCount(); ok {
|
if value, ok := _u.mutation.ImageCount(); ok {
|
||||||
_spec.SetField(usagelog.FieldImageCount, field.TypeInt, value)
|
_spec.SetField(usagelog.FieldImageCount, field.TypeInt, value)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -61,11 +61,13 @@ type UserEdges struct {
|
|||||||
UsageLogs []*UsageLog `json:"usage_logs,omitempty"`
|
UsageLogs []*UsageLog `json:"usage_logs,omitempty"`
|
||||||
// AttributeValues holds the value of the attribute_values edge.
|
// AttributeValues holds the value of the attribute_values edge.
|
||||||
AttributeValues []*UserAttributeValue `json:"attribute_values,omitempty"`
|
AttributeValues []*UserAttributeValue `json:"attribute_values,omitempty"`
|
||||||
|
// PromoCodeUsages holds the value of the promo_code_usages edge.
|
||||||
|
PromoCodeUsages []*PromoCodeUsage `json:"promo_code_usages,omitempty"`
|
||||||
// UserAllowedGroups holds the value of the user_allowed_groups edge.
|
// UserAllowedGroups holds the value of the user_allowed_groups edge.
|
||||||
UserAllowedGroups []*UserAllowedGroup `json:"user_allowed_groups,omitempty"`
|
UserAllowedGroups []*UserAllowedGroup `json:"user_allowed_groups,omitempty"`
|
||||||
// loadedTypes holds the information for reporting if a
|
// loadedTypes holds the information for reporting if a
|
||||||
// type was loaded (or requested) in eager-loading or not.
|
// type was loaded (or requested) in eager-loading or not.
|
||||||
loadedTypes [8]bool
|
loadedTypes [9]bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// APIKeysOrErr returns the APIKeys value or an error if the edge
|
// APIKeysOrErr returns the APIKeys value or an error if the edge
|
||||||
@@ -131,10 +133,19 @@ func (e UserEdges) AttributeValuesOrErr() ([]*UserAttributeValue, error) {
|
|||||||
return nil, &NotLoadedError{edge: "attribute_values"}
|
return nil, &NotLoadedError{edge: "attribute_values"}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// PromoCodeUsagesOrErr returns the PromoCodeUsages value or an error if the edge
|
||||||
|
// was not loaded in eager-loading.
|
||||||
|
func (e UserEdges) PromoCodeUsagesOrErr() ([]*PromoCodeUsage, error) {
|
||||||
|
if e.loadedTypes[7] {
|
||||||
|
return e.PromoCodeUsages, nil
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "promo_code_usages"}
|
||||||
|
}
|
||||||
|
|
||||||
// UserAllowedGroupsOrErr returns the UserAllowedGroups value or an error if the edge
|
// UserAllowedGroupsOrErr returns the UserAllowedGroups value or an error if the edge
|
||||||
// was not loaded in eager-loading.
|
// was not loaded in eager-loading.
|
||||||
func (e UserEdges) UserAllowedGroupsOrErr() ([]*UserAllowedGroup, error) {
|
func (e UserEdges) UserAllowedGroupsOrErr() ([]*UserAllowedGroup, error) {
|
||||||
if e.loadedTypes[7] {
|
if e.loadedTypes[8] {
|
||||||
return e.UserAllowedGroups, nil
|
return e.UserAllowedGroups, nil
|
||||||
}
|
}
|
||||||
return nil, &NotLoadedError{edge: "user_allowed_groups"}
|
return nil, &NotLoadedError{edge: "user_allowed_groups"}
|
||||||
@@ -289,6 +300,11 @@ func (_m *User) QueryAttributeValues() *UserAttributeValueQuery {
|
|||||||
return NewUserClient(_m.config).QueryAttributeValues(_m)
|
return NewUserClient(_m.config).QueryAttributeValues(_m)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// QueryPromoCodeUsages queries the "promo_code_usages" edge of the User entity.
|
||||||
|
func (_m *User) QueryPromoCodeUsages() *PromoCodeUsageQuery {
|
||||||
|
return NewUserClient(_m.config).QueryPromoCodeUsages(_m)
|
||||||
|
}
|
||||||
|
|
||||||
// QueryUserAllowedGroups queries the "user_allowed_groups" edge of the User entity.
|
// QueryUserAllowedGroups queries the "user_allowed_groups" edge of the User entity.
|
||||||
func (_m *User) QueryUserAllowedGroups() *UserAllowedGroupQuery {
|
func (_m *User) QueryUserAllowedGroups() *UserAllowedGroupQuery {
|
||||||
return NewUserClient(_m.config).QueryUserAllowedGroups(_m)
|
return NewUserClient(_m.config).QueryUserAllowedGroups(_m)
|
||||||
|
|||||||
@@ -51,6 +51,8 @@ const (
|
|||||||
EdgeUsageLogs = "usage_logs"
|
EdgeUsageLogs = "usage_logs"
|
||||||
// EdgeAttributeValues holds the string denoting the attribute_values edge name in mutations.
|
// EdgeAttributeValues holds the string denoting the attribute_values edge name in mutations.
|
||||||
EdgeAttributeValues = "attribute_values"
|
EdgeAttributeValues = "attribute_values"
|
||||||
|
// EdgePromoCodeUsages holds the string denoting the promo_code_usages edge name in mutations.
|
||||||
|
EdgePromoCodeUsages = "promo_code_usages"
|
||||||
// EdgeUserAllowedGroups holds the string denoting the user_allowed_groups edge name in mutations.
|
// EdgeUserAllowedGroups holds the string denoting the user_allowed_groups edge name in mutations.
|
||||||
EdgeUserAllowedGroups = "user_allowed_groups"
|
EdgeUserAllowedGroups = "user_allowed_groups"
|
||||||
// Table holds the table name of the user in the database.
|
// Table holds the table name of the user in the database.
|
||||||
@@ -102,6 +104,13 @@ const (
|
|||||||
AttributeValuesInverseTable = "user_attribute_values"
|
AttributeValuesInverseTable = "user_attribute_values"
|
||||||
// AttributeValuesColumn is the table column denoting the attribute_values relation/edge.
|
// AttributeValuesColumn is the table column denoting the attribute_values relation/edge.
|
||||||
AttributeValuesColumn = "user_id"
|
AttributeValuesColumn = "user_id"
|
||||||
|
// PromoCodeUsagesTable is the table that holds the promo_code_usages relation/edge.
|
||||||
|
PromoCodeUsagesTable = "promo_code_usages"
|
||||||
|
// PromoCodeUsagesInverseTable is the table name for the PromoCodeUsage entity.
|
||||||
|
// It exists in this package in order to avoid circular dependency with the "promocodeusage" package.
|
||||||
|
PromoCodeUsagesInverseTable = "promo_code_usages"
|
||||||
|
// PromoCodeUsagesColumn is the table column denoting the promo_code_usages relation/edge.
|
||||||
|
PromoCodeUsagesColumn = "user_id"
|
||||||
// UserAllowedGroupsTable is the table that holds the user_allowed_groups relation/edge.
|
// UserAllowedGroupsTable is the table that holds the user_allowed_groups relation/edge.
|
||||||
UserAllowedGroupsTable = "user_allowed_groups"
|
UserAllowedGroupsTable = "user_allowed_groups"
|
||||||
// UserAllowedGroupsInverseTable is the table name for the UserAllowedGroup entity.
|
// UserAllowedGroupsInverseTable is the table name for the UserAllowedGroup entity.
|
||||||
@@ -342,6 +351,20 @@ func ByAttributeValues(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ByPromoCodeUsagesCount orders the results by promo_code_usages count.
|
||||||
|
func ByPromoCodeUsagesCount(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborsCount(s, newPromoCodeUsagesStep(), opts...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByPromoCodeUsages orders the results by promo_code_usages terms.
|
||||||
|
func ByPromoCodeUsages(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborTerms(s, newPromoCodeUsagesStep(), append([]sql.OrderTerm{term}, terms...)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// ByUserAllowedGroupsCount orders the results by user_allowed_groups count.
|
// ByUserAllowedGroupsCount orders the results by user_allowed_groups count.
|
||||||
func ByUserAllowedGroupsCount(opts ...sql.OrderTermOption) OrderOption {
|
func ByUserAllowedGroupsCount(opts ...sql.OrderTermOption) OrderOption {
|
||||||
return func(s *sql.Selector) {
|
return func(s *sql.Selector) {
|
||||||
@@ -404,6 +427,13 @@ func newAttributeValuesStep() *sqlgraph.Step {
|
|||||||
sqlgraph.Edge(sqlgraph.O2M, false, AttributeValuesTable, AttributeValuesColumn),
|
sqlgraph.Edge(sqlgraph.O2M, false, AttributeValuesTable, AttributeValuesColumn),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
func newPromoCodeUsagesStep() *sqlgraph.Step {
|
||||||
|
return sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.To(PromoCodeUsagesInverseTable, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, false, PromoCodeUsagesTable, PromoCodeUsagesColumn),
|
||||||
|
)
|
||||||
|
}
|
||||||
func newUserAllowedGroupsStep() *sqlgraph.Step {
|
func newUserAllowedGroupsStep() *sqlgraph.Step {
|
||||||
return sqlgraph.NewStep(
|
return sqlgraph.NewStep(
|
||||||
sqlgraph.From(Table, FieldID),
|
sqlgraph.From(Table, FieldID),
|
||||||
|
|||||||
@@ -871,6 +871,29 @@ func HasAttributeValuesWith(preds ...predicate.UserAttributeValue) predicate.Use
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// HasPromoCodeUsages applies the HasEdge predicate on the "promo_code_usages" edge.
|
||||||
|
func HasPromoCodeUsages() predicate.User {
|
||||||
|
return predicate.User(func(s *sql.Selector) {
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, false, PromoCodeUsagesTable, PromoCodeUsagesColumn),
|
||||||
|
)
|
||||||
|
sqlgraph.HasNeighbors(s, step)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasPromoCodeUsagesWith applies the HasEdge predicate on the "promo_code_usages" edge with a given conditions (other predicates).
|
||||||
|
func HasPromoCodeUsagesWith(preds ...predicate.PromoCodeUsage) predicate.User {
|
||||||
|
return predicate.User(func(s *sql.Selector) {
|
||||||
|
step := newPromoCodeUsagesStep()
|
||||||
|
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||||
|
for _, p := range preds {
|
||||||
|
p(s)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// HasUserAllowedGroups applies the HasEdge predicate on the "user_allowed_groups" edge.
|
// HasUserAllowedGroups applies the HasEdge predicate on the "user_allowed_groups" edge.
|
||||||
func HasUserAllowedGroups() predicate.User {
|
func HasUserAllowedGroups() predicate.User {
|
||||||
return predicate.User(func(s *sql.Selector) {
|
return predicate.User(func(s *sql.Selector) {
|
||||||
|
|||||||
@@ -13,6 +13,7 @@ import (
|
|||||||
"entgo.io/ent/schema/field"
|
"entgo.io/ent/schema/field"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/group"
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/usagelog"
|
"github.com/Wei-Shaw/sub2api/ent/usagelog"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/user"
|
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||||
@@ -271,6 +272,21 @@ func (_c *UserCreate) AddAttributeValues(v ...*UserAttributeValue) *UserCreate {
|
|||||||
return _c.AddAttributeValueIDs(ids...)
|
return _c.AddAttributeValueIDs(ids...)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// AddPromoCodeUsageIDs adds the "promo_code_usages" edge to the PromoCodeUsage entity by IDs.
|
||||||
|
func (_c *UserCreate) AddPromoCodeUsageIDs(ids ...int64) *UserCreate {
|
||||||
|
_c.mutation.AddPromoCodeUsageIDs(ids...)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddPromoCodeUsages adds the "promo_code_usages" edges to the PromoCodeUsage entity.
|
||||||
|
func (_c *UserCreate) AddPromoCodeUsages(v ...*PromoCodeUsage) *UserCreate {
|
||||||
|
ids := make([]int64, len(v))
|
||||||
|
for i := range v {
|
||||||
|
ids[i] = v[i].ID
|
||||||
|
}
|
||||||
|
return _c.AddPromoCodeUsageIDs(ids...)
|
||||||
|
}
|
||||||
|
|
||||||
// Mutation returns the UserMutation object of the builder.
|
// Mutation returns the UserMutation object of the builder.
|
||||||
func (_c *UserCreate) Mutation() *UserMutation {
|
func (_c *UserCreate) Mutation() *UserMutation {
|
||||||
return _c.mutation
|
return _c.mutation
|
||||||
@@ -593,6 +609,22 @@ func (_c *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) {
|
|||||||
}
|
}
|
||||||
_spec.Edges = append(_spec.Edges, edge)
|
_spec.Edges = append(_spec.Edges, edge)
|
||||||
}
|
}
|
||||||
|
if nodes := _c.mutation.PromoCodeUsagesIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.O2M,
|
||||||
|
Inverse: false,
|
||||||
|
Table: user.PromoCodeUsagesTable,
|
||||||
|
Columns: []string{user.PromoCodeUsagesColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges = append(_spec.Edges, edge)
|
||||||
|
}
|
||||||
return _node, _spec
|
return _node, _spec
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -9,12 +9,14 @@ import (
|
|||||||
"math"
|
"math"
|
||||||
|
|
||||||
"entgo.io/ent"
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
"entgo.io/ent/dialect/sql"
|
"entgo.io/ent/dialect/sql"
|
||||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
"entgo.io/ent/schema/field"
|
"entgo.io/ent/schema/field"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/group"
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/usagelog"
|
"github.com/Wei-Shaw/sub2api/ent/usagelog"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/user"
|
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||||
@@ -37,7 +39,9 @@ type UserQuery struct {
|
|||||||
withAllowedGroups *GroupQuery
|
withAllowedGroups *GroupQuery
|
||||||
withUsageLogs *UsageLogQuery
|
withUsageLogs *UsageLogQuery
|
||||||
withAttributeValues *UserAttributeValueQuery
|
withAttributeValues *UserAttributeValueQuery
|
||||||
|
withPromoCodeUsages *PromoCodeUsageQuery
|
||||||
withUserAllowedGroups *UserAllowedGroupQuery
|
withUserAllowedGroups *UserAllowedGroupQuery
|
||||||
|
modifiers []func(*sql.Selector)
|
||||||
// intermediate query (i.e. traversal path).
|
// intermediate query (i.e. traversal path).
|
||||||
sql *sql.Selector
|
sql *sql.Selector
|
||||||
path func(context.Context) (*sql.Selector, error)
|
path func(context.Context) (*sql.Selector, error)
|
||||||
@@ -228,6 +232,28 @@ func (_q *UserQuery) QueryAttributeValues() *UserAttributeValueQuery {
|
|||||||
return query
|
return query
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// QueryPromoCodeUsages chains the current query on the "promo_code_usages" edge.
|
||||||
|
func (_q *UserQuery) QueryPromoCodeUsages() *PromoCodeUsageQuery {
|
||||||
|
query := (&PromoCodeUsageClient{config: _q.config}).Query()
|
||||||
|
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
selector := _q.sqlQuery(ctx)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(user.Table, user.FieldID, selector),
|
||||||
|
sqlgraph.To(promocodeusage.Table, promocodeusage.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, false, user.PromoCodeUsagesTable, user.PromoCodeUsagesColumn),
|
||||||
|
)
|
||||||
|
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||||
|
return fromU, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
// QueryUserAllowedGroups chains the current query on the "user_allowed_groups" edge.
|
// QueryUserAllowedGroups chains the current query on the "user_allowed_groups" edge.
|
||||||
func (_q *UserQuery) QueryUserAllowedGroups() *UserAllowedGroupQuery {
|
func (_q *UserQuery) QueryUserAllowedGroups() *UserAllowedGroupQuery {
|
||||||
query := (&UserAllowedGroupClient{config: _q.config}).Query()
|
query := (&UserAllowedGroupClient{config: _q.config}).Query()
|
||||||
@@ -449,6 +475,7 @@ func (_q *UserQuery) Clone() *UserQuery {
|
|||||||
withAllowedGroups: _q.withAllowedGroups.Clone(),
|
withAllowedGroups: _q.withAllowedGroups.Clone(),
|
||||||
withUsageLogs: _q.withUsageLogs.Clone(),
|
withUsageLogs: _q.withUsageLogs.Clone(),
|
||||||
withAttributeValues: _q.withAttributeValues.Clone(),
|
withAttributeValues: _q.withAttributeValues.Clone(),
|
||||||
|
withPromoCodeUsages: _q.withPromoCodeUsages.Clone(),
|
||||||
withUserAllowedGroups: _q.withUserAllowedGroups.Clone(),
|
withUserAllowedGroups: _q.withUserAllowedGroups.Clone(),
|
||||||
// clone intermediate query.
|
// clone intermediate query.
|
||||||
sql: _q.sql.Clone(),
|
sql: _q.sql.Clone(),
|
||||||
@@ -533,6 +560,17 @@ func (_q *UserQuery) WithAttributeValues(opts ...func(*UserAttributeValueQuery))
|
|||||||
return _q
|
return _q
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithPromoCodeUsages tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "promo_code_usages" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *UserQuery) WithPromoCodeUsages(opts ...func(*PromoCodeUsageQuery)) *UserQuery {
|
||||||
|
query := (&PromoCodeUsageClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withPromoCodeUsages = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
// WithUserAllowedGroups tells the query-builder to eager-load the nodes that are connected to
|
// WithUserAllowedGroups tells the query-builder to eager-load the nodes that are connected to
|
||||||
// the "user_allowed_groups" edge. The optional arguments are used to configure the query builder of the edge.
|
// the "user_allowed_groups" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
func (_q *UserQuery) WithUserAllowedGroups(opts ...func(*UserAllowedGroupQuery)) *UserQuery {
|
func (_q *UserQuery) WithUserAllowedGroups(opts ...func(*UserAllowedGroupQuery)) *UserQuery {
|
||||||
@@ -622,7 +660,7 @@ func (_q *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, e
|
|||||||
var (
|
var (
|
||||||
nodes = []*User{}
|
nodes = []*User{}
|
||||||
_spec = _q.querySpec()
|
_spec = _q.querySpec()
|
||||||
loadedTypes = [8]bool{
|
loadedTypes = [9]bool{
|
||||||
_q.withAPIKeys != nil,
|
_q.withAPIKeys != nil,
|
||||||
_q.withRedeemCodes != nil,
|
_q.withRedeemCodes != nil,
|
||||||
_q.withSubscriptions != nil,
|
_q.withSubscriptions != nil,
|
||||||
@@ -630,6 +668,7 @@ func (_q *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, e
|
|||||||
_q.withAllowedGroups != nil,
|
_q.withAllowedGroups != nil,
|
||||||
_q.withUsageLogs != nil,
|
_q.withUsageLogs != nil,
|
||||||
_q.withAttributeValues != nil,
|
_q.withAttributeValues != nil,
|
||||||
|
_q.withPromoCodeUsages != nil,
|
||||||
_q.withUserAllowedGroups != nil,
|
_q.withUserAllowedGroups != nil,
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
@@ -642,6 +681,9 @@ func (_q *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, e
|
|||||||
node.Edges.loadedTypes = loadedTypes
|
node.Edges.loadedTypes = loadedTypes
|
||||||
return node.assignValues(columns, values)
|
return node.assignValues(columns, values)
|
||||||
}
|
}
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
for i := range hooks {
|
for i := range hooks {
|
||||||
hooks[i](ctx, _spec)
|
hooks[i](ctx, _spec)
|
||||||
}
|
}
|
||||||
@@ -702,6 +744,13 @@ func (_q *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, e
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if query := _q.withPromoCodeUsages; query != nil {
|
||||||
|
if err := _q.loadPromoCodeUsages(ctx, query, nodes,
|
||||||
|
func(n *User) { n.Edges.PromoCodeUsages = []*PromoCodeUsage{} },
|
||||||
|
func(n *User, e *PromoCodeUsage) { n.Edges.PromoCodeUsages = append(n.Edges.PromoCodeUsages, e) }); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
if query := _q.withUserAllowedGroups; query != nil {
|
if query := _q.withUserAllowedGroups; query != nil {
|
||||||
if err := _q.loadUserAllowedGroups(ctx, query, nodes,
|
if err := _q.loadUserAllowedGroups(ctx, query, nodes,
|
||||||
func(n *User) { n.Edges.UserAllowedGroups = []*UserAllowedGroup{} },
|
func(n *User) { n.Edges.UserAllowedGroups = []*UserAllowedGroup{} },
|
||||||
@@ -959,6 +1008,36 @@ func (_q *UserQuery) loadAttributeValues(ctx context.Context, query *UserAttribu
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
func (_q *UserQuery) loadPromoCodeUsages(ctx context.Context, query *PromoCodeUsageQuery, nodes []*User, init func(*User), assign func(*User, *PromoCodeUsage)) error {
|
||||||
|
fks := make([]driver.Value, 0, len(nodes))
|
||||||
|
nodeids := make(map[int64]*User)
|
||||||
|
for i := range nodes {
|
||||||
|
fks = append(fks, nodes[i].ID)
|
||||||
|
nodeids[nodes[i].ID] = nodes[i]
|
||||||
|
if init != nil {
|
||||||
|
init(nodes[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(query.ctx.Fields) > 0 {
|
||||||
|
query.ctx.AppendFieldOnce(promocodeusage.FieldUserID)
|
||||||
|
}
|
||||||
|
query.Where(predicate.PromoCodeUsage(func(s *sql.Selector) {
|
||||||
|
s.Where(sql.InValues(s.C(user.PromoCodeUsagesColumn), fks...))
|
||||||
|
}))
|
||||||
|
neighbors, err := query.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, n := range neighbors {
|
||||||
|
fk := n.UserID
|
||||||
|
node, ok := nodeids[fk]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf(`unexpected referenced foreign-key "user_id" returned %v for node %v`, fk, n.ID)
|
||||||
|
}
|
||||||
|
assign(node, n)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
func (_q *UserQuery) loadUserAllowedGroups(ctx context.Context, query *UserAllowedGroupQuery, nodes []*User, init func(*User), assign func(*User, *UserAllowedGroup)) error {
|
func (_q *UserQuery) loadUserAllowedGroups(ctx context.Context, query *UserAllowedGroupQuery, nodes []*User, init func(*User), assign func(*User, *UserAllowedGroup)) error {
|
||||||
fks := make([]driver.Value, 0, len(nodes))
|
fks := make([]driver.Value, 0, len(nodes))
|
||||||
nodeids := make(map[int64]*User)
|
nodeids := make(map[int64]*User)
|
||||||
@@ -992,6 +1071,9 @@ func (_q *UserQuery) loadUserAllowedGroups(ctx context.Context, query *UserAllow
|
|||||||
|
|
||||||
func (_q *UserQuery) sqlCount(ctx context.Context) (int, error) {
|
func (_q *UserQuery) sqlCount(ctx context.Context) (int, error) {
|
||||||
_spec := _q.querySpec()
|
_spec := _q.querySpec()
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
_spec.Node.Columns = _q.ctx.Fields
|
_spec.Node.Columns = _q.ctx.Fields
|
||||||
if len(_q.ctx.Fields) > 0 {
|
if len(_q.ctx.Fields) > 0 {
|
||||||
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
||||||
@@ -1054,6 +1136,9 @@ func (_q *UserQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
|||||||
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
||||||
selector.Distinct()
|
selector.Distinct()
|
||||||
}
|
}
|
||||||
|
for _, m := range _q.modifiers {
|
||||||
|
m(selector)
|
||||||
|
}
|
||||||
for _, p := range _q.predicates {
|
for _, p := range _q.predicates {
|
||||||
p(selector)
|
p(selector)
|
||||||
}
|
}
|
||||||
@@ -1071,6 +1156,32 @@ func (_q *UserQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
|||||||
return selector
|
return selector
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||||
|
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||||
|
// either committed or rolled-back.
|
||||||
|
func (_q *UserQuery) ForUpdate(opts ...sql.LockOption) *UserQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForUpdate(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||||
|
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||||
|
// until your transaction commits.
|
||||||
|
func (_q *UserQuery) ForShare(opts ...sql.LockOption) *UserQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForShare(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
// UserGroupBy is the group-by builder for User entities.
|
// UserGroupBy is the group-by builder for User entities.
|
||||||
type UserGroupBy struct {
|
type UserGroupBy struct {
|
||||||
selector
|
selector
|
||||||
|
|||||||
@@ -14,6 +14,7 @@ import (
|
|||||||
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/group"
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/usagelog"
|
"github.com/Wei-Shaw/sub2api/ent/usagelog"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/user"
|
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||||
@@ -291,6 +292,21 @@ func (_u *UserUpdate) AddAttributeValues(v ...*UserAttributeValue) *UserUpdate {
|
|||||||
return _u.AddAttributeValueIDs(ids...)
|
return _u.AddAttributeValueIDs(ids...)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// AddPromoCodeUsageIDs adds the "promo_code_usages" edge to the PromoCodeUsage entity by IDs.
|
||||||
|
func (_u *UserUpdate) AddPromoCodeUsageIDs(ids ...int64) *UserUpdate {
|
||||||
|
_u.mutation.AddPromoCodeUsageIDs(ids...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddPromoCodeUsages adds the "promo_code_usages" edges to the PromoCodeUsage entity.
|
||||||
|
func (_u *UserUpdate) AddPromoCodeUsages(v ...*PromoCodeUsage) *UserUpdate {
|
||||||
|
ids := make([]int64, len(v))
|
||||||
|
for i := range v {
|
||||||
|
ids[i] = v[i].ID
|
||||||
|
}
|
||||||
|
return _u.AddPromoCodeUsageIDs(ids...)
|
||||||
|
}
|
||||||
|
|
||||||
// Mutation returns the UserMutation object of the builder.
|
// Mutation returns the UserMutation object of the builder.
|
||||||
func (_u *UserUpdate) Mutation() *UserMutation {
|
func (_u *UserUpdate) Mutation() *UserMutation {
|
||||||
return _u.mutation
|
return _u.mutation
|
||||||
@@ -443,6 +459,27 @@ func (_u *UserUpdate) RemoveAttributeValues(v ...*UserAttributeValue) *UserUpdat
|
|||||||
return _u.RemoveAttributeValueIDs(ids...)
|
return _u.RemoveAttributeValueIDs(ids...)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ClearPromoCodeUsages clears all "promo_code_usages" edges to the PromoCodeUsage entity.
|
||||||
|
func (_u *UserUpdate) ClearPromoCodeUsages() *UserUpdate {
|
||||||
|
_u.mutation.ClearPromoCodeUsages()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemovePromoCodeUsageIDs removes the "promo_code_usages" edge to PromoCodeUsage entities by IDs.
|
||||||
|
func (_u *UserUpdate) RemovePromoCodeUsageIDs(ids ...int64) *UserUpdate {
|
||||||
|
_u.mutation.RemovePromoCodeUsageIDs(ids...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemovePromoCodeUsages removes "promo_code_usages" edges to PromoCodeUsage entities.
|
||||||
|
func (_u *UserUpdate) RemovePromoCodeUsages(v ...*PromoCodeUsage) *UserUpdate {
|
||||||
|
ids := make([]int64, len(v))
|
||||||
|
for i := range v {
|
||||||
|
ids[i] = v[i].ID
|
||||||
|
}
|
||||||
|
return _u.RemovePromoCodeUsageIDs(ids...)
|
||||||
|
}
|
||||||
|
|
||||||
// Save executes the query and returns the number of nodes affected by the update operation.
|
// Save executes the query and returns the number of nodes affected by the update operation.
|
||||||
func (_u *UserUpdate) Save(ctx context.Context) (int, error) {
|
func (_u *UserUpdate) Save(ctx context.Context) (int, error) {
|
||||||
if err := _u.defaults(); err != nil {
|
if err := _u.defaults(); err != nil {
|
||||||
@@ -893,6 +930,51 @@ func (_u *UserUpdate) sqlSave(ctx context.Context) (_node int, err error) {
|
|||||||
}
|
}
|
||||||
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||||
}
|
}
|
||||||
|
if _u.mutation.PromoCodeUsagesCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.O2M,
|
||||||
|
Inverse: false,
|
||||||
|
Table: user.PromoCodeUsagesTable,
|
||||||
|
Columns: []string{user.PromoCodeUsagesColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.RemovedPromoCodeUsagesIDs(); len(nodes) > 0 && !_u.mutation.PromoCodeUsagesCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.O2M,
|
||||||
|
Inverse: false,
|
||||||
|
Table: user.PromoCodeUsagesTable,
|
||||||
|
Columns: []string{user.PromoCodeUsagesColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.PromoCodeUsagesIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.O2M,
|
||||||
|
Inverse: false,
|
||||||
|
Table: user.PromoCodeUsagesTable,
|
||||||
|
Columns: []string{user.PromoCodeUsagesColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||||
|
}
|
||||||
if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
|
if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
|
||||||
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||||
err = &NotFoundError{user.Label}
|
err = &NotFoundError{user.Label}
|
||||||
@@ -1170,6 +1252,21 @@ func (_u *UserUpdateOne) AddAttributeValues(v ...*UserAttributeValue) *UserUpdat
|
|||||||
return _u.AddAttributeValueIDs(ids...)
|
return _u.AddAttributeValueIDs(ids...)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// AddPromoCodeUsageIDs adds the "promo_code_usages" edge to the PromoCodeUsage entity by IDs.
|
||||||
|
func (_u *UserUpdateOne) AddPromoCodeUsageIDs(ids ...int64) *UserUpdateOne {
|
||||||
|
_u.mutation.AddPromoCodeUsageIDs(ids...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddPromoCodeUsages adds the "promo_code_usages" edges to the PromoCodeUsage entity.
|
||||||
|
func (_u *UserUpdateOne) AddPromoCodeUsages(v ...*PromoCodeUsage) *UserUpdateOne {
|
||||||
|
ids := make([]int64, len(v))
|
||||||
|
for i := range v {
|
||||||
|
ids[i] = v[i].ID
|
||||||
|
}
|
||||||
|
return _u.AddPromoCodeUsageIDs(ids...)
|
||||||
|
}
|
||||||
|
|
||||||
// Mutation returns the UserMutation object of the builder.
|
// Mutation returns the UserMutation object of the builder.
|
||||||
func (_u *UserUpdateOne) Mutation() *UserMutation {
|
func (_u *UserUpdateOne) Mutation() *UserMutation {
|
||||||
return _u.mutation
|
return _u.mutation
|
||||||
@@ -1322,6 +1419,27 @@ func (_u *UserUpdateOne) RemoveAttributeValues(v ...*UserAttributeValue) *UserUp
|
|||||||
return _u.RemoveAttributeValueIDs(ids...)
|
return _u.RemoveAttributeValueIDs(ids...)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ClearPromoCodeUsages clears all "promo_code_usages" edges to the PromoCodeUsage entity.
|
||||||
|
func (_u *UserUpdateOne) ClearPromoCodeUsages() *UserUpdateOne {
|
||||||
|
_u.mutation.ClearPromoCodeUsages()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemovePromoCodeUsageIDs removes the "promo_code_usages" edge to PromoCodeUsage entities by IDs.
|
||||||
|
func (_u *UserUpdateOne) RemovePromoCodeUsageIDs(ids ...int64) *UserUpdateOne {
|
||||||
|
_u.mutation.RemovePromoCodeUsageIDs(ids...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemovePromoCodeUsages removes "promo_code_usages" edges to PromoCodeUsage entities.
|
||||||
|
func (_u *UserUpdateOne) RemovePromoCodeUsages(v ...*PromoCodeUsage) *UserUpdateOne {
|
||||||
|
ids := make([]int64, len(v))
|
||||||
|
for i := range v {
|
||||||
|
ids[i] = v[i].ID
|
||||||
|
}
|
||||||
|
return _u.RemovePromoCodeUsageIDs(ids...)
|
||||||
|
}
|
||||||
|
|
||||||
// Where appends a list predicates to the UserUpdate builder.
|
// Where appends a list predicates to the UserUpdate builder.
|
||||||
func (_u *UserUpdateOne) Where(ps ...predicate.User) *UserUpdateOne {
|
func (_u *UserUpdateOne) Where(ps ...predicate.User) *UserUpdateOne {
|
||||||
_u.mutation.Where(ps...)
|
_u.mutation.Where(ps...)
|
||||||
@@ -1802,6 +1920,51 @@ func (_u *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) {
|
|||||||
}
|
}
|
||||||
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||||
}
|
}
|
||||||
|
if _u.mutation.PromoCodeUsagesCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.O2M,
|
||||||
|
Inverse: false,
|
||||||
|
Table: user.PromoCodeUsagesTable,
|
||||||
|
Columns: []string{user.PromoCodeUsagesColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.RemovedPromoCodeUsagesIDs(); len(nodes) > 0 && !_u.mutation.PromoCodeUsagesCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.O2M,
|
||||||
|
Inverse: false,
|
||||||
|
Table: user.PromoCodeUsagesTable,
|
||||||
|
Columns: []string{user.PromoCodeUsagesColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.PromoCodeUsagesIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.O2M,
|
||||||
|
Inverse: false,
|
||||||
|
Table: user.PromoCodeUsagesTable,
|
||||||
|
Columns: []string{user.PromoCodeUsagesColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||||
|
}
|
||||||
_node = &User{config: _u.config}
|
_node = &User{config: _u.config}
|
||||||
_spec.Assign = _node.assignValues
|
_spec.Assign = _node.assignValues
|
||||||
_spec.ScanValues = _node.scanValues
|
_spec.ScanValues = _node.scanValues
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ import (
|
|||||||
"math"
|
"math"
|
||||||
|
|
||||||
"entgo.io/ent"
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
"entgo.io/ent/dialect/sql"
|
"entgo.io/ent/dialect/sql"
|
||||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/group"
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
@@ -25,6 +26,7 @@ type UserAllowedGroupQuery struct {
|
|||||||
predicates []predicate.UserAllowedGroup
|
predicates []predicate.UserAllowedGroup
|
||||||
withUser *UserQuery
|
withUser *UserQuery
|
||||||
withGroup *GroupQuery
|
withGroup *GroupQuery
|
||||||
|
modifiers []func(*sql.Selector)
|
||||||
// intermediate query (i.e. traversal path).
|
// intermediate query (i.e. traversal path).
|
||||||
sql *sql.Selector
|
sql *sql.Selector
|
||||||
path func(context.Context) (*sql.Selector, error)
|
path func(context.Context) (*sql.Selector, error)
|
||||||
@@ -347,6 +349,9 @@ func (_q *UserAllowedGroupQuery) sqlAll(ctx context.Context, hooks ...queryHook)
|
|||||||
node.Edges.loadedTypes = loadedTypes
|
node.Edges.loadedTypes = loadedTypes
|
||||||
return node.assignValues(columns, values)
|
return node.assignValues(columns, values)
|
||||||
}
|
}
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
for i := range hooks {
|
for i := range hooks {
|
||||||
hooks[i](ctx, _spec)
|
hooks[i](ctx, _spec)
|
||||||
}
|
}
|
||||||
@@ -432,6 +437,9 @@ func (_q *UserAllowedGroupQuery) loadGroup(ctx context.Context, query *GroupQuer
|
|||||||
|
|
||||||
func (_q *UserAllowedGroupQuery) sqlCount(ctx context.Context) (int, error) {
|
func (_q *UserAllowedGroupQuery) sqlCount(ctx context.Context) (int, error) {
|
||||||
_spec := _q.querySpec()
|
_spec := _q.querySpec()
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
_spec.Unique = false
|
_spec.Unique = false
|
||||||
_spec.Node.Columns = nil
|
_spec.Node.Columns = nil
|
||||||
return sqlgraph.CountNodes(ctx, _q.driver, _spec)
|
return sqlgraph.CountNodes(ctx, _q.driver, _spec)
|
||||||
@@ -495,6 +503,9 @@ func (_q *UserAllowedGroupQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
|||||||
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
||||||
selector.Distinct()
|
selector.Distinct()
|
||||||
}
|
}
|
||||||
|
for _, m := range _q.modifiers {
|
||||||
|
m(selector)
|
||||||
|
}
|
||||||
for _, p := range _q.predicates {
|
for _, p := range _q.predicates {
|
||||||
p(selector)
|
p(selector)
|
||||||
}
|
}
|
||||||
@@ -512,6 +523,32 @@ func (_q *UserAllowedGroupQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
|||||||
return selector
|
return selector
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||||
|
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||||
|
// either committed or rolled-back.
|
||||||
|
func (_q *UserAllowedGroupQuery) ForUpdate(opts ...sql.LockOption) *UserAllowedGroupQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForUpdate(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||||
|
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||||
|
// until your transaction commits.
|
||||||
|
func (_q *UserAllowedGroupQuery) ForShare(opts ...sql.LockOption) *UserAllowedGroupQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForShare(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
// UserAllowedGroupGroupBy is the group-by builder for UserAllowedGroup entities.
|
// UserAllowedGroupGroupBy is the group-by builder for UserAllowedGroup entities.
|
||||||
type UserAllowedGroupGroupBy struct {
|
type UserAllowedGroupGroupBy struct {
|
||||||
selector
|
selector
|
||||||
|
|||||||
@@ -9,6 +9,7 @@ import (
|
|||||||
"math"
|
"math"
|
||||||
|
|
||||||
"entgo.io/ent"
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
"entgo.io/ent/dialect/sql"
|
"entgo.io/ent/dialect/sql"
|
||||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
"entgo.io/ent/schema/field"
|
"entgo.io/ent/schema/field"
|
||||||
@@ -25,6 +26,7 @@ type UserAttributeDefinitionQuery struct {
|
|||||||
inters []Interceptor
|
inters []Interceptor
|
||||||
predicates []predicate.UserAttributeDefinition
|
predicates []predicate.UserAttributeDefinition
|
||||||
withValues *UserAttributeValueQuery
|
withValues *UserAttributeValueQuery
|
||||||
|
modifiers []func(*sql.Selector)
|
||||||
// intermediate query (i.e. traversal path).
|
// intermediate query (i.e. traversal path).
|
||||||
sql *sql.Selector
|
sql *sql.Selector
|
||||||
path func(context.Context) (*sql.Selector, error)
|
path func(context.Context) (*sql.Selector, error)
|
||||||
@@ -384,6 +386,9 @@ func (_q *UserAttributeDefinitionQuery) sqlAll(ctx context.Context, hooks ...que
|
|||||||
node.Edges.loadedTypes = loadedTypes
|
node.Edges.loadedTypes = loadedTypes
|
||||||
return node.assignValues(columns, values)
|
return node.assignValues(columns, values)
|
||||||
}
|
}
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
for i := range hooks {
|
for i := range hooks {
|
||||||
hooks[i](ctx, _spec)
|
hooks[i](ctx, _spec)
|
||||||
}
|
}
|
||||||
@@ -436,6 +441,9 @@ func (_q *UserAttributeDefinitionQuery) loadValues(ctx context.Context, query *U
|
|||||||
|
|
||||||
func (_q *UserAttributeDefinitionQuery) sqlCount(ctx context.Context) (int, error) {
|
func (_q *UserAttributeDefinitionQuery) sqlCount(ctx context.Context) (int, error) {
|
||||||
_spec := _q.querySpec()
|
_spec := _q.querySpec()
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
_spec.Node.Columns = _q.ctx.Fields
|
_spec.Node.Columns = _q.ctx.Fields
|
||||||
if len(_q.ctx.Fields) > 0 {
|
if len(_q.ctx.Fields) > 0 {
|
||||||
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
||||||
@@ -498,6 +506,9 @@ func (_q *UserAttributeDefinitionQuery) sqlQuery(ctx context.Context) *sql.Selec
|
|||||||
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
||||||
selector.Distinct()
|
selector.Distinct()
|
||||||
}
|
}
|
||||||
|
for _, m := range _q.modifiers {
|
||||||
|
m(selector)
|
||||||
|
}
|
||||||
for _, p := range _q.predicates {
|
for _, p := range _q.predicates {
|
||||||
p(selector)
|
p(selector)
|
||||||
}
|
}
|
||||||
@@ -515,6 +526,32 @@ func (_q *UserAttributeDefinitionQuery) sqlQuery(ctx context.Context) *sql.Selec
|
|||||||
return selector
|
return selector
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||||
|
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||||
|
// either committed or rolled-back.
|
||||||
|
func (_q *UserAttributeDefinitionQuery) ForUpdate(opts ...sql.LockOption) *UserAttributeDefinitionQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForUpdate(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||||
|
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||||
|
// until your transaction commits.
|
||||||
|
func (_q *UserAttributeDefinitionQuery) ForShare(opts ...sql.LockOption) *UserAttributeDefinitionQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForShare(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
// UserAttributeDefinitionGroupBy is the group-by builder for UserAttributeDefinition entities.
|
// UserAttributeDefinitionGroupBy is the group-by builder for UserAttributeDefinition entities.
|
||||||
type UserAttributeDefinitionGroupBy struct {
|
type UserAttributeDefinitionGroupBy struct {
|
||||||
selector
|
selector
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ import (
|
|||||||
"math"
|
"math"
|
||||||
|
|
||||||
"entgo.io/ent"
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
"entgo.io/ent/dialect/sql"
|
"entgo.io/ent/dialect/sql"
|
||||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
"entgo.io/ent/schema/field"
|
"entgo.io/ent/schema/field"
|
||||||
@@ -26,6 +27,7 @@ type UserAttributeValueQuery struct {
|
|||||||
predicates []predicate.UserAttributeValue
|
predicates []predicate.UserAttributeValue
|
||||||
withUser *UserQuery
|
withUser *UserQuery
|
||||||
withDefinition *UserAttributeDefinitionQuery
|
withDefinition *UserAttributeDefinitionQuery
|
||||||
|
modifiers []func(*sql.Selector)
|
||||||
// intermediate query (i.e. traversal path).
|
// intermediate query (i.e. traversal path).
|
||||||
sql *sql.Selector
|
sql *sql.Selector
|
||||||
path func(context.Context) (*sql.Selector, error)
|
path func(context.Context) (*sql.Selector, error)
|
||||||
@@ -420,6 +422,9 @@ func (_q *UserAttributeValueQuery) sqlAll(ctx context.Context, hooks ...queryHoo
|
|||||||
node.Edges.loadedTypes = loadedTypes
|
node.Edges.loadedTypes = loadedTypes
|
||||||
return node.assignValues(columns, values)
|
return node.assignValues(columns, values)
|
||||||
}
|
}
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
for i := range hooks {
|
for i := range hooks {
|
||||||
hooks[i](ctx, _spec)
|
hooks[i](ctx, _spec)
|
||||||
}
|
}
|
||||||
@@ -505,6 +510,9 @@ func (_q *UserAttributeValueQuery) loadDefinition(ctx context.Context, query *Us
|
|||||||
|
|
||||||
func (_q *UserAttributeValueQuery) sqlCount(ctx context.Context) (int, error) {
|
func (_q *UserAttributeValueQuery) sqlCount(ctx context.Context) (int, error) {
|
||||||
_spec := _q.querySpec()
|
_spec := _q.querySpec()
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
_spec.Node.Columns = _q.ctx.Fields
|
_spec.Node.Columns = _q.ctx.Fields
|
||||||
if len(_q.ctx.Fields) > 0 {
|
if len(_q.ctx.Fields) > 0 {
|
||||||
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
||||||
@@ -573,6 +581,9 @@ func (_q *UserAttributeValueQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
|||||||
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
||||||
selector.Distinct()
|
selector.Distinct()
|
||||||
}
|
}
|
||||||
|
for _, m := range _q.modifiers {
|
||||||
|
m(selector)
|
||||||
|
}
|
||||||
for _, p := range _q.predicates {
|
for _, p := range _q.predicates {
|
||||||
p(selector)
|
p(selector)
|
||||||
}
|
}
|
||||||
@@ -590,6 +601,32 @@ func (_q *UserAttributeValueQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
|||||||
return selector
|
return selector
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||||
|
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||||
|
// either committed or rolled-back.
|
||||||
|
func (_q *UserAttributeValueQuery) ForUpdate(opts ...sql.LockOption) *UserAttributeValueQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForUpdate(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||||
|
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||||
|
// until your transaction commits.
|
||||||
|
func (_q *UserAttributeValueQuery) ForShare(opts ...sql.LockOption) *UserAttributeValueQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForShare(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
// UserAttributeValueGroupBy is the group-by builder for UserAttributeValue entities.
|
// UserAttributeValueGroupBy is the group-by builder for UserAttributeValue entities.
|
||||||
type UserAttributeValueGroupBy struct {
|
type UserAttributeValueGroupBy struct {
|
||||||
selector
|
selector
|
||||||
|
|||||||
@@ -9,6 +9,7 @@ import (
|
|||||||
"math"
|
"math"
|
||||||
|
|
||||||
"entgo.io/ent"
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
"entgo.io/ent/dialect/sql"
|
"entgo.io/ent/dialect/sql"
|
||||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
"entgo.io/ent/schema/field"
|
"entgo.io/ent/schema/field"
|
||||||
@@ -30,6 +31,7 @@ type UserSubscriptionQuery struct {
|
|||||||
withGroup *GroupQuery
|
withGroup *GroupQuery
|
||||||
withAssignedByUser *UserQuery
|
withAssignedByUser *UserQuery
|
||||||
withUsageLogs *UsageLogQuery
|
withUsageLogs *UsageLogQuery
|
||||||
|
modifiers []func(*sql.Selector)
|
||||||
// intermediate query (i.e. traversal path).
|
// intermediate query (i.e. traversal path).
|
||||||
sql *sql.Selector
|
sql *sql.Selector
|
||||||
path func(context.Context) (*sql.Selector, error)
|
path func(context.Context) (*sql.Selector, error)
|
||||||
@@ -494,6 +496,9 @@ func (_q *UserSubscriptionQuery) sqlAll(ctx context.Context, hooks ...queryHook)
|
|||||||
node.Edges.loadedTypes = loadedTypes
|
node.Edges.loadedTypes = loadedTypes
|
||||||
return node.assignValues(columns, values)
|
return node.assignValues(columns, values)
|
||||||
}
|
}
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
for i := range hooks {
|
for i := range hooks {
|
||||||
hooks[i](ctx, _spec)
|
hooks[i](ctx, _spec)
|
||||||
}
|
}
|
||||||
@@ -657,6 +662,9 @@ func (_q *UserSubscriptionQuery) loadUsageLogs(ctx context.Context, query *Usage
|
|||||||
|
|
||||||
func (_q *UserSubscriptionQuery) sqlCount(ctx context.Context) (int, error) {
|
func (_q *UserSubscriptionQuery) sqlCount(ctx context.Context) (int, error) {
|
||||||
_spec := _q.querySpec()
|
_spec := _q.querySpec()
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
_spec.Node.Columns = _q.ctx.Fields
|
_spec.Node.Columns = _q.ctx.Fields
|
||||||
if len(_q.ctx.Fields) > 0 {
|
if len(_q.ctx.Fields) > 0 {
|
||||||
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
||||||
@@ -728,6 +736,9 @@ func (_q *UserSubscriptionQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
|||||||
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
||||||
selector.Distinct()
|
selector.Distinct()
|
||||||
}
|
}
|
||||||
|
for _, m := range _q.modifiers {
|
||||||
|
m(selector)
|
||||||
|
}
|
||||||
for _, p := range _q.predicates {
|
for _, p := range _q.predicates {
|
||||||
p(selector)
|
p(selector)
|
||||||
}
|
}
|
||||||
@@ -745,6 +756,32 @@ func (_q *UserSubscriptionQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
|||||||
return selector
|
return selector
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||||
|
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||||
|
// either committed or rolled-back.
|
||||||
|
func (_q *UserSubscriptionQuery) ForUpdate(opts ...sql.LockOption) *UserSubscriptionQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForUpdate(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||||
|
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||||
|
// until your transaction commits.
|
||||||
|
func (_q *UserSubscriptionQuery) ForShare(opts ...sql.LockOption) *UserSubscriptionQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForShare(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
// UserSubscriptionGroupBy is the group-by builder for UserSubscription entities.
|
// UserSubscriptionGroupBy is the group-by builder for UserSubscription entities.
|
||||||
type UserSubscriptionGroupBy struct {
|
type UserSubscriptionGroupBy struct {
|
||||||
selector
|
selector
|
||||||
|
|||||||
@@ -8,9 +8,11 @@ require (
|
|||||||
github.com/golang-jwt/jwt/v5 v5.2.2
|
github.com/golang-jwt/jwt/v5 v5.2.2
|
||||||
github.com/google/uuid v1.6.0
|
github.com/google/uuid v1.6.0
|
||||||
github.com/google/wire v0.7.0
|
github.com/google/wire v0.7.0
|
||||||
|
github.com/gorilla/websocket v1.5.3
|
||||||
github.com/imroc/req/v3 v3.57.0
|
github.com/imroc/req/v3 v3.57.0
|
||||||
github.com/lib/pq v1.10.9
|
github.com/lib/pq v1.10.9
|
||||||
github.com/redis/go-redis/v9 v9.17.2
|
github.com/redis/go-redis/v9 v9.17.2
|
||||||
|
github.com/shirou/gopsutil/v4 v4.25.6
|
||||||
github.com/spf13/viper v1.18.2
|
github.com/spf13/viper v1.18.2
|
||||||
github.com/stretchr/testify v1.11.1
|
github.com/stretchr/testify v1.11.1
|
||||||
github.com/testcontainers/testcontainers-go/modules/postgres v0.40.0
|
github.com/testcontainers/testcontainers-go/modules/postgres v0.40.0
|
||||||
@@ -44,11 +46,13 @@ require (
|
|||||||
github.com/containerd/platforms v0.2.1 // indirect
|
github.com/containerd/platforms v0.2.1 // indirect
|
||||||
github.com/cpuguy83/dockercfg v0.3.2 // indirect
|
github.com/cpuguy83/dockercfg v0.3.2 // indirect
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||||
|
github.com/dgraph-io/ristretto v0.2.0 // indirect
|
||||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
|
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
|
||||||
github.com/distribution/reference v0.6.0 // indirect
|
github.com/distribution/reference v0.6.0 // indirect
|
||||||
github.com/docker/docker v28.5.1+incompatible // indirect
|
github.com/docker/docker v28.5.1+incompatible // indirect
|
||||||
github.com/docker/go-connections v0.6.0 // indirect
|
github.com/docker/go-connections v0.6.0 // indirect
|
||||||
github.com/docker/go-units v0.5.0 // indirect
|
github.com/docker/go-units v0.5.0 // indirect
|
||||||
|
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||||
github.com/ebitengine/purego v0.8.4 // indirect
|
github.com/ebitengine/purego v0.8.4 // indirect
|
||||||
github.com/fatih/color v1.18.0 // indirect
|
github.com/fatih/color v1.18.0 // indirect
|
||||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||||
@@ -104,9 +108,9 @@ require (
|
|||||||
github.com/quic-go/quic-go v0.57.1 // indirect
|
github.com/quic-go/quic-go v0.57.1 // indirect
|
||||||
github.com/refraction-networking/utls v1.8.1 // indirect
|
github.com/refraction-networking/utls v1.8.1 // indirect
|
||||||
github.com/rivo/uniseg v0.2.0 // indirect
|
github.com/rivo/uniseg v0.2.0 // indirect
|
||||||
|
github.com/robfig/cron/v3 v3.0.1 // indirect
|
||||||
github.com/sagikazarmark/locafero v0.4.0 // indirect
|
github.com/sagikazarmark/locafero v0.4.0 // indirect
|
||||||
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
|
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
|
||||||
github.com/shirou/gopsutil/v4 v4.25.6 // indirect
|
|
||||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||||
github.com/spaolacci/murmur3 v1.1.0 // indirect
|
github.com/spaolacci/murmur3 v1.1.0 // indirect
|
||||||
|
|||||||
@@ -51,6 +51,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
|
|||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/dgraph-io/ristretto v0.2.0 h1:XAfl+7cmoUDWW/2Lx8TGZQjjxIQ2Ley9DSf52dru4WE=
|
||||||
|
github.com/dgraph-io/ristretto v0.2.0/go.mod h1:8uBHCU/PBV4Ag0CJrP47b9Ofby5dqWNh4FicAdoqFNU=
|
||||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
|
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
|
||||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||||
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
|
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
|
||||||
@@ -61,6 +63,8 @@ github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pM
|
|||||||
github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
|
github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
|
||||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||||
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||||
|
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||||
|
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||||
github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw=
|
github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw=
|
||||||
github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||||
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
|
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
|
||||||
@@ -113,6 +117,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
|||||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/google/wire v0.7.0 h1:JxUKI6+CVBgCO2WToKy/nQk0sS+amI9z9EjVmdaocj4=
|
github.com/google/wire v0.7.0 h1:JxUKI6+CVBgCO2WToKy/nQk0sS+amI9z9EjVmdaocj4=
|
||||||
github.com/google/wire v0.7.0/go.mod h1:n6YbUQD9cPKTnHXEBN2DXlOp/mVADhVErcMFb0v3J18=
|
github.com/google/wire v0.7.0/go.mod h1:n6YbUQD9cPKTnHXEBN2DXlOp/mVADhVErcMFb0v3J18=
|
||||||
|
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||||
|
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg=
|
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg=
|
||||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4=
|
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4=
|
||||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||||
@@ -220,6 +226,8 @@ github.com/refraction-networking/utls v1.8.1 h1:yNY1kapmQU8JeM1sSw2H2asfTIwWxIkr
|
|||||||
github.com/refraction-networking/utls v1.8.1/go.mod h1:jkSOEkLqn+S/jtpEHPOsVv/4V4EVnelwbMQl4vCWXAM=
|
github.com/refraction-networking/utls v1.8.1/go.mod h1:jkSOEkLqn+S/jtpEHPOsVv/4V4EVnelwbMQl4vCWXAM=
|
||||||
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
|
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
|
||||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||||
|
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
||||||
|
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
||||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
|
|||||||
@@ -36,33 +36,29 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type Config struct {
|
type Config struct {
|
||||||
Server ServerConfig `mapstructure:"server"`
|
Server ServerConfig `mapstructure:"server"`
|
||||||
CORS CORSConfig `mapstructure:"cors"`
|
CORS CORSConfig `mapstructure:"cors"`
|
||||||
Security SecurityConfig `mapstructure:"security"`
|
Security SecurityConfig `mapstructure:"security"`
|
||||||
Billing BillingConfig `mapstructure:"billing"`
|
Billing BillingConfig `mapstructure:"billing"`
|
||||||
Turnstile TurnstileConfig `mapstructure:"turnstile"`
|
Turnstile TurnstileConfig `mapstructure:"turnstile"`
|
||||||
Database DatabaseConfig `mapstructure:"database"`
|
Database DatabaseConfig `mapstructure:"database"`
|
||||||
Redis RedisConfig `mapstructure:"redis"`
|
Redis RedisConfig `mapstructure:"redis"`
|
||||||
JWT JWTConfig `mapstructure:"jwt"`
|
Ops OpsConfig `mapstructure:"ops"`
|
||||||
LinuxDo LinuxDoConnectConfig `mapstructure:"linuxdo_connect"`
|
JWT JWTConfig `mapstructure:"jwt"`
|
||||||
Default DefaultConfig `mapstructure:"default"`
|
LinuxDo LinuxDoConnectConfig `mapstructure:"linuxdo_connect"`
|
||||||
RateLimit RateLimitConfig `mapstructure:"rate_limit"`
|
Default DefaultConfig `mapstructure:"default"`
|
||||||
Pricing PricingConfig `mapstructure:"pricing"`
|
RateLimit RateLimitConfig `mapstructure:"rate_limit"`
|
||||||
Gateway GatewayConfig `mapstructure:"gateway"`
|
Pricing PricingConfig `mapstructure:"pricing"`
|
||||||
Concurrency ConcurrencyConfig `mapstructure:"concurrency"`
|
Gateway GatewayConfig `mapstructure:"gateway"`
|
||||||
TokenRefresh TokenRefreshConfig `mapstructure:"token_refresh"`
|
APIKeyAuth APIKeyAuthCacheConfig `mapstructure:"api_key_auth_cache"`
|
||||||
RunMode string `mapstructure:"run_mode" yaml:"run_mode"`
|
Dashboard DashboardCacheConfig `mapstructure:"dashboard_cache"`
|
||||||
Timezone string `mapstructure:"timezone"` // e.g. "Asia/Shanghai", "UTC"
|
DashboardAgg DashboardAggregationConfig `mapstructure:"dashboard_aggregation"`
|
||||||
Gemini GeminiConfig `mapstructure:"gemini"`
|
Concurrency ConcurrencyConfig `mapstructure:"concurrency"`
|
||||||
Update UpdateConfig `mapstructure:"update"`
|
TokenRefresh TokenRefreshConfig `mapstructure:"token_refresh"`
|
||||||
}
|
RunMode string `mapstructure:"run_mode" yaml:"run_mode"`
|
||||||
|
Timezone string `mapstructure:"timezone"` // e.g. "Asia/Shanghai", "UTC"
|
||||||
// UpdateConfig 在线更新相关配置
|
Gemini GeminiConfig `mapstructure:"gemini"`
|
||||||
type UpdateConfig struct {
|
Update UpdateConfig `mapstructure:"update"`
|
||||||
// ProxyURL 用于访问 GitHub 的代理地址
|
|
||||||
// 支持 http/https/socks5/socks5h 协议
|
|
||||||
// 例如: "http://127.0.0.1:7890", "socks5://127.0.0.1:1080"
|
|
||||||
ProxyURL string `mapstructure:"proxy_url"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type GeminiConfig struct {
|
type GeminiConfig struct {
|
||||||
@@ -87,6 +83,33 @@ type GeminiTierQuotaConfig struct {
|
|||||||
CooldownMinutes *int `mapstructure:"cooldown_minutes" json:"cooldown_minutes"`
|
CooldownMinutes *int `mapstructure:"cooldown_minutes" json:"cooldown_minutes"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type UpdateConfig struct {
|
||||||
|
// ProxyURL 用于访问 GitHub 的代理地址
|
||||||
|
// 支持 http/https/socks5/socks5h 协议
|
||||||
|
// 例如: "http://127.0.0.1:7890", "socks5://127.0.0.1:1080"
|
||||||
|
ProxyURL string `mapstructure:"proxy_url"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type LinuxDoConnectConfig struct {
|
||||||
|
Enabled bool `mapstructure:"enabled"`
|
||||||
|
ClientID string `mapstructure:"client_id"`
|
||||||
|
ClientSecret string `mapstructure:"client_secret"`
|
||||||
|
AuthorizeURL string `mapstructure:"authorize_url"`
|
||||||
|
TokenURL string `mapstructure:"token_url"`
|
||||||
|
UserInfoURL string `mapstructure:"userinfo_url"`
|
||||||
|
Scopes string `mapstructure:"scopes"`
|
||||||
|
RedirectURL string `mapstructure:"redirect_url"` // 后端回调地址(需在提供方后台登记)
|
||||||
|
FrontendRedirectURL string `mapstructure:"frontend_redirect_url"` // 前端接收 token 的路由(默认:/auth/linuxdo/callback)
|
||||||
|
TokenAuthMethod string `mapstructure:"token_auth_method"` // client_secret_post / client_secret_basic / none
|
||||||
|
UsePKCE bool `mapstructure:"use_pkce"`
|
||||||
|
|
||||||
|
// 可选:用于从 userinfo JSON 中提取字段的 gjson 路径。
|
||||||
|
// 为空时,服务端会尝试一组常见字段名。
|
||||||
|
UserInfoEmailPath string `mapstructure:"userinfo_email_path"`
|
||||||
|
UserInfoIDPath string `mapstructure:"userinfo_id_path"`
|
||||||
|
UserInfoUsernamePath string `mapstructure:"userinfo_username_path"`
|
||||||
|
}
|
||||||
|
|
||||||
// TokenRefreshConfig OAuth token自动刷新配置
|
// TokenRefreshConfig OAuth token自动刷新配置
|
||||||
type TokenRefreshConfig struct {
|
type TokenRefreshConfig struct {
|
||||||
// 是否启用自动刷新
|
// 是否启用自动刷新
|
||||||
@@ -247,6 +270,29 @@ type GatewaySchedulingConfig struct {
|
|||||||
|
|
||||||
// 过期槽位清理周期(0 表示禁用)
|
// 过期槽位清理周期(0 表示禁用)
|
||||||
SlotCleanupInterval time.Duration `mapstructure:"slot_cleanup_interval"`
|
SlotCleanupInterval time.Duration `mapstructure:"slot_cleanup_interval"`
|
||||||
|
|
||||||
|
// 受控回源配置
|
||||||
|
DbFallbackEnabled bool `mapstructure:"db_fallback_enabled"`
|
||||||
|
// 受控回源超时(秒),0 表示不额外收紧超时
|
||||||
|
DbFallbackTimeoutSeconds int `mapstructure:"db_fallback_timeout_seconds"`
|
||||||
|
// 受控回源限流(实例级 QPS),0 表示不限制
|
||||||
|
DbFallbackMaxQPS int `mapstructure:"db_fallback_max_qps"`
|
||||||
|
|
||||||
|
// Outbox 轮询与滞后阈值配置
|
||||||
|
// Outbox 轮询周期(秒)
|
||||||
|
OutboxPollIntervalSeconds int `mapstructure:"outbox_poll_interval_seconds"`
|
||||||
|
// Outbox 滞后告警阈值(秒)
|
||||||
|
OutboxLagWarnSeconds int `mapstructure:"outbox_lag_warn_seconds"`
|
||||||
|
// Outbox 触发强制重建阈值(秒)
|
||||||
|
OutboxLagRebuildSeconds int `mapstructure:"outbox_lag_rebuild_seconds"`
|
||||||
|
// Outbox 连续滞后触发次数
|
||||||
|
OutboxLagRebuildFailures int `mapstructure:"outbox_lag_rebuild_failures"`
|
||||||
|
// Outbox 积压触发重建阈值(行数)
|
||||||
|
OutboxBacklogRebuildRows int `mapstructure:"outbox_backlog_rebuild_rows"`
|
||||||
|
|
||||||
|
// 全量重建周期配置
|
||||||
|
// 全量重建周期(秒),0 表示禁用
|
||||||
|
FullRebuildIntervalSeconds int `mapstructure:"full_rebuild_interval_seconds"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ServerConfig) Address() string {
|
func (s *ServerConfig) Address() string {
|
||||||
@@ -274,6 +320,13 @@ type DatabaseConfig struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (d *DatabaseConfig) DSN() string {
|
func (d *DatabaseConfig) DSN() string {
|
||||||
|
// 当密码为空时不包含 password 参数,避免 libpq 解析错误
|
||||||
|
if d.Password == "" {
|
||||||
|
return fmt.Sprintf(
|
||||||
|
"host=%s port=%d user=%s dbname=%s sslmode=%s",
|
||||||
|
d.Host, d.Port, d.User, d.DBName, d.SSLMode,
|
||||||
|
)
|
||||||
|
}
|
||||||
return fmt.Sprintf(
|
return fmt.Sprintf(
|
||||||
"host=%s port=%d user=%s password=%s dbname=%s sslmode=%s",
|
"host=%s port=%d user=%s password=%s dbname=%s sslmode=%s",
|
||||||
d.Host, d.Port, d.User, d.Password, d.DBName, d.SSLMode,
|
d.Host, d.Port, d.User, d.Password, d.DBName, d.SSLMode,
|
||||||
@@ -285,6 +338,13 @@ func (d *DatabaseConfig) DSNWithTimezone(tz string) string {
|
|||||||
if tz == "" {
|
if tz == "" {
|
||||||
tz = "Asia/Shanghai"
|
tz = "Asia/Shanghai"
|
||||||
}
|
}
|
||||||
|
// 当密码为空时不包含 password 参数,避免 libpq 解析错误
|
||||||
|
if d.Password == "" {
|
||||||
|
return fmt.Sprintf(
|
||||||
|
"host=%s port=%d user=%s dbname=%s sslmode=%s TimeZone=%s",
|
||||||
|
d.Host, d.Port, d.User, d.DBName, d.SSLMode, tz,
|
||||||
|
)
|
||||||
|
}
|
||||||
return fmt.Sprintf(
|
return fmt.Sprintf(
|
||||||
"host=%s port=%d user=%s password=%s dbname=%s sslmode=%s TimeZone=%s",
|
"host=%s port=%d user=%s password=%s dbname=%s sslmode=%s TimeZone=%s",
|
||||||
d.Host, d.Port, d.User, d.Password, d.DBName, d.SSLMode, tz,
|
d.Host, d.Port, d.User, d.Password, d.DBName, d.SSLMode, tz,
|
||||||
@@ -315,6 +375,47 @@ func (r *RedisConfig) Address() string {
|
|||||||
return fmt.Sprintf("%s:%d", r.Host, r.Port)
|
return fmt.Sprintf("%s:%d", r.Host, r.Port)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type OpsConfig struct {
|
||||||
|
// Enabled controls whether ops features should run.
|
||||||
|
//
|
||||||
|
// NOTE: vNext still has a DB-backed feature flag (ops_monitoring_enabled) for runtime on/off.
|
||||||
|
// This config flag is the "hard switch" for deployments that want to disable ops completely.
|
||||||
|
Enabled bool `mapstructure:"enabled"`
|
||||||
|
|
||||||
|
// UsePreaggregatedTables prefers ops_metrics_hourly/daily for long-window dashboard queries.
|
||||||
|
UsePreaggregatedTables bool `mapstructure:"use_preaggregated_tables"`
|
||||||
|
|
||||||
|
// Cleanup controls periodic deletion of old ops data to prevent unbounded growth.
|
||||||
|
Cleanup OpsCleanupConfig `mapstructure:"cleanup"`
|
||||||
|
|
||||||
|
// MetricsCollectorCache controls Redis caching for expensive per-window collector queries.
|
||||||
|
MetricsCollectorCache OpsMetricsCollectorCacheConfig `mapstructure:"metrics_collector_cache"`
|
||||||
|
|
||||||
|
// Pre-aggregation configuration.
|
||||||
|
Aggregation OpsAggregationConfig `mapstructure:"aggregation"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type OpsCleanupConfig struct {
|
||||||
|
Enabled bool `mapstructure:"enabled"`
|
||||||
|
Schedule string `mapstructure:"schedule"`
|
||||||
|
|
||||||
|
// Retention days (0 disables that cleanup target).
|
||||||
|
//
|
||||||
|
// vNext requirement: default 30 days across ops datasets.
|
||||||
|
ErrorLogRetentionDays int `mapstructure:"error_log_retention_days"`
|
||||||
|
MinuteMetricsRetentionDays int `mapstructure:"minute_metrics_retention_days"`
|
||||||
|
HourlyMetricsRetentionDays int `mapstructure:"hourly_metrics_retention_days"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type OpsAggregationConfig struct {
|
||||||
|
Enabled bool `mapstructure:"enabled"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type OpsMetricsCollectorCacheConfig struct {
|
||||||
|
Enabled bool `mapstructure:"enabled"`
|
||||||
|
TTL time.Duration `mapstructure:"ttl"`
|
||||||
|
}
|
||||||
|
|
||||||
type JWTConfig struct {
|
type JWTConfig struct {
|
||||||
Secret string `mapstructure:"secret"`
|
Secret string `mapstructure:"secret"`
|
||||||
ExpireHour int `mapstructure:"expire_hour"`
|
ExpireHour int `mapstructure:"expire_hour"`
|
||||||
@@ -324,30 +425,6 @@ type TurnstileConfig struct {
|
|||||||
Required bool `mapstructure:"required"`
|
Required bool `mapstructure:"required"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// LinuxDoConnectConfig 用于 LinuxDo Connect OAuth 登录(终端用户 SSO)。
|
|
||||||
//
|
|
||||||
// 注意:这与上游账号的 OAuth(例如 OpenAI/Gemini 账号接入)不是一回事。
|
|
||||||
// 这里是用于登录 Sub2API 本身的用户体系。
|
|
||||||
type LinuxDoConnectConfig struct {
|
|
||||||
Enabled bool `mapstructure:"enabled"`
|
|
||||||
ClientID string `mapstructure:"client_id"`
|
|
||||||
ClientSecret string `mapstructure:"client_secret"`
|
|
||||||
AuthorizeURL string `mapstructure:"authorize_url"`
|
|
||||||
TokenURL string `mapstructure:"token_url"`
|
|
||||||
UserInfoURL string `mapstructure:"userinfo_url"`
|
|
||||||
Scopes string `mapstructure:"scopes"`
|
|
||||||
RedirectURL string `mapstructure:"redirect_url"` // 后端回调地址(需在提供方后台登记)
|
|
||||||
FrontendRedirectURL string `mapstructure:"frontend_redirect_url"` // 前端接收 token 的路由(默认:/auth/linuxdo/callback)
|
|
||||||
TokenAuthMethod string `mapstructure:"token_auth_method"` // client_secret_post / client_secret_basic / none
|
|
||||||
UsePKCE bool `mapstructure:"use_pkce"`
|
|
||||||
|
|
||||||
// 可选:用于从 userinfo JSON 中提取字段的 gjson 路径。
|
|
||||||
// 为空时,服务端会尝试一组常见字段名。
|
|
||||||
UserInfoEmailPath string `mapstructure:"userinfo_email_path"`
|
|
||||||
UserInfoIDPath string `mapstructure:"userinfo_id_path"`
|
|
||||||
UserInfoUsernamePath string `mapstructure:"userinfo_username_path"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type DefaultConfig struct {
|
type DefaultConfig struct {
|
||||||
AdminEmail string `mapstructure:"admin_email"`
|
AdminEmail string `mapstructure:"admin_email"`
|
||||||
AdminPassword string `mapstructure:"admin_password"`
|
AdminPassword string `mapstructure:"admin_password"`
|
||||||
@@ -361,6 +438,55 @@ type RateLimitConfig struct {
|
|||||||
OverloadCooldownMinutes int `mapstructure:"overload_cooldown_minutes"` // 529过载冷却时间(分钟)
|
OverloadCooldownMinutes int `mapstructure:"overload_cooldown_minutes"` // 529过载冷却时间(分钟)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// APIKeyAuthCacheConfig API Key 认证缓存配置
|
||||||
|
type APIKeyAuthCacheConfig struct {
|
||||||
|
L1Size int `mapstructure:"l1_size"`
|
||||||
|
L1TTLSeconds int `mapstructure:"l1_ttl_seconds"`
|
||||||
|
L2TTLSeconds int `mapstructure:"l2_ttl_seconds"`
|
||||||
|
NegativeTTLSeconds int `mapstructure:"negative_ttl_seconds"`
|
||||||
|
JitterPercent int `mapstructure:"jitter_percent"`
|
||||||
|
Singleflight bool `mapstructure:"singleflight"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// DashboardCacheConfig 仪表盘统计缓存配置
|
||||||
|
type DashboardCacheConfig struct {
|
||||||
|
// Enabled: 是否启用仪表盘缓存
|
||||||
|
Enabled bool `mapstructure:"enabled"`
|
||||||
|
// KeyPrefix: Redis key 前缀,用于多环境隔离
|
||||||
|
KeyPrefix string `mapstructure:"key_prefix"`
|
||||||
|
// StatsFreshTTLSeconds: 缓存命中认为“新鲜”的时间窗口(秒)
|
||||||
|
StatsFreshTTLSeconds int `mapstructure:"stats_fresh_ttl_seconds"`
|
||||||
|
// StatsTTLSeconds: Redis 缓存总 TTL(秒)
|
||||||
|
StatsTTLSeconds int `mapstructure:"stats_ttl_seconds"`
|
||||||
|
// StatsRefreshTimeoutSeconds: 异步刷新超时(秒)
|
||||||
|
StatsRefreshTimeoutSeconds int `mapstructure:"stats_refresh_timeout_seconds"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// DashboardAggregationConfig 仪表盘预聚合配置
|
||||||
|
type DashboardAggregationConfig struct {
|
||||||
|
// Enabled: 是否启用预聚合作业
|
||||||
|
Enabled bool `mapstructure:"enabled"`
|
||||||
|
// IntervalSeconds: 聚合刷新间隔(秒)
|
||||||
|
IntervalSeconds int `mapstructure:"interval_seconds"`
|
||||||
|
// LookbackSeconds: 回看窗口(秒)
|
||||||
|
LookbackSeconds int `mapstructure:"lookback_seconds"`
|
||||||
|
// BackfillEnabled: 是否允许全量回填
|
||||||
|
BackfillEnabled bool `mapstructure:"backfill_enabled"`
|
||||||
|
// BackfillMaxDays: 回填最大跨度(天)
|
||||||
|
BackfillMaxDays int `mapstructure:"backfill_max_days"`
|
||||||
|
// Retention: 各表保留窗口(天)
|
||||||
|
Retention DashboardAggregationRetentionConfig `mapstructure:"retention"`
|
||||||
|
// RecomputeDays: 启动时重算最近 N 天
|
||||||
|
RecomputeDays int `mapstructure:"recompute_days"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// DashboardAggregationRetentionConfig 预聚合保留窗口
|
||||||
|
type DashboardAggregationRetentionConfig struct {
|
||||||
|
UsageLogsDays int `mapstructure:"usage_logs_days"`
|
||||||
|
HourlyDays int `mapstructure:"hourly_days"`
|
||||||
|
DailyDays int `mapstructure:"daily_days"`
|
||||||
|
}
|
||||||
|
|
||||||
func NormalizeRunMode(value string) string {
|
func NormalizeRunMode(value string) string {
|
||||||
normalized := strings.ToLower(strings.TrimSpace(value))
|
normalized := strings.ToLower(strings.TrimSpace(value))
|
||||||
switch normalized {
|
switch normalized {
|
||||||
@@ -426,6 +552,7 @@ func Load() (*Config, error) {
|
|||||||
cfg.LinuxDo.UserInfoEmailPath = strings.TrimSpace(cfg.LinuxDo.UserInfoEmailPath)
|
cfg.LinuxDo.UserInfoEmailPath = strings.TrimSpace(cfg.LinuxDo.UserInfoEmailPath)
|
||||||
cfg.LinuxDo.UserInfoIDPath = strings.TrimSpace(cfg.LinuxDo.UserInfoIDPath)
|
cfg.LinuxDo.UserInfoIDPath = strings.TrimSpace(cfg.LinuxDo.UserInfoIDPath)
|
||||||
cfg.LinuxDo.UserInfoUsernamePath = strings.TrimSpace(cfg.LinuxDo.UserInfoUsernamePath)
|
cfg.LinuxDo.UserInfoUsernamePath = strings.TrimSpace(cfg.LinuxDo.UserInfoUsernamePath)
|
||||||
|
cfg.Dashboard.KeyPrefix = strings.TrimSpace(cfg.Dashboard.KeyPrefix)
|
||||||
cfg.CORS.AllowedOrigins = normalizeStringSlice(cfg.CORS.AllowedOrigins)
|
cfg.CORS.AllowedOrigins = normalizeStringSlice(cfg.CORS.AllowedOrigins)
|
||||||
cfg.Security.ResponseHeaders.AdditionalAllowed = normalizeStringSlice(cfg.Security.ResponseHeaders.AdditionalAllowed)
|
cfg.Security.ResponseHeaders.AdditionalAllowed = normalizeStringSlice(cfg.Security.ResponseHeaders.AdditionalAllowed)
|
||||||
cfg.Security.ResponseHeaders.ForceRemove = normalizeStringSlice(cfg.Security.ResponseHeaders.ForceRemove)
|
cfg.Security.ResponseHeaders.ForceRemove = normalizeStringSlice(cfg.Security.ResponseHeaders.ForceRemove)
|
||||||
@@ -464,81 +591,6 @@ func Load() (*Config, error) {
|
|||||||
return &cfg, nil
|
return &cfg, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// ValidateAbsoluteHTTPURL 校验一个绝对 http(s) URL(禁止 fragment)。
|
|
||||||
func ValidateAbsoluteHTTPURL(raw string) error {
|
|
||||||
raw = strings.TrimSpace(raw)
|
|
||||||
if raw == "" {
|
|
||||||
return fmt.Errorf("empty url")
|
|
||||||
}
|
|
||||||
u, err := url.Parse(raw)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !u.IsAbs() {
|
|
||||||
return fmt.Errorf("must be absolute")
|
|
||||||
}
|
|
||||||
if !isHTTPScheme(u.Scheme) {
|
|
||||||
return fmt.Errorf("unsupported scheme: %s", u.Scheme)
|
|
||||||
}
|
|
||||||
if strings.TrimSpace(u.Host) == "" {
|
|
||||||
return fmt.Errorf("missing host")
|
|
||||||
}
|
|
||||||
if u.Fragment != "" {
|
|
||||||
return fmt.Errorf("must not include fragment")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidateFrontendRedirectURL 校验前端回调地址:
|
|
||||||
// - 允许同源相对路径(以 / 开头)
|
|
||||||
// - 或绝对 http(s) URL(禁止 fragment)
|
|
||||||
func ValidateFrontendRedirectURL(raw string) error {
|
|
||||||
raw = strings.TrimSpace(raw)
|
|
||||||
if raw == "" {
|
|
||||||
return fmt.Errorf("empty url")
|
|
||||||
}
|
|
||||||
if strings.ContainsAny(raw, "\r\n") {
|
|
||||||
return fmt.Errorf("contains invalid characters")
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(raw, "/") {
|
|
||||||
if strings.HasPrefix(raw, "//") {
|
|
||||||
return fmt.Errorf("must not start with //")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
u, err := url.Parse(raw)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !u.IsAbs() {
|
|
||||||
return fmt.Errorf("must be absolute http(s) url or relative path")
|
|
||||||
}
|
|
||||||
if !isHTTPScheme(u.Scheme) {
|
|
||||||
return fmt.Errorf("unsupported scheme: %s", u.Scheme)
|
|
||||||
}
|
|
||||||
if strings.TrimSpace(u.Host) == "" {
|
|
||||||
return fmt.Errorf("missing host")
|
|
||||||
}
|
|
||||||
if u.Fragment != "" {
|
|
||||||
return fmt.Errorf("must not include fragment")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func isHTTPScheme(scheme string) bool {
|
|
||||||
return strings.EqualFold(scheme, "http") || strings.EqualFold(scheme, "https")
|
|
||||||
}
|
|
||||||
|
|
||||||
func warnIfInsecureURL(field, raw string) {
|
|
||||||
u, err := url.Parse(strings.TrimSpace(raw))
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if strings.EqualFold(u.Scheme, "http") {
|
|
||||||
log.Printf("Warning: %s uses http scheme; use https in production to avoid token leakage.", field)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func setDefaults() {
|
func setDefaults() {
|
||||||
viper.SetDefault("run_mode", RunModeStandard)
|
viper.SetDefault("run_mode", RunModeStandard)
|
||||||
|
|
||||||
@@ -588,7 +640,7 @@ func setDefaults() {
|
|||||||
// Turnstile
|
// Turnstile
|
||||||
viper.SetDefault("turnstile.required", false)
|
viper.SetDefault("turnstile.required", false)
|
||||||
|
|
||||||
// LinuxDo Connect OAuth 登录(终端用户 SSO)
|
// LinuxDo Connect OAuth 登录
|
||||||
viper.SetDefault("linuxdo_connect.enabled", false)
|
viper.SetDefault("linuxdo_connect.enabled", false)
|
||||||
viper.SetDefault("linuxdo_connect.client_id", "")
|
viper.SetDefault("linuxdo_connect.client_id", "")
|
||||||
viper.SetDefault("linuxdo_connect.client_secret", "")
|
viper.SetDefault("linuxdo_connect.client_secret", "")
|
||||||
@@ -627,6 +679,20 @@ func setDefaults() {
|
|||||||
viper.SetDefault("redis.pool_size", 128)
|
viper.SetDefault("redis.pool_size", 128)
|
||||||
viper.SetDefault("redis.min_idle_conns", 10)
|
viper.SetDefault("redis.min_idle_conns", 10)
|
||||||
|
|
||||||
|
// Ops (vNext)
|
||||||
|
viper.SetDefault("ops.enabled", true)
|
||||||
|
viper.SetDefault("ops.use_preaggregated_tables", false)
|
||||||
|
viper.SetDefault("ops.cleanup.enabled", true)
|
||||||
|
viper.SetDefault("ops.cleanup.schedule", "0 2 * * *")
|
||||||
|
// Retention days: vNext defaults to 30 days across ops datasets.
|
||||||
|
viper.SetDefault("ops.cleanup.error_log_retention_days", 30)
|
||||||
|
viper.SetDefault("ops.cleanup.minute_metrics_retention_days", 30)
|
||||||
|
viper.SetDefault("ops.cleanup.hourly_metrics_retention_days", 30)
|
||||||
|
viper.SetDefault("ops.aggregation.enabled", true)
|
||||||
|
viper.SetDefault("ops.metrics_collector_cache.enabled", true)
|
||||||
|
// TTL should be slightly larger than collection interval (1m) to maximize cross-replica cache hits.
|
||||||
|
viper.SetDefault("ops.metrics_collector_cache.ttl", 65*time.Second)
|
||||||
|
|
||||||
// JWT
|
// JWT
|
||||||
viper.SetDefault("jwt.secret", "")
|
viper.SetDefault("jwt.secret", "")
|
||||||
viper.SetDefault("jwt.expire_hour", 24)
|
viper.SetDefault("jwt.expire_hour", 24)
|
||||||
@@ -655,9 +721,35 @@ func setDefaults() {
|
|||||||
// Timezone (default to Asia/Shanghai for Chinese users)
|
// Timezone (default to Asia/Shanghai for Chinese users)
|
||||||
viper.SetDefault("timezone", "Asia/Shanghai")
|
viper.SetDefault("timezone", "Asia/Shanghai")
|
||||||
|
|
||||||
|
// API Key auth cache
|
||||||
|
viper.SetDefault("api_key_auth_cache.l1_size", 65535)
|
||||||
|
viper.SetDefault("api_key_auth_cache.l1_ttl_seconds", 15)
|
||||||
|
viper.SetDefault("api_key_auth_cache.l2_ttl_seconds", 300)
|
||||||
|
viper.SetDefault("api_key_auth_cache.negative_ttl_seconds", 30)
|
||||||
|
viper.SetDefault("api_key_auth_cache.jitter_percent", 10)
|
||||||
|
viper.SetDefault("api_key_auth_cache.singleflight", true)
|
||||||
|
|
||||||
|
// Dashboard cache
|
||||||
|
viper.SetDefault("dashboard_cache.enabled", true)
|
||||||
|
viper.SetDefault("dashboard_cache.key_prefix", "sub2api:")
|
||||||
|
viper.SetDefault("dashboard_cache.stats_fresh_ttl_seconds", 15)
|
||||||
|
viper.SetDefault("dashboard_cache.stats_ttl_seconds", 30)
|
||||||
|
viper.SetDefault("dashboard_cache.stats_refresh_timeout_seconds", 30)
|
||||||
|
|
||||||
|
// Dashboard aggregation
|
||||||
|
viper.SetDefault("dashboard_aggregation.enabled", true)
|
||||||
|
viper.SetDefault("dashboard_aggregation.interval_seconds", 60)
|
||||||
|
viper.SetDefault("dashboard_aggregation.lookback_seconds", 120)
|
||||||
|
viper.SetDefault("dashboard_aggregation.backfill_enabled", false)
|
||||||
|
viper.SetDefault("dashboard_aggregation.backfill_max_days", 31)
|
||||||
|
viper.SetDefault("dashboard_aggregation.retention.usage_logs_days", 90)
|
||||||
|
viper.SetDefault("dashboard_aggregation.retention.hourly_days", 180)
|
||||||
|
viper.SetDefault("dashboard_aggregation.retention.daily_days", 730)
|
||||||
|
viper.SetDefault("dashboard_aggregation.recompute_days", 2)
|
||||||
|
|
||||||
// Gateway
|
// Gateway
|
||||||
viper.SetDefault("gateway.response_header_timeout", 600) // 600秒(10分钟)等待上游响应头,LLM高负载时可能排队较久
|
viper.SetDefault("gateway.response_header_timeout", 600) // 600秒(10分钟)等待上游响应头,LLM高负载时可能排队较久
|
||||||
viper.SetDefault("gateway.log_upstream_error_body", false)
|
viper.SetDefault("gateway.log_upstream_error_body", true)
|
||||||
viper.SetDefault("gateway.log_upstream_error_body_max_bytes", 2048)
|
viper.SetDefault("gateway.log_upstream_error_body_max_bytes", 2048)
|
||||||
viper.SetDefault("gateway.inject_beta_for_apikey", false)
|
viper.SetDefault("gateway.inject_beta_for_apikey", false)
|
||||||
viper.SetDefault("gateway.failover_on_400", false)
|
viper.SetDefault("gateway.failover_on_400", false)
|
||||||
@@ -675,11 +767,20 @@ func setDefaults() {
|
|||||||
viper.SetDefault("gateway.stream_keepalive_interval", 10)
|
viper.SetDefault("gateway.stream_keepalive_interval", 10)
|
||||||
viper.SetDefault("gateway.max_line_size", 10*1024*1024)
|
viper.SetDefault("gateway.max_line_size", 10*1024*1024)
|
||||||
viper.SetDefault("gateway.scheduling.sticky_session_max_waiting", 3)
|
viper.SetDefault("gateway.scheduling.sticky_session_max_waiting", 3)
|
||||||
viper.SetDefault("gateway.scheduling.sticky_session_wait_timeout", 45*time.Second)
|
viper.SetDefault("gateway.scheduling.sticky_session_wait_timeout", 120*time.Second)
|
||||||
viper.SetDefault("gateway.scheduling.fallback_wait_timeout", 30*time.Second)
|
viper.SetDefault("gateway.scheduling.fallback_wait_timeout", 30*time.Second)
|
||||||
viper.SetDefault("gateway.scheduling.fallback_max_waiting", 100)
|
viper.SetDefault("gateway.scheduling.fallback_max_waiting", 100)
|
||||||
viper.SetDefault("gateway.scheduling.load_batch_enabled", true)
|
viper.SetDefault("gateway.scheduling.load_batch_enabled", true)
|
||||||
viper.SetDefault("gateway.scheduling.slot_cleanup_interval", 30*time.Second)
|
viper.SetDefault("gateway.scheduling.slot_cleanup_interval", 30*time.Second)
|
||||||
|
viper.SetDefault("gateway.scheduling.db_fallback_enabled", true)
|
||||||
|
viper.SetDefault("gateway.scheduling.db_fallback_timeout_seconds", 0)
|
||||||
|
viper.SetDefault("gateway.scheduling.db_fallback_max_qps", 0)
|
||||||
|
viper.SetDefault("gateway.scheduling.outbox_poll_interval_seconds", 1)
|
||||||
|
viper.SetDefault("gateway.scheduling.outbox_lag_warn_seconds", 5)
|
||||||
|
viper.SetDefault("gateway.scheduling.outbox_lag_rebuild_seconds", 10)
|
||||||
|
viper.SetDefault("gateway.scheduling.outbox_lag_rebuild_failures", 3)
|
||||||
|
viper.SetDefault("gateway.scheduling.outbox_backlog_rebuild_rows", 10000)
|
||||||
|
viper.SetDefault("gateway.scheduling.full_rebuild_interval_seconds", 300)
|
||||||
viper.SetDefault("concurrency.ping_interval", 10)
|
viper.SetDefault("concurrency.ping_interval", 10)
|
||||||
|
|
||||||
// TokenRefresh
|
// TokenRefresh
|
||||||
@@ -696,10 +797,6 @@ func setDefaults() {
|
|||||||
viper.SetDefault("gemini.oauth.client_secret", "")
|
viper.SetDefault("gemini.oauth.client_secret", "")
|
||||||
viper.SetDefault("gemini.oauth.scopes", "")
|
viper.SetDefault("gemini.oauth.scopes", "")
|
||||||
viper.SetDefault("gemini.quota.policy", "")
|
viper.SetDefault("gemini.quota.policy", "")
|
||||||
|
|
||||||
// Update - 在线更新配置
|
|
||||||
// 代理地址为空表示直连 GitHub(适用于海外服务器)
|
|
||||||
viper.SetDefault("update.proxy_url", "")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Config) Validate() error {
|
func (c *Config) Validate() error {
|
||||||
@@ -740,7 +837,8 @@ func (c *Config) Validate() error {
|
|||||||
if method == "none" && !c.LinuxDo.UsePKCE {
|
if method == "none" && !c.LinuxDo.UsePKCE {
|
||||||
return fmt.Errorf("linuxdo_connect.use_pkce must be true when linuxdo_connect.token_auth_method=none")
|
return fmt.Errorf("linuxdo_connect.use_pkce must be true when linuxdo_connect.token_auth_method=none")
|
||||||
}
|
}
|
||||||
if (method == "" || method == "client_secret_post" || method == "client_secret_basic") && strings.TrimSpace(c.LinuxDo.ClientSecret) == "" {
|
if (method == "" || method == "client_secret_post" || method == "client_secret_basic") &&
|
||||||
|
strings.TrimSpace(c.LinuxDo.ClientSecret) == "" {
|
||||||
return fmt.Errorf("linuxdo_connect.client_secret is required when linuxdo_connect.enabled=true and token_auth_method is client_secret_post/client_secret_basic")
|
return fmt.Errorf("linuxdo_connect.client_secret is required when linuxdo_connect.enabled=true and token_auth_method is client_secret_post/client_secret_basic")
|
||||||
}
|
}
|
||||||
if strings.TrimSpace(c.LinuxDo.FrontendRedirectURL) == "" {
|
if strings.TrimSpace(c.LinuxDo.FrontendRedirectURL) == "" {
|
||||||
@@ -813,6 +911,78 @@ func (c *Config) Validate() error {
|
|||||||
if c.Redis.MinIdleConns > c.Redis.PoolSize {
|
if c.Redis.MinIdleConns > c.Redis.PoolSize {
|
||||||
return fmt.Errorf("redis.min_idle_conns cannot exceed redis.pool_size")
|
return fmt.Errorf("redis.min_idle_conns cannot exceed redis.pool_size")
|
||||||
}
|
}
|
||||||
|
if c.Dashboard.Enabled {
|
||||||
|
if c.Dashboard.StatsFreshTTLSeconds <= 0 {
|
||||||
|
return fmt.Errorf("dashboard_cache.stats_fresh_ttl_seconds must be positive")
|
||||||
|
}
|
||||||
|
if c.Dashboard.StatsTTLSeconds <= 0 {
|
||||||
|
return fmt.Errorf("dashboard_cache.stats_ttl_seconds must be positive")
|
||||||
|
}
|
||||||
|
if c.Dashboard.StatsRefreshTimeoutSeconds <= 0 {
|
||||||
|
return fmt.Errorf("dashboard_cache.stats_refresh_timeout_seconds must be positive")
|
||||||
|
}
|
||||||
|
if c.Dashboard.StatsFreshTTLSeconds > c.Dashboard.StatsTTLSeconds {
|
||||||
|
return fmt.Errorf("dashboard_cache.stats_fresh_ttl_seconds must be <= dashboard_cache.stats_ttl_seconds")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if c.Dashboard.StatsFreshTTLSeconds < 0 {
|
||||||
|
return fmt.Errorf("dashboard_cache.stats_fresh_ttl_seconds must be non-negative")
|
||||||
|
}
|
||||||
|
if c.Dashboard.StatsTTLSeconds < 0 {
|
||||||
|
return fmt.Errorf("dashboard_cache.stats_ttl_seconds must be non-negative")
|
||||||
|
}
|
||||||
|
if c.Dashboard.StatsRefreshTimeoutSeconds < 0 {
|
||||||
|
return fmt.Errorf("dashboard_cache.stats_refresh_timeout_seconds must be non-negative")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if c.DashboardAgg.Enabled {
|
||||||
|
if c.DashboardAgg.IntervalSeconds <= 0 {
|
||||||
|
return fmt.Errorf("dashboard_aggregation.interval_seconds must be positive")
|
||||||
|
}
|
||||||
|
if c.DashboardAgg.LookbackSeconds < 0 {
|
||||||
|
return fmt.Errorf("dashboard_aggregation.lookback_seconds must be non-negative")
|
||||||
|
}
|
||||||
|
if c.DashboardAgg.BackfillMaxDays < 0 {
|
||||||
|
return fmt.Errorf("dashboard_aggregation.backfill_max_days must be non-negative")
|
||||||
|
}
|
||||||
|
if c.DashboardAgg.BackfillEnabled && c.DashboardAgg.BackfillMaxDays == 0 {
|
||||||
|
return fmt.Errorf("dashboard_aggregation.backfill_max_days must be positive")
|
||||||
|
}
|
||||||
|
if c.DashboardAgg.Retention.UsageLogsDays <= 0 {
|
||||||
|
return fmt.Errorf("dashboard_aggregation.retention.usage_logs_days must be positive")
|
||||||
|
}
|
||||||
|
if c.DashboardAgg.Retention.HourlyDays <= 0 {
|
||||||
|
return fmt.Errorf("dashboard_aggregation.retention.hourly_days must be positive")
|
||||||
|
}
|
||||||
|
if c.DashboardAgg.Retention.DailyDays <= 0 {
|
||||||
|
return fmt.Errorf("dashboard_aggregation.retention.daily_days must be positive")
|
||||||
|
}
|
||||||
|
if c.DashboardAgg.RecomputeDays < 0 {
|
||||||
|
return fmt.Errorf("dashboard_aggregation.recompute_days must be non-negative")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if c.DashboardAgg.IntervalSeconds < 0 {
|
||||||
|
return fmt.Errorf("dashboard_aggregation.interval_seconds must be non-negative")
|
||||||
|
}
|
||||||
|
if c.DashboardAgg.LookbackSeconds < 0 {
|
||||||
|
return fmt.Errorf("dashboard_aggregation.lookback_seconds must be non-negative")
|
||||||
|
}
|
||||||
|
if c.DashboardAgg.BackfillMaxDays < 0 {
|
||||||
|
return fmt.Errorf("dashboard_aggregation.backfill_max_days must be non-negative")
|
||||||
|
}
|
||||||
|
if c.DashboardAgg.Retention.UsageLogsDays < 0 {
|
||||||
|
return fmt.Errorf("dashboard_aggregation.retention.usage_logs_days must be non-negative")
|
||||||
|
}
|
||||||
|
if c.DashboardAgg.Retention.HourlyDays < 0 {
|
||||||
|
return fmt.Errorf("dashboard_aggregation.retention.hourly_days must be non-negative")
|
||||||
|
}
|
||||||
|
if c.DashboardAgg.Retention.DailyDays < 0 {
|
||||||
|
return fmt.Errorf("dashboard_aggregation.retention.daily_days must be non-negative")
|
||||||
|
}
|
||||||
|
if c.DashboardAgg.RecomputeDays < 0 {
|
||||||
|
return fmt.Errorf("dashboard_aggregation.recompute_days must be non-negative")
|
||||||
|
}
|
||||||
|
}
|
||||||
if c.Gateway.MaxBodySize <= 0 {
|
if c.Gateway.MaxBodySize <= 0 {
|
||||||
return fmt.Errorf("gateway.max_body_size must be positive")
|
return fmt.Errorf("gateway.max_body_size must be positive")
|
||||||
}
|
}
|
||||||
@@ -883,6 +1053,50 @@ func (c *Config) Validate() error {
|
|||||||
if c.Gateway.Scheduling.SlotCleanupInterval < 0 {
|
if c.Gateway.Scheduling.SlotCleanupInterval < 0 {
|
||||||
return fmt.Errorf("gateway.scheduling.slot_cleanup_interval must be non-negative")
|
return fmt.Errorf("gateway.scheduling.slot_cleanup_interval must be non-negative")
|
||||||
}
|
}
|
||||||
|
if c.Gateway.Scheduling.DbFallbackTimeoutSeconds < 0 {
|
||||||
|
return fmt.Errorf("gateway.scheduling.db_fallback_timeout_seconds must be non-negative")
|
||||||
|
}
|
||||||
|
if c.Gateway.Scheduling.DbFallbackMaxQPS < 0 {
|
||||||
|
return fmt.Errorf("gateway.scheduling.db_fallback_max_qps must be non-negative")
|
||||||
|
}
|
||||||
|
if c.Gateway.Scheduling.OutboxPollIntervalSeconds <= 0 {
|
||||||
|
return fmt.Errorf("gateway.scheduling.outbox_poll_interval_seconds must be positive")
|
||||||
|
}
|
||||||
|
if c.Gateway.Scheduling.OutboxLagWarnSeconds < 0 {
|
||||||
|
return fmt.Errorf("gateway.scheduling.outbox_lag_warn_seconds must be non-negative")
|
||||||
|
}
|
||||||
|
if c.Gateway.Scheduling.OutboxLagRebuildSeconds < 0 {
|
||||||
|
return fmt.Errorf("gateway.scheduling.outbox_lag_rebuild_seconds must be non-negative")
|
||||||
|
}
|
||||||
|
if c.Gateway.Scheduling.OutboxLagRebuildFailures <= 0 {
|
||||||
|
return fmt.Errorf("gateway.scheduling.outbox_lag_rebuild_failures must be positive")
|
||||||
|
}
|
||||||
|
if c.Gateway.Scheduling.OutboxBacklogRebuildRows < 0 {
|
||||||
|
return fmt.Errorf("gateway.scheduling.outbox_backlog_rebuild_rows must be non-negative")
|
||||||
|
}
|
||||||
|
if c.Gateway.Scheduling.FullRebuildIntervalSeconds < 0 {
|
||||||
|
return fmt.Errorf("gateway.scheduling.full_rebuild_interval_seconds must be non-negative")
|
||||||
|
}
|
||||||
|
if c.Gateway.Scheduling.OutboxLagWarnSeconds > 0 &&
|
||||||
|
c.Gateway.Scheduling.OutboxLagRebuildSeconds > 0 &&
|
||||||
|
c.Gateway.Scheduling.OutboxLagRebuildSeconds < c.Gateway.Scheduling.OutboxLagWarnSeconds {
|
||||||
|
return fmt.Errorf("gateway.scheduling.outbox_lag_rebuild_seconds must be >= outbox_lag_warn_seconds")
|
||||||
|
}
|
||||||
|
if c.Ops.MetricsCollectorCache.TTL < 0 {
|
||||||
|
return fmt.Errorf("ops.metrics_collector_cache.ttl must be non-negative")
|
||||||
|
}
|
||||||
|
if c.Ops.Cleanup.ErrorLogRetentionDays < 0 {
|
||||||
|
return fmt.Errorf("ops.cleanup.error_log_retention_days must be non-negative")
|
||||||
|
}
|
||||||
|
if c.Ops.Cleanup.MinuteMetricsRetentionDays < 0 {
|
||||||
|
return fmt.Errorf("ops.cleanup.minute_metrics_retention_days must be non-negative")
|
||||||
|
}
|
||||||
|
if c.Ops.Cleanup.HourlyMetricsRetentionDays < 0 {
|
||||||
|
return fmt.Errorf("ops.cleanup.hourly_metrics_retention_days must be non-negative")
|
||||||
|
}
|
||||||
|
if c.Ops.Cleanup.Enabled && strings.TrimSpace(c.Ops.Cleanup.Schedule) == "" {
|
||||||
|
return fmt.Errorf("ops.cleanup.schedule is required when ops.cleanup.enabled=true")
|
||||||
|
}
|
||||||
if c.Concurrency.PingInterval < 5 || c.Concurrency.PingInterval > 30 {
|
if c.Concurrency.PingInterval < 5 || c.Concurrency.PingInterval > 30 {
|
||||||
return fmt.Errorf("concurrency.ping_interval must be between 5-30 seconds")
|
return fmt.Errorf("concurrency.ping_interval must be between 5-30 seconds")
|
||||||
}
|
}
|
||||||
@@ -959,3 +1173,77 @@ func GetServerAddress() string {
|
|||||||
port := v.GetInt("server.port")
|
port := v.GetInt("server.port")
|
||||||
return fmt.Sprintf("%s:%d", host, port)
|
return fmt.Sprintf("%s:%d", host, port)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ValidateAbsoluteHTTPURL 验证是否为有效的绝对 HTTP(S) URL
|
||||||
|
func ValidateAbsoluteHTTPURL(raw string) error {
|
||||||
|
raw = strings.TrimSpace(raw)
|
||||||
|
if raw == "" {
|
||||||
|
return fmt.Errorf("empty url")
|
||||||
|
}
|
||||||
|
u, err := url.Parse(raw)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !u.IsAbs() {
|
||||||
|
return fmt.Errorf("must be absolute")
|
||||||
|
}
|
||||||
|
if !isHTTPScheme(u.Scheme) {
|
||||||
|
return fmt.Errorf("unsupported scheme: %s", u.Scheme)
|
||||||
|
}
|
||||||
|
if strings.TrimSpace(u.Host) == "" {
|
||||||
|
return fmt.Errorf("missing host")
|
||||||
|
}
|
||||||
|
if u.Fragment != "" {
|
||||||
|
return fmt.Errorf("must not include fragment")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidateFrontendRedirectURL 验证前端重定向 URL(可以是绝对 URL 或相对路径)
|
||||||
|
func ValidateFrontendRedirectURL(raw string) error {
|
||||||
|
raw = strings.TrimSpace(raw)
|
||||||
|
if raw == "" {
|
||||||
|
return fmt.Errorf("empty url")
|
||||||
|
}
|
||||||
|
if strings.ContainsAny(raw, "\r\n") {
|
||||||
|
return fmt.Errorf("contains invalid characters")
|
||||||
|
}
|
||||||
|
if strings.HasPrefix(raw, "/") {
|
||||||
|
if strings.HasPrefix(raw, "//") {
|
||||||
|
return fmt.Errorf("must not start with //")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
u, err := url.Parse(raw)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !u.IsAbs() {
|
||||||
|
return fmt.Errorf("must be absolute http(s) url or relative path")
|
||||||
|
}
|
||||||
|
if !isHTTPScheme(u.Scheme) {
|
||||||
|
return fmt.Errorf("unsupported scheme: %s", u.Scheme)
|
||||||
|
}
|
||||||
|
if strings.TrimSpace(u.Host) == "" {
|
||||||
|
return fmt.Errorf("missing host")
|
||||||
|
}
|
||||||
|
if u.Fragment != "" {
|
||||||
|
return fmt.Errorf("must not include fragment")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// isHTTPScheme 检查是否为 HTTP 或 HTTPS 协议
|
||||||
|
func isHTTPScheme(scheme string) bool {
|
||||||
|
return strings.EqualFold(scheme, "http") || strings.EqualFold(scheme, "https")
|
||||||
|
}
|
||||||
|
|
||||||
|
func warnIfInsecureURL(field, raw string) {
|
||||||
|
u, err := url.Parse(strings.TrimSpace(raw))
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if strings.EqualFold(u.Scheme, "http") {
|
||||||
|
log.Printf("Warning: %s uses http scheme; use https in production to avoid token leakage.", field)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -39,8 +39,8 @@ func TestLoadDefaultSchedulingConfig(t *testing.T) {
|
|||||||
if cfg.Gateway.Scheduling.StickySessionMaxWaiting != 3 {
|
if cfg.Gateway.Scheduling.StickySessionMaxWaiting != 3 {
|
||||||
t.Fatalf("StickySessionMaxWaiting = %d, want 3", cfg.Gateway.Scheduling.StickySessionMaxWaiting)
|
t.Fatalf("StickySessionMaxWaiting = %d, want 3", cfg.Gateway.Scheduling.StickySessionMaxWaiting)
|
||||||
}
|
}
|
||||||
if cfg.Gateway.Scheduling.StickySessionWaitTimeout != 45*time.Second {
|
if cfg.Gateway.Scheduling.StickySessionWaitTimeout != 120*time.Second {
|
||||||
t.Fatalf("StickySessionWaitTimeout = %v, want 45s", cfg.Gateway.Scheduling.StickySessionWaitTimeout)
|
t.Fatalf("StickySessionWaitTimeout = %v, want 120s", cfg.Gateway.Scheduling.StickySessionWaitTimeout)
|
||||||
}
|
}
|
||||||
if cfg.Gateway.Scheduling.FallbackWaitTimeout != 30*time.Second {
|
if cfg.Gateway.Scheduling.FallbackWaitTimeout != 30*time.Second {
|
||||||
t.Fatalf("FallbackWaitTimeout = %v, want 30s", cfg.Gateway.Scheduling.FallbackWaitTimeout)
|
t.Fatalf("FallbackWaitTimeout = %v, want 30s", cfg.Gateway.Scheduling.FallbackWaitTimeout)
|
||||||
@@ -141,3 +141,142 @@ func TestValidateLinuxDoPKCERequiredForPublicClient(t *testing.T) {
|
|||||||
t.Fatalf("Validate() expected use_pkce error, got: %v", err)
|
t.Fatalf("Validate() expected use_pkce error, got: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestLoadDefaultDashboardCacheConfig(t *testing.T) {
|
||||||
|
viper.Reset()
|
||||||
|
|
||||||
|
cfg, err := Load()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Load() error: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !cfg.Dashboard.Enabled {
|
||||||
|
t.Fatalf("Dashboard.Enabled = false, want true")
|
||||||
|
}
|
||||||
|
if cfg.Dashboard.KeyPrefix != "sub2api:" {
|
||||||
|
t.Fatalf("Dashboard.KeyPrefix = %q, want %q", cfg.Dashboard.KeyPrefix, "sub2api:")
|
||||||
|
}
|
||||||
|
if cfg.Dashboard.StatsFreshTTLSeconds != 15 {
|
||||||
|
t.Fatalf("Dashboard.StatsFreshTTLSeconds = %d, want 15", cfg.Dashboard.StatsFreshTTLSeconds)
|
||||||
|
}
|
||||||
|
if cfg.Dashboard.StatsTTLSeconds != 30 {
|
||||||
|
t.Fatalf("Dashboard.StatsTTLSeconds = %d, want 30", cfg.Dashboard.StatsTTLSeconds)
|
||||||
|
}
|
||||||
|
if cfg.Dashboard.StatsRefreshTimeoutSeconds != 30 {
|
||||||
|
t.Fatalf("Dashboard.StatsRefreshTimeoutSeconds = %d, want 30", cfg.Dashboard.StatsRefreshTimeoutSeconds)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestValidateDashboardCacheConfigEnabled(t *testing.T) {
|
||||||
|
viper.Reset()
|
||||||
|
|
||||||
|
cfg, err := Load()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Load() error: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg.Dashboard.Enabled = true
|
||||||
|
cfg.Dashboard.StatsFreshTTLSeconds = 10
|
||||||
|
cfg.Dashboard.StatsTTLSeconds = 5
|
||||||
|
err = cfg.Validate()
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("Validate() expected error for stats_fresh_ttl_seconds > stats_ttl_seconds, got nil")
|
||||||
|
}
|
||||||
|
if !strings.Contains(err.Error(), "dashboard_cache.stats_fresh_ttl_seconds") {
|
||||||
|
t.Fatalf("Validate() expected stats_fresh_ttl_seconds error, got: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestValidateDashboardCacheConfigDisabled(t *testing.T) {
|
||||||
|
viper.Reset()
|
||||||
|
|
||||||
|
cfg, err := Load()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Load() error: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg.Dashboard.Enabled = false
|
||||||
|
cfg.Dashboard.StatsTTLSeconds = -1
|
||||||
|
err = cfg.Validate()
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("Validate() expected error for negative stats_ttl_seconds, got nil")
|
||||||
|
}
|
||||||
|
if !strings.Contains(err.Error(), "dashboard_cache.stats_ttl_seconds") {
|
||||||
|
t.Fatalf("Validate() expected stats_ttl_seconds error, got: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLoadDefaultDashboardAggregationConfig(t *testing.T) {
|
||||||
|
viper.Reset()
|
||||||
|
|
||||||
|
cfg, err := Load()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Load() error: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !cfg.DashboardAgg.Enabled {
|
||||||
|
t.Fatalf("DashboardAgg.Enabled = false, want true")
|
||||||
|
}
|
||||||
|
if cfg.DashboardAgg.IntervalSeconds != 60 {
|
||||||
|
t.Fatalf("DashboardAgg.IntervalSeconds = %d, want 60", cfg.DashboardAgg.IntervalSeconds)
|
||||||
|
}
|
||||||
|
if cfg.DashboardAgg.LookbackSeconds != 120 {
|
||||||
|
t.Fatalf("DashboardAgg.LookbackSeconds = %d, want 120", cfg.DashboardAgg.LookbackSeconds)
|
||||||
|
}
|
||||||
|
if cfg.DashboardAgg.BackfillEnabled {
|
||||||
|
t.Fatalf("DashboardAgg.BackfillEnabled = true, want false")
|
||||||
|
}
|
||||||
|
if cfg.DashboardAgg.BackfillMaxDays != 31 {
|
||||||
|
t.Fatalf("DashboardAgg.BackfillMaxDays = %d, want 31", cfg.DashboardAgg.BackfillMaxDays)
|
||||||
|
}
|
||||||
|
if cfg.DashboardAgg.Retention.UsageLogsDays != 90 {
|
||||||
|
t.Fatalf("DashboardAgg.Retention.UsageLogsDays = %d, want 90", cfg.DashboardAgg.Retention.UsageLogsDays)
|
||||||
|
}
|
||||||
|
if cfg.DashboardAgg.Retention.HourlyDays != 180 {
|
||||||
|
t.Fatalf("DashboardAgg.Retention.HourlyDays = %d, want 180", cfg.DashboardAgg.Retention.HourlyDays)
|
||||||
|
}
|
||||||
|
if cfg.DashboardAgg.Retention.DailyDays != 730 {
|
||||||
|
t.Fatalf("DashboardAgg.Retention.DailyDays = %d, want 730", cfg.DashboardAgg.Retention.DailyDays)
|
||||||
|
}
|
||||||
|
if cfg.DashboardAgg.RecomputeDays != 2 {
|
||||||
|
t.Fatalf("DashboardAgg.RecomputeDays = %d, want 2", cfg.DashboardAgg.RecomputeDays)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestValidateDashboardAggregationConfigDisabled(t *testing.T) {
|
||||||
|
viper.Reset()
|
||||||
|
|
||||||
|
cfg, err := Load()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Load() error: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg.DashboardAgg.Enabled = false
|
||||||
|
cfg.DashboardAgg.IntervalSeconds = -1
|
||||||
|
err = cfg.Validate()
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("Validate() expected error for negative dashboard_aggregation.interval_seconds, got nil")
|
||||||
|
}
|
||||||
|
if !strings.Contains(err.Error(), "dashboard_aggregation.interval_seconds") {
|
||||||
|
t.Fatalf("Validate() expected interval_seconds error, got: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestValidateDashboardAggregationBackfillMaxDays(t *testing.T) {
|
||||||
|
viper.Reset()
|
||||||
|
|
||||||
|
cfg, err := Load()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Load() error: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg.DashboardAgg.BackfillEnabled = true
|
||||||
|
cfg.DashboardAgg.BackfillMaxDays = 0
|
||||||
|
err = cfg.Validate()
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("Validate() expected error for dashboard_aggregation.backfill_max_days, got nil")
|
||||||
|
}
|
||||||
|
if !strings.Contains(err.Error(), "dashboard_aggregation.backfill_max_days") {
|
||||||
|
t.Fatalf("Validate() expected backfill_max_days error, got: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
package admin
|
package admin
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"strconv"
|
"strconv"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -13,15 +14,17 @@ import (
|
|||||||
|
|
||||||
// DashboardHandler handles admin dashboard statistics
|
// DashboardHandler handles admin dashboard statistics
|
||||||
type DashboardHandler struct {
|
type DashboardHandler struct {
|
||||||
dashboardService *service.DashboardService
|
dashboardService *service.DashboardService
|
||||||
startTime time.Time // Server start time for uptime calculation
|
aggregationService *service.DashboardAggregationService
|
||||||
|
startTime time.Time // Server start time for uptime calculation
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewDashboardHandler creates a new admin dashboard handler
|
// NewDashboardHandler creates a new admin dashboard handler
|
||||||
func NewDashboardHandler(dashboardService *service.DashboardService) *DashboardHandler {
|
func NewDashboardHandler(dashboardService *service.DashboardService, aggregationService *service.DashboardAggregationService) *DashboardHandler {
|
||||||
return &DashboardHandler{
|
return &DashboardHandler{
|
||||||
dashboardService: dashboardService,
|
dashboardService: dashboardService,
|
||||||
startTime: time.Now(),
|
aggregationService: aggregationService,
|
||||||
|
startTime: time.Now(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -114,6 +117,58 @@ func (h *DashboardHandler) GetStats(c *gin.Context) {
|
|||||||
// 性能指标
|
// 性能指标
|
||||||
"rpm": stats.Rpm,
|
"rpm": stats.Rpm,
|
||||||
"tpm": stats.Tpm,
|
"tpm": stats.Tpm,
|
||||||
|
|
||||||
|
// 预聚合新鲜度
|
||||||
|
"hourly_active_users": stats.HourlyActiveUsers,
|
||||||
|
"stats_updated_at": stats.StatsUpdatedAt,
|
||||||
|
"stats_stale": stats.StatsStale,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
type DashboardAggregationBackfillRequest struct {
|
||||||
|
Start string `json:"start"`
|
||||||
|
End string `json:"end"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// BackfillAggregation handles triggering aggregation backfill
|
||||||
|
// POST /api/v1/admin/dashboard/aggregation/backfill
|
||||||
|
func (h *DashboardHandler) BackfillAggregation(c *gin.Context) {
|
||||||
|
if h.aggregationService == nil {
|
||||||
|
response.InternalError(c, "Aggregation service not available")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var req DashboardAggregationBackfillRequest
|
||||||
|
if err := c.ShouldBindJSON(&req); err != nil {
|
||||||
|
response.BadRequest(c, "Invalid request body")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
start, err := time.Parse(time.RFC3339, req.Start)
|
||||||
|
if err != nil {
|
||||||
|
response.BadRequest(c, "Invalid start time")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
end, err := time.Parse(time.RFC3339, req.End)
|
||||||
|
if err != nil {
|
||||||
|
response.BadRequest(c, "Invalid end time")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := h.aggregationService.TriggerBackfill(start, end); err != nil {
|
||||||
|
if errors.Is(err, service.ErrDashboardBackfillDisabled) {
|
||||||
|
response.Forbidden(c, "Backfill is disabled")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if errors.Is(err, service.ErrDashboardBackfillTooLarge) {
|
||||||
|
response.BadRequest(c, "Backfill range too large")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
response.InternalError(c, "Failed to trigger backfill")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
response.Success(c, gin.H{
|
||||||
|
"status": "accepted",
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
432
backend/internal/handler/admin/ops_alerts_handler.go
Normal file
432
backend/internal/handler/admin/ops_alerts_handler.go
Normal file
@@ -0,0 +1,432 @@
|
|||||||
|
package admin
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/pkg/response"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||||
|
"github.com/gin-gonic/gin"
|
||||||
|
"github.com/gin-gonic/gin/binding"
|
||||||
|
)
|
||||||
|
|
||||||
|
var validOpsAlertMetricTypes = []string{
|
||||||
|
"success_rate",
|
||||||
|
"error_rate",
|
||||||
|
"upstream_error_rate",
|
||||||
|
"p95_latency_ms",
|
||||||
|
"p99_latency_ms",
|
||||||
|
"cpu_usage_percent",
|
||||||
|
"memory_usage_percent",
|
||||||
|
"concurrency_queue_depth",
|
||||||
|
}
|
||||||
|
|
||||||
|
var validOpsAlertMetricTypeSet = func() map[string]struct{} {
|
||||||
|
set := make(map[string]struct{}, len(validOpsAlertMetricTypes))
|
||||||
|
for _, v := range validOpsAlertMetricTypes {
|
||||||
|
set[v] = struct{}{}
|
||||||
|
}
|
||||||
|
return set
|
||||||
|
}()
|
||||||
|
|
||||||
|
var validOpsAlertOperators = []string{">", "<", ">=", "<=", "==", "!="}
|
||||||
|
|
||||||
|
var validOpsAlertOperatorSet = func() map[string]struct{} {
|
||||||
|
set := make(map[string]struct{}, len(validOpsAlertOperators))
|
||||||
|
for _, v := range validOpsAlertOperators {
|
||||||
|
set[v] = struct{}{}
|
||||||
|
}
|
||||||
|
return set
|
||||||
|
}()
|
||||||
|
|
||||||
|
var validOpsAlertSeverities = []string{"P0", "P1", "P2", "P3"}
|
||||||
|
|
||||||
|
var validOpsAlertSeveritySet = func() map[string]struct{} {
|
||||||
|
set := make(map[string]struct{}, len(validOpsAlertSeverities))
|
||||||
|
for _, v := range validOpsAlertSeverities {
|
||||||
|
set[v] = struct{}{}
|
||||||
|
}
|
||||||
|
return set
|
||||||
|
}()
|
||||||
|
|
||||||
|
type opsAlertRuleValidatedInput struct {
|
||||||
|
Name string
|
||||||
|
MetricType string
|
||||||
|
Operator string
|
||||||
|
Threshold float64
|
||||||
|
|
||||||
|
Severity string
|
||||||
|
|
||||||
|
WindowMinutes int
|
||||||
|
SustainedMinutes int
|
||||||
|
CooldownMinutes int
|
||||||
|
|
||||||
|
Enabled bool
|
||||||
|
NotifyEmail bool
|
||||||
|
|
||||||
|
WindowProvided bool
|
||||||
|
SustainedProvided bool
|
||||||
|
CooldownProvided bool
|
||||||
|
SeverityProvided bool
|
||||||
|
EnabledProvided bool
|
||||||
|
NotifyProvided bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func isPercentOrRateMetric(metricType string) bool {
|
||||||
|
switch metricType {
|
||||||
|
case "success_rate",
|
||||||
|
"error_rate",
|
||||||
|
"upstream_error_rate",
|
||||||
|
"cpu_usage_percent",
|
||||||
|
"memory_usage_percent":
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateOpsAlertRulePayload(raw map[string]json.RawMessage) (*opsAlertRuleValidatedInput, error) {
|
||||||
|
if raw == nil {
|
||||||
|
return nil, fmt.Errorf("invalid request body")
|
||||||
|
}
|
||||||
|
|
||||||
|
requiredFields := []string{"name", "metric_type", "operator", "threshold"}
|
||||||
|
for _, field := range requiredFields {
|
||||||
|
if _, ok := raw[field]; !ok {
|
||||||
|
return nil, fmt.Errorf("%s is required", field)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var name string
|
||||||
|
if err := json.Unmarshal(raw["name"], &name); err != nil || strings.TrimSpace(name) == "" {
|
||||||
|
return nil, fmt.Errorf("name is required")
|
||||||
|
}
|
||||||
|
name = strings.TrimSpace(name)
|
||||||
|
|
||||||
|
var metricType string
|
||||||
|
if err := json.Unmarshal(raw["metric_type"], &metricType); err != nil || strings.TrimSpace(metricType) == "" {
|
||||||
|
return nil, fmt.Errorf("metric_type is required")
|
||||||
|
}
|
||||||
|
metricType = strings.TrimSpace(metricType)
|
||||||
|
if _, ok := validOpsAlertMetricTypeSet[metricType]; !ok {
|
||||||
|
return nil, fmt.Errorf("metric_type must be one of: %s", strings.Join(validOpsAlertMetricTypes, ", "))
|
||||||
|
}
|
||||||
|
|
||||||
|
var operator string
|
||||||
|
if err := json.Unmarshal(raw["operator"], &operator); err != nil || strings.TrimSpace(operator) == "" {
|
||||||
|
return nil, fmt.Errorf("operator is required")
|
||||||
|
}
|
||||||
|
operator = strings.TrimSpace(operator)
|
||||||
|
if _, ok := validOpsAlertOperatorSet[operator]; !ok {
|
||||||
|
return nil, fmt.Errorf("operator must be one of: %s", strings.Join(validOpsAlertOperators, ", "))
|
||||||
|
}
|
||||||
|
|
||||||
|
var threshold float64
|
||||||
|
if err := json.Unmarshal(raw["threshold"], &threshold); err != nil {
|
||||||
|
return nil, fmt.Errorf("threshold must be a number")
|
||||||
|
}
|
||||||
|
if math.IsNaN(threshold) || math.IsInf(threshold, 0) {
|
||||||
|
return nil, fmt.Errorf("threshold must be a finite number")
|
||||||
|
}
|
||||||
|
if isPercentOrRateMetric(metricType) {
|
||||||
|
if threshold < 0 || threshold > 100 {
|
||||||
|
return nil, fmt.Errorf("threshold must be between 0 and 100 for metric_type %s", metricType)
|
||||||
|
}
|
||||||
|
} else if threshold < 0 {
|
||||||
|
return nil, fmt.Errorf("threshold must be >= 0")
|
||||||
|
}
|
||||||
|
|
||||||
|
validated := &opsAlertRuleValidatedInput{
|
||||||
|
Name: name,
|
||||||
|
MetricType: metricType,
|
||||||
|
Operator: operator,
|
||||||
|
Threshold: threshold,
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := raw["severity"]; ok {
|
||||||
|
validated.SeverityProvided = true
|
||||||
|
var sev string
|
||||||
|
if err := json.Unmarshal(v, &sev); err != nil {
|
||||||
|
return nil, fmt.Errorf("severity must be a string")
|
||||||
|
}
|
||||||
|
sev = strings.ToUpper(strings.TrimSpace(sev))
|
||||||
|
if sev != "" {
|
||||||
|
if _, ok := validOpsAlertSeveritySet[sev]; !ok {
|
||||||
|
return nil, fmt.Errorf("severity must be one of: %s", strings.Join(validOpsAlertSeverities, ", "))
|
||||||
|
}
|
||||||
|
validated.Severity = sev
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if validated.Severity == "" {
|
||||||
|
validated.Severity = "P2"
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := raw["enabled"]; ok {
|
||||||
|
validated.EnabledProvided = true
|
||||||
|
if err := json.Unmarshal(v, &validated.Enabled); err != nil {
|
||||||
|
return nil, fmt.Errorf("enabled must be a boolean")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
validated.Enabled = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := raw["notify_email"]; ok {
|
||||||
|
validated.NotifyProvided = true
|
||||||
|
if err := json.Unmarshal(v, &validated.NotifyEmail); err != nil {
|
||||||
|
return nil, fmt.Errorf("notify_email must be a boolean")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
validated.NotifyEmail = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := raw["window_minutes"]; ok {
|
||||||
|
validated.WindowProvided = true
|
||||||
|
if err := json.Unmarshal(v, &validated.WindowMinutes); err != nil {
|
||||||
|
return nil, fmt.Errorf("window_minutes must be an integer")
|
||||||
|
}
|
||||||
|
switch validated.WindowMinutes {
|
||||||
|
case 1, 5, 60:
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("window_minutes must be one of: 1, 5, 60")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
validated.WindowMinutes = 1
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := raw["sustained_minutes"]; ok {
|
||||||
|
validated.SustainedProvided = true
|
||||||
|
if err := json.Unmarshal(v, &validated.SustainedMinutes); err != nil {
|
||||||
|
return nil, fmt.Errorf("sustained_minutes must be an integer")
|
||||||
|
}
|
||||||
|
if validated.SustainedMinutes < 1 || validated.SustainedMinutes > 1440 {
|
||||||
|
return nil, fmt.Errorf("sustained_minutes must be between 1 and 1440")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
validated.SustainedMinutes = 1
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := raw["cooldown_minutes"]; ok {
|
||||||
|
validated.CooldownProvided = true
|
||||||
|
if err := json.Unmarshal(v, &validated.CooldownMinutes); err != nil {
|
||||||
|
return nil, fmt.Errorf("cooldown_minutes must be an integer")
|
||||||
|
}
|
||||||
|
if validated.CooldownMinutes < 0 || validated.CooldownMinutes > 1440 {
|
||||||
|
return nil, fmt.Errorf("cooldown_minutes must be between 0 and 1440")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
validated.CooldownMinutes = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
return validated, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListAlertRules returns all ops alert rules.
|
||||||
|
// GET /api/v1/admin/ops/alert-rules
|
||||||
|
func (h *OpsHandler) ListAlertRules(c *gin.Context) {
|
||||||
|
if h.opsService == nil {
|
||||||
|
response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
rules, err := h.opsService.ListAlertRules(c.Request.Context())
|
||||||
|
if err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
response.Success(c, rules)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateAlertRule creates an ops alert rule.
|
||||||
|
// POST /api/v1/admin/ops/alert-rules
|
||||||
|
func (h *OpsHandler) CreateAlertRule(c *gin.Context) {
|
||||||
|
if h.opsService == nil {
|
||||||
|
response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var raw map[string]json.RawMessage
|
||||||
|
if err := c.ShouldBindBodyWith(&raw, binding.JSON); err != nil {
|
||||||
|
response.BadRequest(c, "Invalid request body")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
validated, err := validateOpsAlertRulePayload(raw)
|
||||||
|
if err != nil {
|
||||||
|
response.BadRequest(c, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var rule service.OpsAlertRule
|
||||||
|
if err := c.ShouldBindBodyWith(&rule, binding.JSON); err != nil {
|
||||||
|
response.BadRequest(c, "Invalid request body")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
rule.Name = validated.Name
|
||||||
|
rule.MetricType = validated.MetricType
|
||||||
|
rule.Operator = validated.Operator
|
||||||
|
rule.Threshold = validated.Threshold
|
||||||
|
rule.WindowMinutes = validated.WindowMinutes
|
||||||
|
rule.SustainedMinutes = validated.SustainedMinutes
|
||||||
|
rule.CooldownMinutes = validated.CooldownMinutes
|
||||||
|
rule.Severity = validated.Severity
|
||||||
|
rule.Enabled = validated.Enabled
|
||||||
|
rule.NotifyEmail = validated.NotifyEmail
|
||||||
|
|
||||||
|
created, err := h.opsService.CreateAlertRule(c.Request.Context(), &rule)
|
||||||
|
if err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
response.Success(c, created)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateAlertRule updates an existing ops alert rule.
// PUT /api/v1/admin/ops/alert-rules/:id
//
// Mirrors CreateAlertRule's two-pass bind (raw validation, then typed decode
// from the cached body); the path :id always wins over any id in the body.
func (h *OpsHandler) UpdateAlertRule(c *gin.Context) {
	if h.opsService == nil {
		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
		return
	}
	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
		response.ErrorFrom(c, err)
		return
	}

	id, err := strconv.ParseInt(c.Param("id"), 10, 64)
	if err != nil || id <= 0 {
		response.BadRequest(c, "Invalid rule ID")
		return
	}

	// Pass 1: raw bind so the payload can be validated before typed decode.
	var raw map[string]json.RawMessage
	if err := c.ShouldBindBodyWith(&raw, binding.JSON); err != nil {
		response.BadRequest(c, "Invalid request body")
		return
	}
	validated, err := validateOpsAlertRulePayload(raw)
	if err != nil {
		response.BadRequest(c, err.Error())
		return
	}

	// Pass 2: typed bind from the cached body.
	var rule service.OpsAlertRule
	if err := c.ShouldBindBodyWith(&rule, binding.JSON); err != nil {
		response.BadRequest(c, "Invalid request body")
		return
	}

	// Force the path id and overwrite all validated fields so only sanitized
	// values reach the service.
	rule.ID = id
	rule.Name = validated.Name
	rule.MetricType = validated.MetricType
	rule.Operator = validated.Operator
	rule.Threshold = validated.Threshold
	rule.WindowMinutes = validated.WindowMinutes
	rule.SustainedMinutes = validated.SustainedMinutes
	rule.CooldownMinutes = validated.CooldownMinutes
	rule.Severity = validated.Severity
	rule.Enabled = validated.Enabled
	rule.NotifyEmail = validated.NotifyEmail

	updated, err := h.opsService.UpdateAlertRule(c.Request.Context(), &rule)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Success(c, updated)
}
|
||||||
|
|
||||||
|
// DeleteAlertRule deletes an ops alert rule.
|
||||||
|
// DELETE /api/v1/admin/ops/alert-rules/:id
|
||||||
|
func (h *OpsHandler) DeleteAlertRule(c *gin.Context) {
|
||||||
|
if h.opsService == nil {
|
||||||
|
response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
id, err := strconv.ParseInt(c.Param("id"), 10, 64)
|
||||||
|
if err != nil || id <= 0 {
|
||||||
|
response.BadRequest(c, "Invalid rule ID")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := h.opsService.DeleteAlertRule(c.Request.Context(), id); err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
response.Success(c, gin.H{"deleted": true})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListAlertEvents lists recent ops alert events.
// GET /api/v1/admin/ops/alert-events
//
// Query params: limit (default 100), status, severity, platform, group_id,
// and the standard ops time range (only applied when explicitly provided).
func (h *OpsHandler) ListAlertEvents(c *gin.Context) {
	if h.opsService == nil {
		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
		return
	}
	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
		response.ErrorFrom(c, err)
		return
	}

	// NOTE(review): limit has no upper bound here, unlike the paginated
	// endpoints (capped at 100/500) — confirm the service clamps it.
	limit := 100
	if raw := strings.TrimSpace(c.Query("limit")); raw != "" {
		n, err := strconv.Atoi(raw)
		if err != nil || n <= 0 {
			response.BadRequest(c, "Invalid limit")
			return
		}
		limit = n
	}

	filter := &service.OpsAlertEventFilter{
		Limit:    limit,
		Status:   strings.TrimSpace(c.Query("status")),
		Severity: strings.TrimSpace(c.Query("severity")),
	}

	// Optional global filter support (platform/group/time range).
	if platform := strings.TrimSpace(c.Query("platform")); platform != "" {
		filter.Platform = platform
	}
	if v := strings.TrimSpace(c.Query("group_id")); v != "" {
		id, err := strconv.ParseInt(v, 10, 64)
		if err != nil || id <= 0 {
			response.BadRequest(c, "Invalid group_id")
			return
		}
		filter.GroupID = &id
	}
	if startTime, endTime, err := parseOpsTimeRange(c, "24h"); err == nil {
		// Only apply when explicitly provided to avoid surprising default narrowing.
		if strings.TrimSpace(c.Query("start_time")) != "" || strings.TrimSpace(c.Query("end_time")) != "" || strings.TrimSpace(c.Query("time_range")) != "" {
			filter.StartTime = &startTime
			filter.EndTime = &endTime
		}
	} else {
		response.BadRequest(c, err.Error())
		return
	}

	events, err := h.opsService.ListAlertEvents(c.Request.Context(), filter)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Success(c, events)
}
|
||||||
243
backend/internal/handler/admin/ops_dashboard_handler.go
Normal file
243
backend/internal/handler/admin/ops_dashboard_handler.go
Normal file
@@ -0,0 +1,243 @@
|
|||||||
|
package admin
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/pkg/response"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||||
|
"github.com/gin-gonic/gin"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetDashboardOverview returns vNext ops dashboard overview (raw path).
|
||||||
|
// GET /api/v1/admin/ops/dashboard/overview
|
||||||
|
func (h *OpsHandler) GetDashboardOverview(c *gin.Context) {
|
||||||
|
if h.opsService == nil {
|
||||||
|
response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
startTime, endTime, err := parseOpsTimeRange(c, "1h")
|
||||||
|
if err != nil {
|
||||||
|
response.BadRequest(c, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
filter := &service.OpsDashboardFilter{
|
||||||
|
StartTime: startTime,
|
||||||
|
EndTime: endTime,
|
||||||
|
Platform: strings.TrimSpace(c.Query("platform")),
|
||||||
|
QueryMode: parseOpsQueryMode(c),
|
||||||
|
}
|
||||||
|
if v := strings.TrimSpace(c.Query("group_id")); v != "" {
|
||||||
|
id, err := strconv.ParseInt(v, 10, 64)
|
||||||
|
if err != nil || id <= 0 {
|
||||||
|
response.BadRequest(c, "Invalid group_id")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
filter.GroupID = &id
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err := h.opsService.GetDashboardOverview(c.Request.Context(), filter)
|
||||||
|
if err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
response.Success(c, data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetDashboardThroughputTrend returns throughput time series (raw path).
|
||||||
|
// GET /api/v1/admin/ops/dashboard/throughput-trend
|
||||||
|
func (h *OpsHandler) GetDashboardThroughputTrend(c *gin.Context) {
|
||||||
|
if h.opsService == nil {
|
||||||
|
response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
startTime, endTime, err := parseOpsTimeRange(c, "1h")
|
||||||
|
if err != nil {
|
||||||
|
response.BadRequest(c, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
filter := &service.OpsDashboardFilter{
|
||||||
|
StartTime: startTime,
|
||||||
|
EndTime: endTime,
|
||||||
|
Platform: strings.TrimSpace(c.Query("platform")),
|
||||||
|
QueryMode: parseOpsQueryMode(c),
|
||||||
|
}
|
||||||
|
if v := strings.TrimSpace(c.Query("group_id")); v != "" {
|
||||||
|
id, err := strconv.ParseInt(v, 10, 64)
|
||||||
|
if err != nil || id <= 0 {
|
||||||
|
response.BadRequest(c, "Invalid group_id")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
filter.GroupID = &id
|
||||||
|
}
|
||||||
|
|
||||||
|
bucketSeconds := pickThroughputBucketSeconds(endTime.Sub(startTime))
|
||||||
|
data, err := h.opsService.GetThroughputTrend(c.Request.Context(), filter, bucketSeconds)
|
||||||
|
if err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
response.Success(c, data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetDashboardLatencyHistogram returns the latency distribution histogram (success requests).
|
||||||
|
// GET /api/v1/admin/ops/dashboard/latency-histogram
|
||||||
|
func (h *OpsHandler) GetDashboardLatencyHistogram(c *gin.Context) {
|
||||||
|
if h.opsService == nil {
|
||||||
|
response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
startTime, endTime, err := parseOpsTimeRange(c, "1h")
|
||||||
|
if err != nil {
|
||||||
|
response.BadRequest(c, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
filter := &service.OpsDashboardFilter{
|
||||||
|
StartTime: startTime,
|
||||||
|
EndTime: endTime,
|
||||||
|
Platform: strings.TrimSpace(c.Query("platform")),
|
||||||
|
QueryMode: parseOpsQueryMode(c),
|
||||||
|
}
|
||||||
|
if v := strings.TrimSpace(c.Query("group_id")); v != "" {
|
||||||
|
id, err := strconv.ParseInt(v, 10, 64)
|
||||||
|
if err != nil || id <= 0 {
|
||||||
|
response.BadRequest(c, "Invalid group_id")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
filter.GroupID = &id
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err := h.opsService.GetLatencyHistogram(c.Request.Context(), filter)
|
||||||
|
if err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
response.Success(c, data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetDashboardErrorTrend returns error counts time series (raw path).
|
||||||
|
// GET /api/v1/admin/ops/dashboard/error-trend
|
||||||
|
func (h *OpsHandler) GetDashboardErrorTrend(c *gin.Context) {
|
||||||
|
if h.opsService == nil {
|
||||||
|
response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
startTime, endTime, err := parseOpsTimeRange(c, "1h")
|
||||||
|
if err != nil {
|
||||||
|
response.BadRequest(c, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
filter := &service.OpsDashboardFilter{
|
||||||
|
StartTime: startTime,
|
||||||
|
EndTime: endTime,
|
||||||
|
Platform: strings.TrimSpace(c.Query("platform")),
|
||||||
|
QueryMode: parseOpsQueryMode(c),
|
||||||
|
}
|
||||||
|
if v := strings.TrimSpace(c.Query("group_id")); v != "" {
|
||||||
|
id, err := strconv.ParseInt(v, 10, 64)
|
||||||
|
if err != nil || id <= 0 {
|
||||||
|
response.BadRequest(c, "Invalid group_id")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
filter.GroupID = &id
|
||||||
|
}
|
||||||
|
|
||||||
|
bucketSeconds := pickThroughputBucketSeconds(endTime.Sub(startTime))
|
||||||
|
data, err := h.opsService.GetErrorTrend(c.Request.Context(), filter, bucketSeconds)
|
||||||
|
if err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
response.Success(c, data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetDashboardErrorDistribution returns error distribution by status code (raw path).
|
||||||
|
// GET /api/v1/admin/ops/dashboard/error-distribution
|
||||||
|
func (h *OpsHandler) GetDashboardErrorDistribution(c *gin.Context) {
|
||||||
|
if h.opsService == nil {
|
||||||
|
response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
startTime, endTime, err := parseOpsTimeRange(c, "1h")
|
||||||
|
if err != nil {
|
||||||
|
response.BadRequest(c, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
filter := &service.OpsDashboardFilter{
|
||||||
|
StartTime: startTime,
|
||||||
|
EndTime: endTime,
|
||||||
|
Platform: strings.TrimSpace(c.Query("platform")),
|
||||||
|
QueryMode: parseOpsQueryMode(c),
|
||||||
|
}
|
||||||
|
if v := strings.TrimSpace(c.Query("group_id")); v != "" {
|
||||||
|
id, err := strconv.ParseInt(v, 10, 64)
|
||||||
|
if err != nil || id <= 0 {
|
||||||
|
response.BadRequest(c, "Invalid group_id")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
filter.GroupID = &id
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err := h.opsService.GetErrorDistribution(c.Request.Context(), filter)
|
||||||
|
if err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
response.Success(c, data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func pickThroughputBucketSeconds(window time.Duration) int {
|
||||||
|
// Keep buckets predictable and avoid huge responses.
|
||||||
|
switch {
|
||||||
|
case window <= 2*time.Hour:
|
||||||
|
return 60
|
||||||
|
case window <= 24*time.Hour:
|
||||||
|
return 300
|
||||||
|
default:
|
||||||
|
return 3600
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseOpsQueryMode(c *gin.Context) service.OpsQueryMode {
|
||||||
|
if c == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
raw := strings.TrimSpace(c.Query("mode"))
|
||||||
|
if raw == "" {
|
||||||
|
// Empty means "use server default" (DB setting ops_query_mode_default).
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return service.ParseOpsQueryMode(raw)
|
||||||
|
}
|
||||||
364
backend/internal/handler/admin/ops_handler.go
Normal file
364
backend/internal/handler/admin/ops_handler.go
Normal file
@@ -0,0 +1,364 @@
|
|||||||
|
package admin
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/pkg/response"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/server/middleware"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||||
|
"github.com/gin-gonic/gin"
|
||||||
|
)
|
||||||
|
|
||||||
|
// OpsHandler serves the admin ops/monitoring endpoints: error logs, request
// drill-down, retry, alert rules/events, dashboards, and realtime stats.
type OpsHandler struct {
	// opsService may be nil when monitoring is not wired up; every handler
	// checks for this and responds 503.
	opsService *service.OpsService
}
|
||||||
|
|
||||||
|
// NewOpsHandler creates an OpsHandler. opsService may be nil, in which case
// every endpoint responds with 503 Service Unavailable.
func NewOpsHandler(opsService *service.OpsService) *OpsHandler {
	return &OpsHandler{opsService: opsService}
}
|
||||||
|
|
||||||
|
// GetErrorLogs lists ops error logs.
// GET /api/v1/admin/ops/errors
//
// Query params: start_time/end_time or time_range (default "1h"), platform,
// group_id, account_id, phase, q (free text), status_codes (CSV), plus
// standard pagination with page_size capped at 500.
func (h *OpsHandler) GetErrorLogs(c *gin.Context) {
	if h.opsService == nil {
		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
		return
	}
	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
		response.ErrorFrom(c, err)
		return
	}

	page, pageSize := response.ParsePagination(c)
	// Ops list can be larger than standard admin tables.
	if pageSize > 500 {
		pageSize = 500
	}

	startTime, endTime, err := parseOpsTimeRange(c, "1h")
	if err != nil {
		response.BadRequest(c, err.Error())
		return
	}

	filter := &service.OpsErrorLogFilter{
		Page:     page,
		PageSize: pageSize,
	}
	// Guard against zero times so an unset bound never narrows the query.
	if !startTime.IsZero() {
		filter.StartTime = &startTime
	}
	if !endTime.IsZero() {
		filter.EndTime = &endTime
	}

	// Optional dimension filters; invalid numeric values are rejected early.
	if platform := strings.TrimSpace(c.Query("platform")); platform != "" {
		filter.Platform = platform
	}
	if v := strings.TrimSpace(c.Query("group_id")); v != "" {
		id, err := strconv.ParseInt(v, 10, 64)
		if err != nil || id <= 0 {
			response.BadRequest(c, "Invalid group_id")
			return
		}
		filter.GroupID = &id
	}
	if v := strings.TrimSpace(c.Query("account_id")); v != "" {
		id, err := strconv.ParseInt(v, 10, 64)
		if err != nil || id <= 0 {
			response.BadRequest(c, "Invalid account_id")
			return
		}
		filter.AccountID = &id
	}
	if phase := strings.TrimSpace(c.Query("phase")); phase != "" {
		filter.Phase = phase
	}
	if q := strings.TrimSpace(c.Query("q")); q != "" {
		filter.Query = q
	}
	// status_codes is comma-separated; blank segments are skipped, while
	// non-numeric or negative entries reject the whole request.
	if statusCodesStr := strings.TrimSpace(c.Query("status_codes")); statusCodesStr != "" {
		parts := strings.Split(statusCodesStr, ",")
		out := make([]int, 0, len(parts))
		for _, part := range parts {
			p := strings.TrimSpace(part)
			if p == "" {
				continue
			}
			n, err := strconv.Atoi(p)
			if err != nil || n < 0 {
				response.BadRequest(c, "Invalid status_codes")
				return
			}
			out = append(out, n)
		}
		filter.StatusCodes = out
	}

	result, err := h.opsService.GetErrorLogs(c.Request.Context(), filter)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}

	response.Paginated(c, result.Errors, int64(result.Total), result.Page, result.PageSize)
}
|
||||||
|
|
||||||
|
// GetErrorLogByID returns a single error log detail.
|
||||||
|
// GET /api/v1/admin/ops/errors/:id
|
||||||
|
func (h *OpsHandler) GetErrorLogByID(c *gin.Context) {
|
||||||
|
if h.opsService == nil {
|
||||||
|
response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
idStr := strings.TrimSpace(c.Param("id"))
|
||||||
|
id, err := strconv.ParseInt(idStr, 10, 64)
|
||||||
|
if err != nil || id <= 0 {
|
||||||
|
response.BadRequest(c, "Invalid error id")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
detail, err := h.opsService.GetErrorLogByID(c.Request.Context(), id)
|
||||||
|
if err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
response.Success(c, detail)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListRequestDetails returns a request-level list (success + error) for drill-down.
// GET /api/v1/admin/ops/requests
//
// Query params: kind, platform, model, request_id, q, sort, user_id,
// api_key_id, account_id, group_id, min_duration_ms, max_duration_ms, plus
// standard pagination (page_size capped at 100) and the ops time range
// (default "1h").
func (h *OpsHandler) ListRequestDetails(c *gin.Context) {
	if h.opsService == nil {
		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
		return
	}
	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
		response.ErrorFrom(c, err)
		return
	}

	page, pageSize := response.ParsePagination(c)
	if pageSize > 100 {
		pageSize = 100
	}

	startTime, endTime, err := parseOpsTimeRange(c, "1h")
	if err != nil {
		response.BadRequest(c, err.Error())
		return
	}

	filter := &service.OpsRequestDetailFilter{
		Page:      page,
		PageSize:  pageSize,
		StartTime: &startTime,
		EndTime:   &endTime,
	}

	// Free-form string filters; the service validates kind/platform/sort.
	filter.Kind = strings.TrimSpace(c.Query("kind"))
	filter.Platform = strings.TrimSpace(c.Query("platform"))
	filter.Model = strings.TrimSpace(c.Query("model"))
	filter.RequestID = strings.TrimSpace(c.Query("request_id"))
	filter.Query = strings.TrimSpace(c.Query("q"))
	filter.Sort = strings.TrimSpace(c.Query("sort"))

	// Numeric ID filters must be positive integers when present.
	if v := strings.TrimSpace(c.Query("user_id")); v != "" {
		id, err := strconv.ParseInt(v, 10, 64)
		if err != nil || id <= 0 {
			response.BadRequest(c, "Invalid user_id")
			return
		}
		filter.UserID = &id
	}
	if v := strings.TrimSpace(c.Query("api_key_id")); v != "" {
		id, err := strconv.ParseInt(v, 10, 64)
		if err != nil || id <= 0 {
			response.BadRequest(c, "Invalid api_key_id")
			return
		}
		filter.APIKeyID = &id
	}
	if v := strings.TrimSpace(c.Query("account_id")); v != "" {
		id, err := strconv.ParseInt(v, 10, 64)
		if err != nil || id <= 0 {
			response.BadRequest(c, "Invalid account_id")
			return
		}
		filter.AccountID = &id
	}
	if v := strings.TrimSpace(c.Query("group_id")); v != "" {
		id, err := strconv.ParseInt(v, 10, 64)
		if err != nil || id <= 0 {
			response.BadRequest(c, "Invalid group_id")
			return
		}
		filter.GroupID = &id
	}

	// Duration bounds are non-negative milliseconds.
	if v := strings.TrimSpace(c.Query("min_duration_ms")); v != "" {
		parsed, err := strconv.Atoi(v)
		if err != nil || parsed < 0 {
			response.BadRequest(c, "Invalid min_duration_ms")
			return
		}
		filter.MinDurationMs = &parsed
	}
	if v := strings.TrimSpace(c.Query("max_duration_ms")); v != "" {
		parsed, err := strconv.Atoi(v)
		if err != nil || parsed < 0 {
			response.BadRequest(c, "Invalid max_duration_ms")
			return
		}
		filter.MaxDurationMs = &parsed
	}

	out, err := h.opsService.ListRequestDetails(c.Request.Context(), filter)
	if err != nil {
		// Invalid sort/kind/platform etc should be a bad request; keep it simple.
		// NOTE(review): substring matching on error text is brittle — a typed
		// or sentinel error from the service would be safer.
		if strings.Contains(strings.ToLower(err.Error()), "invalid") {
			response.BadRequest(c, err.Error())
			return
		}
		response.Error(c, http.StatusInternalServerError, "Failed to list request details")
		return
	}

	response.Paginated(c, out.Items, out.Total, out.Page, out.PageSize)
}
|
||||||
|
|
||||||
|
// opsRetryRequest is the optional JSON body for RetryErrorRequest.
type opsRetryRequest struct {
	// Mode selects the retry strategy; empty falls back to
	// service.OpsRetryModeClient.
	Mode string `json:"mode"`
	// PinnedAccountID optionally forces the retry onto a specific account.
	PinnedAccountID *int64 `json:"pinned_account_id"`
}
|
||||||
|
|
||||||
|
// RetryErrorRequest retries a failed request using stored request_body.
// POST /api/v1/admin/ops/errors/:id/retry
//
// Body (optional): {"mode": "...", "pinned_account_id": N}. A missing body is
// accepted and defaults to client mode; the acting admin user is attributed.
func (h *OpsHandler) RetryErrorRequest(c *gin.Context) {
	if h.opsService == nil {
		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
		return
	}
	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
		response.ErrorFrom(c, err)
		return
	}

	// An authenticated subject is required so the retry is attributed to a user.
	subject, ok := middleware.GetAuthSubjectFromContext(c)
	if !ok || subject.UserID <= 0 {
		response.Error(c, http.StatusUnauthorized, "Unauthorized")
		return
	}

	idStr := strings.TrimSpace(c.Param("id"))
	id, err := strconv.ParseInt(idStr, 10, 64)
	if err != nil || id <= 0 {
		response.BadRequest(c, "Invalid error id")
		return
	}

	// io.EOF from binding means "no body", which is fine — keep the defaults.
	req := opsRetryRequest{Mode: service.OpsRetryModeClient}
	if err := c.ShouldBindJSON(&req); err != nil && !errors.Is(err, io.EOF) {
		response.BadRequest(c, "Invalid request: "+err.Error())
		return
	}
	if strings.TrimSpace(req.Mode) == "" {
		req.Mode = service.OpsRetryModeClient
	}

	result, err := h.opsService.RetryError(c.Request.Context(), subject.UserID, id, req.Mode, req.PinnedAccountID)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}

	response.Success(c, result)
}
|
||||||
|
|
||||||
|
// parseOpsTimeRange resolves the request's time window.
//
// Precedence:
//  1. Explicit start_time / end_time (RFC3339Nano, then RFC3339). A missing
//     end defaults to now; a missing start defaults to end minus defaultRange.
//  2. Otherwise the time_range token (e.g. "1h"); unknown tokens silently
//     fall back to defaultRange.
//
// The window is capped at 30 days. defaultRange is expected to be a token
// accepted by parseOpsDuration; otherwise the fallback duration is zero.
func parseOpsTimeRange(c *gin.Context, defaultRange string) (time.Time, time.Time, error) {
	startStr := strings.TrimSpace(c.Query("start_time"))
	endStr := strings.TrimSpace(c.Query("end_time"))

	// parseTS: empty input yields the zero time with no error; otherwise try
	// RFC3339Nano first, then plain RFC3339.
	parseTS := func(s string) (time.Time, error) {
		if s == "" {
			return time.Time{}, nil
		}
		if t, err := time.Parse(time.RFC3339Nano, s); err == nil {
			return t, nil
		}
		return time.Parse(time.RFC3339, s)
	}

	start, err := parseTS(startStr)
	if err != nil {
		return time.Time{}, time.Time{}, err
	}
	end, err := parseTS(endStr)
	if err != nil {
		return time.Time{}, time.Time{}, err
	}

	// start/end explicitly provided (even partially)
	if startStr != "" || endStr != "" {
		if end.IsZero() {
			end = time.Now()
		}
		if start.IsZero() {
			// Error deliberately ignored: defaultRange is caller-supplied and
			// expected to be a valid token.
			dur, _ := parseOpsDuration(defaultRange)
			start = end.Add(-dur)
		}
		if start.After(end) {
			return time.Time{}, time.Time{}, fmt.Errorf("invalid time range: start_time must be <= end_time")
		}
		if end.Sub(start) > 30*24*time.Hour {
			return time.Time{}, time.Time{}, fmt.Errorf("invalid time range: max window is 30 days")
		}
		return start, end, nil
	}

	// time_range fallback
	tr := strings.TrimSpace(c.Query("time_range"))
	if tr == "" {
		tr = defaultRange
	}
	dur, ok := parseOpsDuration(tr)
	if !ok {
		dur, _ = parseOpsDuration(defaultRange)
	}

	end = time.Now()
	start = end.Add(-dur)
	// NOTE(review): with the current parseOpsDuration tokens (max 24h) this
	// cap can never trigger on the fallback path; kept as a safety net in
	// case larger tokens are added.
	if end.Sub(start) > 30*24*time.Hour {
		return time.Time{}, time.Time{}, fmt.Errorf("invalid time range: max window is 30 days")
	}
	return start, end, nil
}
|
||||||
|
|
||||||
|
func parseOpsDuration(v string) (time.Duration, bool) {
|
||||||
|
switch strings.TrimSpace(v) {
|
||||||
|
case "5m":
|
||||||
|
return 5 * time.Minute, true
|
||||||
|
case "30m":
|
||||||
|
return 30 * time.Minute, true
|
||||||
|
case "1h":
|
||||||
|
return time.Hour, true
|
||||||
|
case "6h":
|
||||||
|
return 6 * time.Hour, true
|
||||||
|
case "24h":
|
||||||
|
return 24 * time.Hour, true
|
||||||
|
default:
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
}
|
||||||
213
backend/internal/handler/admin/ops_realtime_handler.go
Normal file
213
backend/internal/handler/admin/ops_realtime_handler.go
Normal file
@@ -0,0 +1,213 @@
|
|||||||
|
package admin
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/pkg/response"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||||
|
"github.com/gin-gonic/gin"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetConcurrencyStats returns real-time concurrency usage aggregated by platform/group/account.
// GET /api/v1/admin/ops/concurrency
//
// Query params:
//   - platform: optional
//   - group_id: optional
func (h *OpsHandler) GetConcurrencyStats(c *gin.Context) {
	if h.opsService == nil {
		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
		return
	}
	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
		response.ErrorFrom(c, err)
		return
	}

	// When realtime collection is disabled, return an explicit empty payload
	// (enabled=false) rather than an error so the UI can render a disabled
	// state. The key shape must match the enabled payload below.
	if !h.opsService.IsRealtimeMonitoringEnabled(c.Request.Context()) {
		response.Success(c, gin.H{
			"enabled":   false,
			"platform":  map[string]*service.PlatformConcurrencyInfo{},
			"group":     map[int64]*service.GroupConcurrencyInfo{},
			"account":   map[int64]*service.AccountConcurrencyInfo{},
			"timestamp": time.Now().UTC(),
		})
		return
	}

	platformFilter := strings.TrimSpace(c.Query("platform"))
	var groupID *int64
	if v := strings.TrimSpace(c.Query("group_id")); v != "" {
		id, err := strconv.ParseInt(v, 10, 64)
		if err != nil || id <= 0 {
			response.BadRequest(c, "Invalid group_id")
			return
		}
		groupID = &id
	}

	platform, group, account, collectedAt, err := h.opsService.GetConcurrencyStats(c.Request.Context(), platformFilter, groupID)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}

	payload := gin.H{
		"enabled":  true,
		"platform": platform,
		"group":    group,
		"account":  account,
	}
	// timestamp is only attached when the collector has produced a snapshot.
	if collectedAt != nil {
		payload["timestamp"] = collectedAt.UTC()
	}
	response.Success(c, payload)
}
|
||||||
|
|
||||||
|
// GetAccountAvailability returns account availability statistics.
// GET /api/v1/admin/ops/account-availability
//
// Query params:
//   - platform: optional
//   - group_id: optional
func (h *OpsHandler) GetAccountAvailability(c *gin.Context) {
	if h.opsService == nil {
		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
		return
	}
	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
		response.ErrorFrom(c, err)
		return
	}

	// When realtime collection is disabled, return an explicit empty payload
	// (enabled=false) rather than an error; key shape mirrors the enabled
	// payload below.
	if !h.opsService.IsRealtimeMonitoringEnabled(c.Request.Context()) {
		response.Success(c, gin.H{
			"enabled":   false,
			"platform":  map[string]*service.PlatformAvailability{},
			"group":     map[int64]*service.GroupAvailability{},
			"account":   map[int64]*service.AccountAvailability{},
			"timestamp": time.Now().UTC(),
		})
		return
	}

	platform := strings.TrimSpace(c.Query("platform"))
	var groupID *int64
	if v := strings.TrimSpace(c.Query("group_id")); v != "" {
		id, err := strconv.ParseInt(v, 10, 64)
		if err != nil || id <= 0 {
			response.BadRequest(c, "Invalid group_id")
			return
		}
		groupID = &id
	}

	platformStats, groupStats, accountStats, collectedAt, err := h.opsService.GetAccountAvailabilityStats(c.Request.Context(), platform, groupID)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}

	payload := gin.H{
		"enabled":  true,
		"platform": platformStats,
		"group":    groupStats,
		"account":  accountStats,
	}
	// timestamp is only attached when the collector has produced a snapshot.
	if collectedAt != nil {
		payload["timestamp"] = collectedAt.UTC()
	}
	response.Success(c, payload)
}
|
||||||
|
|
||||||
|
func parseOpsRealtimeWindow(v string) (time.Duration, string, bool) {
|
||||||
|
switch strings.ToLower(strings.TrimSpace(v)) {
|
||||||
|
case "", "1min", "1m":
|
||||||
|
return 1 * time.Minute, "1min", true
|
||||||
|
case "5min", "5m":
|
||||||
|
return 5 * time.Minute, "5min", true
|
||||||
|
case "30min", "30m":
|
||||||
|
return 30 * time.Minute, "30min", true
|
||||||
|
case "1h", "60m", "60min":
|
||||||
|
return 1 * time.Hour, "1h", true
|
||||||
|
default:
|
||||||
|
return 0, "", false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetRealtimeTrafficSummary returns QPS/TPS current/peak/avg for the selected window.
|
||||||
|
// GET /api/v1/admin/ops/realtime-traffic
|
||||||
|
//
|
||||||
|
// Query params:
|
||||||
|
// - window: 1min|5min|30min|1h (default: 1min)
|
||||||
|
// - platform: optional
|
||||||
|
// - group_id: optional
|
||||||
|
func (h *OpsHandler) GetRealtimeTrafficSummary(c *gin.Context) {
|
||||||
|
if h.opsService == nil {
|
||||||
|
response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
windowDur, windowLabel, ok := parseOpsRealtimeWindow(c.Query("window"))
|
||||||
|
if !ok {
|
||||||
|
response.BadRequest(c, "Invalid window")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
platform := strings.TrimSpace(c.Query("platform"))
|
||||||
|
var groupID *int64
|
||||||
|
if v := strings.TrimSpace(c.Query("group_id")); v != "" {
|
||||||
|
id, err := strconv.ParseInt(v, 10, 64)
|
||||||
|
if err != nil || id <= 0 {
|
||||||
|
response.BadRequest(c, "Invalid group_id")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
groupID = &id
|
||||||
|
}
|
||||||
|
|
||||||
|
endTime := time.Now().UTC()
|
||||||
|
startTime := endTime.Add(-windowDur)
|
||||||
|
|
||||||
|
if !h.opsService.IsRealtimeMonitoringEnabled(c.Request.Context()) {
|
||||||
|
disabledSummary := &service.OpsRealtimeTrafficSummary{
|
||||||
|
Window: windowLabel,
|
||||||
|
StartTime: startTime,
|
||||||
|
EndTime: endTime,
|
||||||
|
Platform: platform,
|
||||||
|
GroupID: groupID,
|
||||||
|
QPS: service.OpsRateSummary{},
|
||||||
|
TPS: service.OpsRateSummary{},
|
||||||
|
}
|
||||||
|
response.Success(c, gin.H{
|
||||||
|
"enabled": false,
|
||||||
|
"summary": disabledSummary,
|
||||||
|
"timestamp": endTime,
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
filter := &service.OpsDashboardFilter{
|
||||||
|
StartTime: startTime,
|
||||||
|
EndTime: endTime,
|
||||||
|
Platform: platform,
|
||||||
|
GroupID: groupID,
|
||||||
|
QueryMode: service.OpsQueryModeRaw,
|
||||||
|
}
|
||||||
|
|
||||||
|
summary, err := h.opsService.GetRealtimeTrafficSummary(c.Request.Context(), filter)
|
||||||
|
if err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if summary != nil {
|
||||||
|
summary.Window = windowLabel
|
||||||
|
}
|
||||||
|
response.Success(c, gin.H{
|
||||||
|
"enabled": true,
|
||||||
|
"summary": summary,
|
||||||
|
"timestamp": endTime,
|
||||||
|
})
|
||||||
|
}
|
||||||
194
backend/internal/handler/admin/ops_settings_handler.go
Normal file
194
backend/internal/handler/admin/ops_settings_handler.go
Normal file
@@ -0,0 +1,194 @@
|
|||||||
|
package admin
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/pkg/response"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||||
|
"github.com/gin-gonic/gin"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetEmailNotificationConfig returns Ops email notification config (DB-backed).
|
||||||
|
// GET /api/v1/admin/ops/email-notification/config
|
||||||
|
func (h *OpsHandler) GetEmailNotificationConfig(c *gin.Context) {
|
||||||
|
if h.opsService == nil {
|
||||||
|
response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg, err := h.opsService.GetEmailNotificationConfig(c.Request.Context())
|
||||||
|
if err != nil {
|
||||||
|
response.Error(c, http.StatusInternalServerError, "Failed to get email notification config")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
response.Success(c, cfg)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateEmailNotificationConfig updates Ops email notification config (DB-backed).
|
||||||
|
// PUT /api/v1/admin/ops/email-notification/config
|
||||||
|
func (h *OpsHandler) UpdateEmailNotificationConfig(c *gin.Context) {
|
||||||
|
if h.opsService == nil {
|
||||||
|
response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var req service.OpsEmailNotificationConfigUpdateRequest
|
||||||
|
if err := c.ShouldBindJSON(&req); err != nil {
|
||||||
|
response.BadRequest(c, "Invalid request body")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
updated, err := h.opsService.UpdateEmailNotificationConfig(c.Request.Context(), &req)
|
||||||
|
if err != nil {
|
||||||
|
// Most failures here are validation errors from request payload; treat as 400.
|
||||||
|
response.Error(c, http.StatusBadRequest, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
response.Success(c, updated)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAlertRuntimeSettings returns Ops alert evaluator runtime settings (DB-backed).
|
||||||
|
// GET /api/v1/admin/ops/runtime/alert
|
||||||
|
func (h *OpsHandler) GetAlertRuntimeSettings(c *gin.Context) {
|
||||||
|
if h.opsService == nil {
|
||||||
|
response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg, err := h.opsService.GetOpsAlertRuntimeSettings(c.Request.Context())
|
||||||
|
if err != nil {
|
||||||
|
response.Error(c, http.StatusInternalServerError, "Failed to get alert runtime settings")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
response.Success(c, cfg)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateAlertRuntimeSettings updates Ops alert evaluator runtime settings (DB-backed).
|
||||||
|
// PUT /api/v1/admin/ops/runtime/alert
|
||||||
|
func (h *OpsHandler) UpdateAlertRuntimeSettings(c *gin.Context) {
|
||||||
|
if h.opsService == nil {
|
||||||
|
response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var req service.OpsAlertRuntimeSettings
|
||||||
|
if err := c.ShouldBindJSON(&req); err != nil {
|
||||||
|
response.BadRequest(c, "Invalid request body")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
updated, err := h.opsService.UpdateOpsAlertRuntimeSettings(c.Request.Context(), &req)
|
||||||
|
if err != nil {
|
||||||
|
response.Error(c, http.StatusBadRequest, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
response.Success(c, updated)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAdvancedSettings returns Ops advanced settings (DB-backed).
|
||||||
|
// GET /api/v1/admin/ops/advanced-settings
|
||||||
|
func (h *OpsHandler) GetAdvancedSettings(c *gin.Context) {
|
||||||
|
if h.opsService == nil {
|
||||||
|
response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg, err := h.opsService.GetOpsAdvancedSettings(c.Request.Context())
|
||||||
|
if err != nil {
|
||||||
|
response.Error(c, http.StatusInternalServerError, "Failed to get advanced settings")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
response.Success(c, cfg)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateAdvancedSettings updates Ops advanced settings (DB-backed).
|
||||||
|
// PUT /api/v1/admin/ops/advanced-settings
|
||||||
|
func (h *OpsHandler) UpdateAdvancedSettings(c *gin.Context) {
|
||||||
|
if h.opsService == nil {
|
||||||
|
response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var req service.OpsAdvancedSettings
|
||||||
|
if err := c.ShouldBindJSON(&req); err != nil {
|
||||||
|
response.BadRequest(c, "Invalid request body")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
updated, err := h.opsService.UpdateOpsAdvancedSettings(c.Request.Context(), &req)
|
||||||
|
if err != nil {
|
||||||
|
response.Error(c, http.StatusBadRequest, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
response.Success(c, updated)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetMetricThresholds returns Ops metric thresholds (DB-backed).
|
||||||
|
// GET /api/v1/admin/ops/settings/metric-thresholds
|
||||||
|
func (h *OpsHandler) GetMetricThresholds(c *gin.Context) {
|
||||||
|
if h.opsService == nil {
|
||||||
|
response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg, err := h.opsService.GetMetricThresholds(c.Request.Context())
|
||||||
|
if err != nil {
|
||||||
|
response.Error(c, http.StatusInternalServerError, "Failed to get metric thresholds")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
response.Success(c, cfg)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateMetricThresholds updates Ops metric thresholds (DB-backed).
|
||||||
|
// PUT /api/v1/admin/ops/settings/metric-thresholds
|
||||||
|
func (h *OpsHandler) UpdateMetricThresholds(c *gin.Context) {
|
||||||
|
if h.opsService == nil {
|
||||||
|
response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var req service.OpsMetricThresholds
|
||||||
|
if err := c.ShouldBindJSON(&req); err != nil {
|
||||||
|
response.BadRequest(c, "Invalid request body")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
updated, err := h.opsService.UpdateMetricThresholds(c.Request.Context(), &req)
|
||||||
|
if err != nil {
|
||||||
|
response.Error(c, http.StatusBadRequest, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
response.Success(c, updated)
|
||||||
|
}
|
||||||
771
backend/internal/handler/admin/ops_ws_handler.go
Normal file
771
backend/internal/handler/admin/ops_ws_handler.go
Normal file
@@ -0,0 +1,771 @@
|
|||||||
|
package admin
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"log"
|
||||||
|
"math"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/netip"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||||
|
"github.com/gin-gonic/gin"
|
||||||
|
"github.com/gorilla/websocket"
|
||||||
|
)
|
||||||
|
|
||||||
|
// OpsWSProxyConfig controls how the Ops websocket endpoint interprets
// reverse-proxy headers and the Origin header during the handshake.
type OpsWSProxyConfig struct {
	// TrustProxy enables use of X-Forwarded-* headers, but only for peers
	// whose address falls inside TrustedProxies.
	TrustProxy bool
	// TrustedProxies lists CIDR prefixes of reverse proxies allowed to set
	// forwarding headers.
	TrustedProxies []netip.Prefix
	// OriginPolicy is OriginPolicyStrict or OriginPolicyPermissive and decides
	// how requests without an Origin header are treated.
	OriginPolicy string
}
|
||||||
|
|
||||||
|
// Environment variable names that override the websocket proxy/limit defaults.
const (
	envOpsWSTrustProxy     = "OPS_WS_TRUST_PROXY"
	envOpsWSTrustedProxies = "OPS_WS_TRUSTED_PROXIES"
	envOpsWSOriginPolicy   = "OPS_WS_ORIGIN_POLICY"
	envOpsWSMaxConns       = "OPS_WS_MAX_CONNS"
	envOpsWSMaxConnsPerIP  = "OPS_WS_MAX_CONNS_PER_IP"
)

// Recognized values for OpsWSProxyConfig.OriginPolicy.
const (
	OriginPolicyStrict     = "strict"
	OriginPolicyPermissive = "permissive"
)
|
||||||
|
|
||||||
|
// opsWSProxyConfig is resolved once at process start from the environment.
var opsWSProxyConfig = loadOpsWSProxyConfigFromEnv()

// upgrader performs the HTTP -> WebSocket handshake for the Ops QPS endpoint.
var upgrader = websocket.Upgrader{
	CheckOrigin: func(r *http.Request) bool {
		return isAllowedOpsWSOrigin(r)
	},
	// Subprotocol negotiation:
	// - The frontend passes ["sub2api-admin", "jwt.<token>"].
	// - We always select "sub2api-admin" so the token is never echoed back in the handshake response.
	Subprotocols: []string{"sub2api-admin"},
}
|
||||||
|
|
||||||
|
const (
	// qpsWSPushInterval is how often each connection pushes the cached payload.
	qpsWSPushInterval = 2 * time.Second
	// qpsWSRefreshInterval is how often the shared cache recomputes the payload.
	qpsWSRefreshInterval = 5 * time.Second
	// qpsWSRequestCountWindow is the lookback window used to derive QPS/TPS.
	qpsWSRequestCountWindow = 1 * time.Minute

	// Default connection caps; overridable via environment variables.
	defaultMaxWSConns      = 100
	defaultMaxWSConnsPerIP = 20
)

// wsConnCount tracks total live websocket connections.
var wsConnCount atomic.Int32

// wsConnCountByIP tracks per-client-IP connection counts.
var wsConnCountByIP sync.Map // map[string]*atomic.Int32

// qpsWSIdleStopDelay is how long the cache keeps refreshing after the last
// client disconnects before it shuts itself down.
const qpsWSIdleStopDelay = 30 * time.Second

const (
	// opsWSCloseRealtimeDisabled is an application close code sent when
	// realtime monitoring is turned off.
	opsWSCloseRealtimeDisabled = 4001
)

// qpsWSIdleStopMu guards qpsWSIdleStopTimer.
var qpsWSIdleStopMu sync.Mutex
var qpsWSIdleStopTimer *time.Timer
|
||||||
|
|
||||||
|
func cancelQPSWSIdleStop() {
|
||||||
|
qpsWSIdleStopMu.Lock()
|
||||||
|
if qpsWSIdleStopTimer != nil {
|
||||||
|
qpsWSIdleStopTimer.Stop()
|
||||||
|
qpsWSIdleStopTimer = nil
|
||||||
|
}
|
||||||
|
qpsWSIdleStopMu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// scheduleQPSWSIdleStop arms a one-shot timer that stops the shared QPS cache
// refresh loop after qpsWSIdleStopDelay of zero connections. If a timer is
// already pending, this is a no-op.
func scheduleQPSWSIdleStop() {
	qpsWSIdleStopMu.Lock()
	if qpsWSIdleStopTimer != nil {
		qpsWSIdleStopMu.Unlock()
		return
	}
	qpsWSIdleStopTimer = time.AfterFunc(qpsWSIdleStopDelay, func() {
		// Only stop if truly idle at fire time.
		if wsConnCount.Load() == 0 {
			qpsWSCache.Stop()
		}
		// Clear the timer slot so a future idle period can schedule again.
		qpsWSIdleStopMu.Lock()
		qpsWSIdleStopTimer = nil
		qpsWSIdleStopMu.Unlock()
	})
	qpsWSIdleStopMu.Unlock()
}
|
||||||
|
|
||||||
|
// opsWSRuntimeLimits caps websocket connections globally and per client IP.
// A non-positive value disables the corresponding limit.
type opsWSRuntimeLimits struct {
	MaxConns      int32
	MaxConnsPerIP int32
}

// opsWSLimits is resolved once at process start from the environment.
var opsWSLimits = loadOpsWSRuntimeLimitsFromEnv()
|
||||||
|
|
||||||
|
const (
	// qpsWSWriteTimeout bounds every websocket write (data, ping, close).
	qpsWSWriteTimeout = 10 * time.Second
	// qpsWSPongWait is the read deadline; refreshed whenever a pong arrives.
	qpsWSPongWait = 60 * time.Second
	// qpsWSPingInterval must be shorter than qpsWSPongWait so healthy clients
	// keep the read deadline alive.
	qpsWSPingInterval = 30 * time.Second

	// We don't expect clients to send application messages; we only read to process control frames (Pong/Close).
	qpsWSMaxReadBytes = 1024
)
|
||||||
|
|
||||||
|
// opsWSQPSCache computes the QPS/TPS payload once per refreshInterval and
// shares the marshaled bytes with every websocket connection, so N clients
// cost one backend query rather than N.
type opsWSQPSCache struct {
	refreshInterval    time.Duration
	requestCountWindow time.Duration

	// lastUpdatedUnixNano records when payload was last stored.
	lastUpdatedUnixNano atomic.Int64
	// payload holds the most recent marshaled message ([]byte).
	payload atomic.Value // []byte

	// opsService, cancel, done and running are guarded by mu; done is closed
	// when the refresh goroutine exits.
	opsService *service.OpsService
	cancel     context.CancelFunc
	done       chan struct{}

	mu      sync.Mutex
	running bool
}

// qpsWSCache is the process-wide singleton backing all QPS websocket clients.
var qpsWSCache = &opsWSQPSCache{
	refreshInterval:    qpsWSRefreshInterval,
	requestCountWindow: qpsWSRequestCountWindow,
}
|
||||||
|
|
||||||
|
// start launches the background refresh loop if it is not already running.
// If a previous loop is mid-shutdown, start waits for it to fully exit before
// launching a new one, so at most one refresh goroutine ever exists.
func (c *opsWSQPSCache) start(opsService *service.OpsService) {
	if c == nil || opsService == nil {
		return
	}

	for {
		c.mu.Lock()
		if c.running {
			// Already running: nothing to do.
			c.mu.Unlock()
			return
		}

		// If a previous refresh loop is currently stopping, wait for it to fully exit.
		done := c.done
		if done != nil {
			c.mu.Unlock()
			<-done

			// Clear the stale done channel only if no concurrent start/stop
			// replaced it while we were waiting.
			c.mu.Lock()
			if c.done == done && !c.running {
				c.done = nil
			}
			c.mu.Unlock()
			continue
		}

		// Transition to running: wire up the service, a cancelable context,
		// and a done channel the goroutine closes on exit.
		c.opsService = opsService
		ctx, cancel := context.WithCancel(context.Background())
		c.cancel = cancel
		c.done = make(chan struct{})
		done = c.done
		c.running = true
		c.mu.Unlock()

		go func() {
			defer close(done)
			c.refreshLoop(ctx)
		}()
		return
	}
}
|
||||||
|
|
||||||
|
// Stop stops the background refresh loop.
// It is safe to call multiple times.
func (c *opsWSQPSCache) Stop() {
	if c == nil {
		return
	}

	c.mu.Lock()
	if !c.running {
		// Not running, but a previous loop may still be draining: wait for it
		// so callers observe "fully stopped" on return.
		done := c.done
		c.mu.Unlock()
		if done != nil {
			<-done
		}
		return
	}
	// Snapshot and clear the running state under the lock, then cancel and
	// wait outside it to avoid blocking other callers.
	cancel := c.cancel
	c.cancel = nil
	c.running = false
	c.opsService = nil
	done := c.done
	c.mu.Unlock()

	if cancel != nil {
		cancel()
	}
	if done != nil {
		<-done
	}

	// Clear the done channel only if no concurrent start replaced it.
	c.mu.Lock()
	if c.done == done && !c.running {
		c.done = nil
	}
	c.mu.Unlock()
}
|
||||||
|
|
||||||
|
func (c *opsWSQPSCache) refreshLoop(ctx context.Context) {
|
||||||
|
ticker := time.NewTicker(c.refreshInterval)
|
||||||
|
defer ticker.Stop()
|
||||||
|
|
||||||
|
c.refresh(ctx)
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ticker.C:
|
||||||
|
c.refresh(ctx)
|
||||||
|
case <-ctx.Done():
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *opsWSQPSCache) refresh(parentCtx context.Context) {
|
||||||
|
if c == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.mu.Lock()
|
||||||
|
opsService := c.opsService
|
||||||
|
c.mu.Unlock()
|
||||||
|
if opsService == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if parentCtx == nil {
|
||||||
|
parentCtx = context.Background()
|
||||||
|
}
|
||||||
|
ctx, cancel := context.WithTimeout(parentCtx, 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
now := time.Now().UTC()
|
||||||
|
stats, err := opsService.GetWindowStats(ctx, now.Add(-c.requestCountWindow), now)
|
||||||
|
if err != nil || stats == nil {
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("[OpsWS] refresh: get window stats failed: %v", err)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
requestCount := stats.SuccessCount + stats.ErrorCountTotal
|
||||||
|
qps := 0.0
|
||||||
|
tps := 0.0
|
||||||
|
if c.requestCountWindow > 0 {
|
||||||
|
seconds := c.requestCountWindow.Seconds()
|
||||||
|
qps = roundTo1DP(float64(requestCount) / seconds)
|
||||||
|
tps = roundTo1DP(float64(stats.TokenConsumed) / seconds)
|
||||||
|
}
|
||||||
|
|
||||||
|
payload := gin.H{
|
||||||
|
"type": "qps_update",
|
||||||
|
"timestamp": now.Format(time.RFC3339),
|
||||||
|
"data": gin.H{
|
||||||
|
"qps": qps,
|
||||||
|
"tps": tps,
|
||||||
|
"request_count": requestCount,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
msg, err := json.Marshal(payload)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("[OpsWS] refresh: marshal payload failed: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.payload.Store(msg)
|
||||||
|
c.lastUpdatedUnixNano.Store(now.UnixNano())
|
||||||
|
}
|
||||||
|
|
||||||
|
// roundTo1DP rounds v to one decimal place (half away from zero).
func roundTo1DP(v float64) float64 {
	const scale = 10.0
	return math.Round(v*scale) / scale
}
|
||||||
|
|
||||||
|
func (c *opsWSQPSCache) getPayload() []byte {
|
||||||
|
if c == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if cached, ok := c.payload.Load().([]byte); ok && cached != nil {
|
||||||
|
return cached
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func closeWS(conn *websocket.Conn, code int, reason string) {
|
||||||
|
if conn == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
msg := websocket.FormatCloseMessage(code, reason)
|
||||||
|
_ = conn.WriteControl(websocket.CloseMessage, msg, time.Now().Add(qpsWSWriteTimeout))
|
||||||
|
_ = conn.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// QPSWSHandler handles realtime QPS push via WebSocket.
// GET /api/v1/admin/ops/ws/qps
func (h *OpsHandler) QPSWSHandler(c *gin.Context) {
	clientIP := requestClientIP(c.Request)

	if h == nil || h.opsService == nil {
		c.JSON(http.StatusServiceUnavailable, gin.H{"error": "ops service not initialized"})
		return
	}

	// If realtime monitoring is disabled, prefer a successful WS upgrade followed by a clean close
	// with a deterministic close code. This prevents clients from spinning on 404/1006 reconnect loops.
	if !h.opsService.IsRealtimeMonitoringEnabled(c.Request.Context()) {
		conn, err := upgrader.Upgrade(c.Writer, c.Request, nil)
		if err != nil {
			c.JSON(http.StatusNotFound, gin.H{"error": "ops realtime monitoring is disabled"})
			return
		}
		closeWS(conn, opsWSCloseRealtimeDisabled, "realtime_disabled")
		return
	}

	// A client is (re)connecting: abort any pending cache shutdown.
	cancelQPSWSIdleStop()
	// Lazily start the background refresh loop so unit tests that never hit the
	// websocket route don't spawn goroutines that depend on DB/Redis stubs.
	qpsWSCache.start(h.opsService)

	// Reserve a global slot before upgrading the connection to keep the limit strict.
	if !tryAcquireOpsWSTotalSlot(opsWSLimits.MaxConns) {
		log.Printf("[OpsWS] connection limit reached: %d/%d", wsConnCount.Load(), opsWSLimits.MaxConns)
		c.JSON(http.StatusServiceUnavailable, gin.H{"error": "too many connections"})
		return
	}
	defer func() {
		// Release the global slot; when the last connection leaves, schedule
		// the cache's idle shutdown.
		if wsConnCount.Add(-1) == 0 {
			scheduleQPSWSIdleStop()
		}
	}()

	if opsWSLimits.MaxConnsPerIP > 0 && clientIP != "" {
		if !tryAcquireOpsWSIPSlot(clientIP, opsWSLimits.MaxConnsPerIP) {
			log.Printf("[OpsWS] per-ip connection limit reached: ip=%s limit=%d", clientIP, opsWSLimits.MaxConnsPerIP)
			c.JSON(http.StatusServiceUnavailable, gin.H{"error": "too many connections"})
			return
		}
		defer releaseOpsWSIPSlot(clientIP)
	}

	conn, err := upgrader.Upgrade(c.Writer, c.Request, nil)
	if err != nil {
		log.Printf("[OpsWS] upgrade failed: %v", err)
		return
	}

	defer func() {
		_ = conn.Close()
	}()

	handleQPSWebSocket(c.Request.Context(), conn)
}
|
||||||
|
|
||||||
|
func tryAcquireOpsWSTotalSlot(limit int32) bool {
|
||||||
|
if limit <= 0 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
current := wsConnCount.Load()
|
||||||
|
if current >= limit {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if wsConnCount.CompareAndSwap(current, current+1) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func tryAcquireOpsWSIPSlot(clientIP string, limit int32) bool {
|
||||||
|
if strings.TrimSpace(clientIP) == "" || limit <= 0 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
v, _ := wsConnCountByIP.LoadOrStore(clientIP, &atomic.Int32{})
|
||||||
|
counter, ok := v.(*atomic.Int32)
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
current := counter.Load()
|
||||||
|
if current >= limit {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if counter.CompareAndSwap(current, current+1) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func releaseOpsWSIPSlot(clientIP string) {
|
||||||
|
if strings.TrimSpace(clientIP) == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
v, ok := wsConnCountByIP.Load(clientIP)
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
counter, ok := v.(*atomic.Int32)
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
next := counter.Add(-1)
|
||||||
|
if next <= 0 {
|
||||||
|
// Best-effort cleanup; safe even if a new slot was acquired concurrently.
|
||||||
|
wsConnCountByIP.Delete(clientIP)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleQPSWebSocket drives one established QPS connection: a reader goroutine
// consumes control frames (pong/close) while this goroutine pushes cached QPS
// payloads and heartbeat pings until the context is canceled, the client
// closes, or a write fails.
func handleQPSWebSocket(parentCtx context.Context, conn *websocket.Conn) {
	if conn == nil {
		return
	}

	ctx, cancel := context.WithCancel(parentCtx)
	defer cancel()

	// closeConn is idempotent so both the writer paths and teardown can call it.
	var closeOnce sync.Once
	closeConn := func() {
		closeOnce.Do(func() {
			_ = conn.Close()
		})
	}

	// Buffered so the close handler never blocks while handing over the
	// client's close frame for echoing.
	closeFrameCh := make(chan []byte, 1)

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		defer cancel()

		conn.SetReadLimit(qpsWSMaxReadBytes)
		if err := conn.SetReadDeadline(time.Now().Add(qpsWSPongWait)); err != nil {
			log.Printf("[OpsWS] set read deadline failed: %v", err)
			return
		}
		// Each pong extends the read deadline; missing pongs eventually fail the read.
		conn.SetPongHandler(func(string) error {
			return conn.SetReadDeadline(time.Now().Add(qpsWSPongWait))
		})
		// Capture the client's close frame (non-blocking) and wake the writer.
		conn.SetCloseHandler(func(code int, text string) error {
			select {
			case closeFrameCh <- websocket.FormatCloseMessage(code, text):
			default:
			}
			cancel()
			return nil
		})

		// Read loop exists only to process control frames; app messages are ignored.
		for {
			_, _, err := conn.ReadMessage()
			if err != nil {
				if websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway, websocket.CloseNoStatusReceived) {
					log.Printf("[OpsWS] read failed: %v", err)
				}
				return
			}
		}
	}()

	// Push QPS data every 2 seconds (values are globally cached and refreshed at most once per qpsWSRefreshInterval).
	pushTicker := time.NewTicker(qpsWSPushInterval)
	defer pushTicker.Stop()

	// Heartbeat ping every 30 seconds.
	pingTicker := time.NewTicker(qpsWSPingInterval)
	defer pingTicker.Stop()

	writeWithTimeout := func(messageType int, data []byte) error {
		if err := conn.SetWriteDeadline(time.Now().Add(qpsWSWriteTimeout)); err != nil {
			return err
		}
		return conn.WriteMessage(messageType, data)
	}

	// sendClose echoes the client's close frame when available, otherwise a
	// normal-closure frame.
	sendClose := func(closeFrame []byte) {
		if closeFrame == nil {
			closeFrame = websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")
		}
		_ = writeWithTimeout(websocket.CloseMessage, closeFrame)
	}

	for {
		select {
		case <-pushTicker.C:
			msg := qpsWSCache.getPayload()
			if msg == nil {
				// Cache not primed yet; skip this tick.
				continue
			}
			if err := writeWithTimeout(websocket.TextMessage, msg); err != nil {
				log.Printf("[OpsWS] write failed: %v", err)
				cancel()
				closeConn()
				wg.Wait()
				return
			}

		case <-pingTicker.C:
			if err := writeWithTimeout(websocket.PingMessage, nil); err != nil {
				log.Printf("[OpsWS] ping failed: %v", err)
				cancel()
				closeConn()
				wg.Wait()
				return
			}

		case closeFrame := <-closeFrameCh:
			// Client-initiated close: echo its frame, then tear down.
			sendClose(closeFrame)
			closeConn()
			wg.Wait()
			return

		case <-ctx.Done():
			// Context canceled: prefer echoing a client close frame if one arrived.
			var closeFrame []byte
			select {
			case closeFrame = <-closeFrameCh:
			default:
			}
			sendClose(closeFrame)

			closeConn()
			wg.Wait()
			return
		}
	}
}
|
||||||
|
|
||||||
|
func isAllowedOpsWSOrigin(r *http.Request) bool {
|
||||||
|
if r == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
origin := strings.TrimSpace(r.Header.Get("Origin"))
|
||||||
|
if origin == "" {
|
||||||
|
switch strings.ToLower(strings.TrimSpace(opsWSProxyConfig.OriginPolicy)) {
|
||||||
|
case OriginPolicyStrict:
|
||||||
|
return false
|
||||||
|
case OriginPolicyPermissive, "":
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
parsed, err := url.Parse(origin)
|
||||||
|
if err != nil || parsed.Hostname() == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
originHost := strings.ToLower(parsed.Hostname())
|
||||||
|
|
||||||
|
trustProxyHeaders := shouldTrustOpsWSProxyHeaders(r)
|
||||||
|
reqHost := hostWithoutPort(r.Host)
|
||||||
|
if trustProxyHeaders {
|
||||||
|
xfHost := strings.TrimSpace(r.Header.Get("X-Forwarded-Host"))
|
||||||
|
if xfHost != "" {
|
||||||
|
xfHost = strings.TrimSpace(strings.Split(xfHost, ",")[0])
|
||||||
|
if xfHost != "" {
|
||||||
|
reqHost = hostWithoutPort(xfHost)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
reqHost = strings.ToLower(reqHost)
|
||||||
|
if reqHost == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return originHost == reqHost
|
||||||
|
}
|
||||||
|
|
||||||
|
func shouldTrustOpsWSProxyHeaders(r *http.Request) bool {
|
||||||
|
if r == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if !opsWSProxyConfig.TrustProxy {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
peerIP, ok := requestPeerIP(r)
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return isAddrInTrustedProxies(peerIP, opsWSProxyConfig.TrustedProxies)
|
||||||
|
}
|
||||||
|
|
||||||
|
func requestPeerIP(r *http.Request) (netip.Addr, bool) {
|
||||||
|
if r == nil {
|
||||||
|
return netip.Addr{}, false
|
||||||
|
}
|
||||||
|
host, _, err := net.SplitHostPort(strings.TrimSpace(r.RemoteAddr))
|
||||||
|
if err != nil {
|
||||||
|
host = strings.TrimSpace(r.RemoteAddr)
|
||||||
|
}
|
||||||
|
host = strings.TrimPrefix(host, "[")
|
||||||
|
host = strings.TrimSuffix(host, "]")
|
||||||
|
if host == "" {
|
||||||
|
return netip.Addr{}, false
|
||||||
|
}
|
||||||
|
addr, err := netip.ParseAddr(host)
|
||||||
|
if err != nil {
|
||||||
|
return netip.Addr{}, false
|
||||||
|
}
|
||||||
|
return addr.Unmap(), true
|
||||||
|
}
|
||||||
|
|
||||||
|
func requestClientIP(r *http.Request) string {
|
||||||
|
if r == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
trustProxyHeaders := shouldTrustOpsWSProxyHeaders(r)
|
||||||
|
if trustProxyHeaders {
|
||||||
|
xff := strings.TrimSpace(r.Header.Get("X-Forwarded-For"))
|
||||||
|
if xff != "" {
|
||||||
|
// Use the left-most entry (original client). If multiple proxies add values, they are comma-separated.
|
||||||
|
xff = strings.TrimSpace(strings.Split(xff, ",")[0])
|
||||||
|
xff = strings.TrimPrefix(xff, "[")
|
||||||
|
xff = strings.TrimSuffix(xff, "]")
|
||||||
|
if addr, err := netip.ParseAddr(xff); err == nil && addr.IsValid() {
|
||||||
|
return addr.Unmap().String()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if peer, ok := requestPeerIP(r); ok && peer.IsValid() {
|
||||||
|
return peer.String()
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func isAddrInTrustedProxies(addr netip.Addr, trusted []netip.Prefix) bool {
|
||||||
|
if !addr.IsValid() {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for _, p := range trusted {
|
||||||
|
if p.Contains(addr) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func loadOpsWSProxyConfigFromEnv() OpsWSProxyConfig {
|
||||||
|
cfg := OpsWSProxyConfig{
|
||||||
|
TrustProxy: true,
|
||||||
|
TrustedProxies: defaultTrustedProxies(),
|
||||||
|
OriginPolicy: OriginPolicyPermissive,
|
||||||
|
}
|
||||||
|
|
||||||
|
if v := strings.TrimSpace(os.Getenv(envOpsWSTrustProxy)); v != "" {
|
||||||
|
if parsed, err := strconv.ParseBool(v); err == nil {
|
||||||
|
cfg.TrustProxy = parsed
|
||||||
|
} else {
|
||||||
|
log.Printf("[OpsWS] invalid %s=%q (expected bool); using default=%v", envOpsWSTrustProxy, v, cfg.TrustProxy)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if raw := strings.TrimSpace(os.Getenv(envOpsWSTrustedProxies)); raw != "" {
|
||||||
|
prefixes, invalid := parseTrustedProxyList(raw)
|
||||||
|
if len(invalid) > 0 {
|
||||||
|
log.Printf("[OpsWS] invalid %s entries ignored: %s", envOpsWSTrustedProxies, strings.Join(invalid, ", "))
|
||||||
|
}
|
||||||
|
cfg.TrustedProxies = prefixes
|
||||||
|
}
|
||||||
|
|
||||||
|
if v := strings.TrimSpace(os.Getenv(envOpsWSOriginPolicy)); v != "" {
|
||||||
|
normalized := strings.ToLower(v)
|
||||||
|
switch normalized {
|
||||||
|
case OriginPolicyStrict, OriginPolicyPermissive:
|
||||||
|
cfg.OriginPolicy = normalized
|
||||||
|
default:
|
||||||
|
log.Printf("[OpsWS] invalid %s=%q (expected %q or %q); using default=%q", envOpsWSOriginPolicy, v, OriginPolicyStrict, OriginPolicyPermissive, cfg.OriginPolicy)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return cfg
|
||||||
|
}
|
||||||
|
|
||||||
|
func loadOpsWSRuntimeLimitsFromEnv() opsWSRuntimeLimits {
|
||||||
|
cfg := opsWSRuntimeLimits{
|
||||||
|
MaxConns: defaultMaxWSConns,
|
||||||
|
MaxConnsPerIP: defaultMaxWSConnsPerIP,
|
||||||
|
}
|
||||||
|
|
||||||
|
if v := strings.TrimSpace(os.Getenv(envOpsWSMaxConns)); v != "" {
|
||||||
|
if parsed, err := strconv.Atoi(v); err == nil && parsed > 0 {
|
||||||
|
cfg.MaxConns = int32(parsed)
|
||||||
|
} else {
|
||||||
|
log.Printf("[OpsWS] invalid %s=%q (expected int>0); using default=%d", envOpsWSMaxConns, v, cfg.MaxConns)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v := strings.TrimSpace(os.Getenv(envOpsWSMaxConnsPerIP)); v != "" {
|
||||||
|
if parsed, err := strconv.Atoi(v); err == nil && parsed >= 0 {
|
||||||
|
cfg.MaxConnsPerIP = int32(parsed)
|
||||||
|
} else {
|
||||||
|
log.Printf("[OpsWS] invalid %s=%q (expected int>=0); using default=%d", envOpsWSMaxConnsPerIP, v, cfg.MaxConnsPerIP)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return cfg
|
||||||
|
}
|
||||||
|
|
||||||
|
// defaultTrustedProxies returns the loopback ranges (IPv4 127.0.0.0/8 and
// IPv6 ::1/128) trusted as reverse proxies when nothing is configured.
func defaultTrustedProxies() []netip.Prefix {
	return []netip.Prefix{
		netip.MustParsePrefix("127.0.0.0/8"),
		netip.MustParsePrefix("::1/128"),
	}
}
|
||||||
|
|
||||||
|
// parseTrustedProxyList parses a comma-separated list of CIDR prefixes and
// bare IP addresses. Bare addresses become single-host prefixes (/32 for
// IPv4, /128 for IPv6; IPv4-mapped IPv6 is unmapped first). Valid entries
// are returned masked; tokens that fail to parse are collected verbatim in
// invalid. Empty tokens are skipped.
func parseTrustedProxyList(raw string) (prefixes []netip.Prefix, invalid []string) {
	for _, token := range strings.Split(raw, ",") {
		entry := strings.TrimSpace(token)
		if entry == "" {
			continue
		}

		var (
			pfx      netip.Prefix
			parseErr error
		)
		if strings.Contains(entry, "/") {
			pfx, parseErr = netip.ParsePrefix(entry)
		} else {
			var addr netip.Addr
			if addr, parseErr = netip.ParseAddr(entry); parseErr == nil {
				addr = addr.Unmap()
				width := 128
				if addr.Is4() {
					width = 32
				}
				pfx = netip.PrefixFrom(addr, width)
			}
		}

		if parseErr != nil || !pfx.IsValid() {
			invalid = append(invalid, entry)
			continue
		}
		prefixes = append(prefixes, pfx.Masked())
	}
	return prefixes, invalid
}
|
||||||
|
|
||||||
|
// hostWithoutPort strips an optional port from hostport and returns the bare
// host, removing IPv6 brackets. It accepts "host:port", "[v6]:port", "[v6]",
// bare hostnames/IPv4, and bare IPv6 addresses. Empty or all-whitespace
// input yields "".
func hostWithoutPort(hostport string) string {
	hostport = strings.TrimSpace(hostport)
	if hostport == "" {
		return ""
	}
	if host, _, err := net.SplitHostPort(hostport); err == nil {
		return host
	}
	if strings.HasPrefix(hostport, "[") && strings.HasSuffix(hostport, "]") {
		return strings.Trim(hostport, "[]")
	}
	// A bare IPv6 address ("::1", "2001:db8::1") has multiple colons but no
	// brackets and no port; taking the first colon-separated segment would
	// wrongly return "" or a truncated address, so return it whole.
	if strings.Count(hostport, ":") > 1 {
		return hostport
	}
	host, _, _ := strings.Cut(hostport, ":")
	return host
}
|
||||||
209
backend/internal/handler/admin/promo_handler.go
Normal file
209
backend/internal/handler/admin/promo_handler.go
Normal file
@@ -0,0 +1,209 @@
|
|||||||
|
package admin
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/handler/dto"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/pkg/pagination"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/pkg/response"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||||
|
|
||||||
|
"github.com/gin-gonic/gin"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PromoHandler handles admin promo code management
|
||||||
|
type PromoHandler struct {
|
||||||
|
promoService *service.PromoService
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewPromoHandler creates a new admin promo handler
|
||||||
|
func NewPromoHandler(promoService *service.PromoService) *PromoHandler {
|
||||||
|
return &PromoHandler{
|
||||||
|
promoService: promoService,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatePromoCodeRequest represents create promo code request
|
||||||
|
type CreatePromoCodeRequest struct {
|
||||||
|
Code string `json:"code"` // 可选,为空则自动生成
|
||||||
|
BonusAmount float64 `json:"bonus_amount" binding:"required,min=0"` // 赠送余额
|
||||||
|
MaxUses int `json:"max_uses" binding:"min=0"` // 最大使用次数,0=无限
|
||||||
|
ExpiresAt *int64 `json:"expires_at"` // 过期时间戳(秒)
|
||||||
|
Notes string `json:"notes"` // 备注
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatePromoCodeRequest represents update promo code request
|
||||||
|
type UpdatePromoCodeRequest struct {
|
||||||
|
Code *string `json:"code"`
|
||||||
|
BonusAmount *float64 `json:"bonus_amount" binding:"omitempty,min=0"`
|
||||||
|
MaxUses *int `json:"max_uses" binding:"omitempty,min=0"`
|
||||||
|
Status *string `json:"status" binding:"omitempty,oneof=active disabled"`
|
||||||
|
ExpiresAt *int64 `json:"expires_at"`
|
||||||
|
Notes *string `json:"notes"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// List handles listing all promo codes with pagination
|
||||||
|
// GET /api/v1/admin/promo-codes
|
||||||
|
func (h *PromoHandler) List(c *gin.Context) {
|
||||||
|
page, pageSize := response.ParsePagination(c)
|
||||||
|
status := c.Query("status")
|
||||||
|
search := strings.TrimSpace(c.Query("search"))
|
||||||
|
if len(search) > 100 {
|
||||||
|
search = search[:100]
|
||||||
|
}
|
||||||
|
|
||||||
|
params := pagination.PaginationParams{
|
||||||
|
Page: page,
|
||||||
|
PageSize: pageSize,
|
||||||
|
}
|
||||||
|
|
||||||
|
codes, paginationResult, err := h.promoService.List(c.Request.Context(), params, status, search)
|
||||||
|
if err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
out := make([]dto.PromoCode, 0, len(codes))
|
||||||
|
for i := range codes {
|
||||||
|
out = append(out, *dto.PromoCodeFromService(&codes[i]))
|
||||||
|
}
|
||||||
|
response.Paginated(c, out, paginationResult.Total, page, pageSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetByID handles getting a promo code by ID
|
||||||
|
// GET /api/v1/admin/promo-codes/:id
|
||||||
|
func (h *PromoHandler) GetByID(c *gin.Context) {
|
||||||
|
codeID, err := strconv.ParseInt(c.Param("id"), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
response.BadRequest(c, "Invalid promo code ID")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
code, err := h.promoService.GetByID(c.Request.Context(), codeID)
|
||||||
|
if err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
response.Success(c, dto.PromoCodeFromService(code))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create handles creating a new promo code
|
||||||
|
// POST /api/v1/admin/promo-codes
|
||||||
|
func (h *PromoHandler) Create(c *gin.Context) {
|
||||||
|
var req CreatePromoCodeRequest
|
||||||
|
if err := c.ShouldBindJSON(&req); err != nil {
|
||||||
|
response.BadRequest(c, "Invalid request: "+err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
input := &service.CreatePromoCodeInput{
|
||||||
|
Code: req.Code,
|
||||||
|
BonusAmount: req.BonusAmount,
|
||||||
|
MaxUses: req.MaxUses,
|
||||||
|
Notes: req.Notes,
|
||||||
|
}
|
||||||
|
|
||||||
|
if req.ExpiresAt != nil {
|
||||||
|
t := time.Unix(*req.ExpiresAt, 0)
|
||||||
|
input.ExpiresAt = &t
|
||||||
|
}
|
||||||
|
|
||||||
|
code, err := h.promoService.Create(c.Request.Context(), input)
|
||||||
|
if err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
response.Success(c, dto.PromoCodeFromService(code))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update handles updating a promo code
|
||||||
|
// PUT /api/v1/admin/promo-codes/:id
|
||||||
|
func (h *PromoHandler) Update(c *gin.Context) {
|
||||||
|
codeID, err := strconv.ParseInt(c.Param("id"), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
response.BadRequest(c, "Invalid promo code ID")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var req UpdatePromoCodeRequest
|
||||||
|
if err := c.ShouldBindJSON(&req); err != nil {
|
||||||
|
response.BadRequest(c, "Invalid request: "+err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
input := &service.UpdatePromoCodeInput{
|
||||||
|
Code: req.Code,
|
||||||
|
BonusAmount: req.BonusAmount,
|
||||||
|
MaxUses: req.MaxUses,
|
||||||
|
Status: req.Status,
|
||||||
|
Notes: req.Notes,
|
||||||
|
}
|
||||||
|
|
||||||
|
if req.ExpiresAt != nil {
|
||||||
|
if *req.ExpiresAt == 0 {
|
||||||
|
// 0 表示清除过期时间
|
||||||
|
input.ExpiresAt = nil
|
||||||
|
} else {
|
||||||
|
t := time.Unix(*req.ExpiresAt, 0)
|
||||||
|
input.ExpiresAt = &t
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
code, err := h.promoService.Update(c.Request.Context(), codeID, input)
|
||||||
|
if err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
response.Success(c, dto.PromoCodeFromService(code))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete handles deleting a promo code
|
||||||
|
// DELETE /api/v1/admin/promo-codes/:id
|
||||||
|
func (h *PromoHandler) Delete(c *gin.Context) {
|
||||||
|
codeID, err := strconv.ParseInt(c.Param("id"), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
response.BadRequest(c, "Invalid promo code ID")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
err = h.promoService.Delete(c.Request.Context(), codeID)
|
||||||
|
if err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
response.Success(c, gin.H{"message": "Promo code deleted successfully"})
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetUsages handles getting usage records for a promo code
|
||||||
|
// GET /api/v1/admin/promo-codes/:id/usages
|
||||||
|
func (h *PromoHandler) GetUsages(c *gin.Context) {
|
||||||
|
codeID, err := strconv.ParseInt(c.Param("id"), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
response.BadRequest(c, "Invalid promo code ID")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
page, pageSize := response.ParsePagination(c)
|
||||||
|
params := pagination.PaginationParams{
|
||||||
|
Page: page,
|
||||||
|
PageSize: pageSize,
|
||||||
|
}
|
||||||
|
|
||||||
|
usages, paginationResult, err := h.promoService.ListUsages(c.Request.Context(), codeID, params)
|
||||||
|
if err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
out := make([]dto.PromoCodeUsage, 0, len(usages))
|
||||||
|
for i := range usages {
|
||||||
|
out = append(out, *dto.PromoCodeUsageFromService(&usages[i]))
|
||||||
|
}
|
||||||
|
response.Paginated(c, out, paginationResult.Total, page, pageSize)
|
||||||
|
}
|
||||||
@@ -19,14 +19,16 @@ type SettingHandler struct {
|
|||||||
settingService *service.SettingService
|
settingService *service.SettingService
|
||||||
emailService *service.EmailService
|
emailService *service.EmailService
|
||||||
turnstileService *service.TurnstileService
|
turnstileService *service.TurnstileService
|
||||||
|
opsService *service.OpsService
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewSettingHandler 创建系统设置处理器
|
// NewSettingHandler 创建系统设置处理器
|
||||||
func NewSettingHandler(settingService *service.SettingService, emailService *service.EmailService, turnstileService *service.TurnstileService) *SettingHandler {
|
func NewSettingHandler(settingService *service.SettingService, emailService *service.EmailService, turnstileService *service.TurnstileService, opsService *service.OpsService) *SettingHandler {
|
||||||
return &SettingHandler{
|
return &SettingHandler{
|
||||||
settingService: settingService,
|
settingService: settingService,
|
||||||
emailService: emailService,
|
emailService: emailService,
|
||||||
turnstileService: turnstileService,
|
turnstileService: turnstileService,
|
||||||
|
opsService: opsService,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -39,6 +41,9 @@ func (h *SettingHandler) GetSettings(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Check if ops monitoring is enabled (respects config.ops.enabled)
|
||||||
|
opsEnabled := h.opsService != nil && h.opsService.IsMonitoringEnabled(c.Request.Context())
|
||||||
|
|
||||||
response.Success(c, dto.SystemSettings{
|
response.Success(c, dto.SystemSettings{
|
||||||
RegistrationEnabled: settings.RegistrationEnabled,
|
RegistrationEnabled: settings.RegistrationEnabled,
|
||||||
EmailVerifyEnabled: settings.EmailVerifyEnabled,
|
EmailVerifyEnabled: settings.EmailVerifyEnabled,
|
||||||
@@ -62,6 +67,7 @@ func (h *SettingHandler) GetSettings(c *gin.Context) {
|
|||||||
APIBaseURL: settings.APIBaseURL,
|
APIBaseURL: settings.APIBaseURL,
|
||||||
ContactInfo: settings.ContactInfo,
|
ContactInfo: settings.ContactInfo,
|
||||||
DocURL: settings.DocURL,
|
DocURL: settings.DocURL,
|
||||||
|
HomeContent: settings.HomeContent,
|
||||||
DefaultConcurrency: settings.DefaultConcurrency,
|
DefaultConcurrency: settings.DefaultConcurrency,
|
||||||
DefaultBalance: settings.DefaultBalance,
|
DefaultBalance: settings.DefaultBalance,
|
||||||
EnableModelFallback: settings.EnableModelFallback,
|
EnableModelFallback: settings.EnableModelFallback,
|
||||||
@@ -71,6 +77,10 @@ func (h *SettingHandler) GetSettings(c *gin.Context) {
|
|||||||
FallbackModelAntigravity: settings.FallbackModelAntigravity,
|
FallbackModelAntigravity: settings.FallbackModelAntigravity,
|
||||||
EnableIdentityPatch: settings.EnableIdentityPatch,
|
EnableIdentityPatch: settings.EnableIdentityPatch,
|
||||||
IdentityPatchPrompt: settings.IdentityPatchPrompt,
|
IdentityPatchPrompt: settings.IdentityPatchPrompt,
|
||||||
|
OpsMonitoringEnabled: opsEnabled && settings.OpsMonitoringEnabled,
|
||||||
|
OpsRealtimeMonitoringEnabled: settings.OpsRealtimeMonitoringEnabled,
|
||||||
|
OpsQueryModeDefault: settings.OpsQueryModeDefault,
|
||||||
|
OpsMetricsIntervalSeconds: settings.OpsMetricsIntervalSeconds,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -94,7 +104,7 @@ type UpdateSettingsRequest struct {
|
|||||||
TurnstileSiteKey string `json:"turnstile_site_key"`
|
TurnstileSiteKey string `json:"turnstile_site_key"`
|
||||||
TurnstileSecretKey string `json:"turnstile_secret_key"`
|
TurnstileSecretKey string `json:"turnstile_secret_key"`
|
||||||
|
|
||||||
// LinuxDo Connect OAuth 登录(终端用户 SSO)
|
// LinuxDo Connect OAuth 登录
|
||||||
LinuxDoConnectEnabled bool `json:"linuxdo_connect_enabled"`
|
LinuxDoConnectEnabled bool `json:"linuxdo_connect_enabled"`
|
||||||
LinuxDoConnectClientID string `json:"linuxdo_connect_client_id"`
|
LinuxDoConnectClientID string `json:"linuxdo_connect_client_id"`
|
||||||
LinuxDoConnectClientSecret string `json:"linuxdo_connect_client_secret"`
|
LinuxDoConnectClientSecret string `json:"linuxdo_connect_client_secret"`
|
||||||
@@ -107,6 +117,7 @@ type UpdateSettingsRequest struct {
|
|||||||
APIBaseURL string `json:"api_base_url"`
|
APIBaseURL string `json:"api_base_url"`
|
||||||
ContactInfo string `json:"contact_info"`
|
ContactInfo string `json:"contact_info"`
|
||||||
DocURL string `json:"doc_url"`
|
DocURL string `json:"doc_url"`
|
||||||
|
HomeContent string `json:"home_content"`
|
||||||
|
|
||||||
// 默认配置
|
// 默认配置
|
||||||
DefaultConcurrency int `json:"default_concurrency"`
|
DefaultConcurrency int `json:"default_concurrency"`
|
||||||
@@ -122,6 +133,12 @@ type UpdateSettingsRequest struct {
|
|||||||
// Identity patch configuration (Claude -> Gemini)
|
// Identity patch configuration (Claude -> Gemini)
|
||||||
EnableIdentityPatch bool `json:"enable_identity_patch"`
|
EnableIdentityPatch bool `json:"enable_identity_patch"`
|
||||||
IdentityPatchPrompt string `json:"identity_patch_prompt"`
|
IdentityPatchPrompt string `json:"identity_patch_prompt"`
|
||||||
|
|
||||||
|
// Ops monitoring (vNext)
|
||||||
|
OpsMonitoringEnabled *bool `json:"ops_monitoring_enabled"`
|
||||||
|
OpsRealtimeMonitoringEnabled *bool `json:"ops_realtime_monitoring_enabled"`
|
||||||
|
OpsQueryModeDefault *string `json:"ops_query_mode_default"`
|
||||||
|
OpsMetricsIntervalSeconds *int `json:"ops_metrics_interval_seconds"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// UpdateSettings 更新系统设置
|
// UpdateSettings 更新系统设置
|
||||||
@@ -206,6 +223,18 @@ func (h *SettingHandler) UpdateSettings(c *gin.Context) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Ops metrics collector interval validation (seconds).
|
||||||
|
if req.OpsMetricsIntervalSeconds != nil {
|
||||||
|
v := *req.OpsMetricsIntervalSeconds
|
||||||
|
if v < 60 {
|
||||||
|
v = 60
|
||||||
|
}
|
||||||
|
if v > 3600 {
|
||||||
|
v = 3600
|
||||||
|
}
|
||||||
|
req.OpsMetricsIntervalSeconds = &v
|
||||||
|
}
|
||||||
|
|
||||||
settings := &service.SystemSettings{
|
settings := &service.SystemSettings{
|
||||||
RegistrationEnabled: req.RegistrationEnabled,
|
RegistrationEnabled: req.RegistrationEnabled,
|
||||||
EmailVerifyEnabled: req.EmailVerifyEnabled,
|
EmailVerifyEnabled: req.EmailVerifyEnabled,
|
||||||
@@ -229,6 +258,7 @@ func (h *SettingHandler) UpdateSettings(c *gin.Context) {
|
|||||||
APIBaseURL: req.APIBaseURL,
|
APIBaseURL: req.APIBaseURL,
|
||||||
ContactInfo: req.ContactInfo,
|
ContactInfo: req.ContactInfo,
|
||||||
DocURL: req.DocURL,
|
DocURL: req.DocURL,
|
||||||
|
HomeContent: req.HomeContent,
|
||||||
DefaultConcurrency: req.DefaultConcurrency,
|
DefaultConcurrency: req.DefaultConcurrency,
|
||||||
DefaultBalance: req.DefaultBalance,
|
DefaultBalance: req.DefaultBalance,
|
||||||
EnableModelFallback: req.EnableModelFallback,
|
EnableModelFallback: req.EnableModelFallback,
|
||||||
@@ -238,6 +268,30 @@ func (h *SettingHandler) UpdateSettings(c *gin.Context) {
|
|||||||
FallbackModelAntigravity: req.FallbackModelAntigravity,
|
FallbackModelAntigravity: req.FallbackModelAntigravity,
|
||||||
EnableIdentityPatch: req.EnableIdentityPatch,
|
EnableIdentityPatch: req.EnableIdentityPatch,
|
||||||
IdentityPatchPrompt: req.IdentityPatchPrompt,
|
IdentityPatchPrompt: req.IdentityPatchPrompt,
|
||||||
|
OpsMonitoringEnabled: func() bool {
|
||||||
|
if req.OpsMonitoringEnabled != nil {
|
||||||
|
return *req.OpsMonitoringEnabled
|
||||||
|
}
|
||||||
|
return previousSettings.OpsMonitoringEnabled
|
||||||
|
}(),
|
||||||
|
OpsRealtimeMonitoringEnabled: func() bool {
|
||||||
|
if req.OpsRealtimeMonitoringEnabled != nil {
|
||||||
|
return *req.OpsRealtimeMonitoringEnabled
|
||||||
|
}
|
||||||
|
return previousSettings.OpsRealtimeMonitoringEnabled
|
||||||
|
}(),
|
||||||
|
OpsQueryModeDefault: func() string {
|
||||||
|
if req.OpsQueryModeDefault != nil {
|
||||||
|
return *req.OpsQueryModeDefault
|
||||||
|
}
|
||||||
|
return previousSettings.OpsQueryModeDefault
|
||||||
|
}(),
|
||||||
|
OpsMetricsIntervalSeconds: func() int {
|
||||||
|
if req.OpsMetricsIntervalSeconds != nil {
|
||||||
|
return *req.OpsMetricsIntervalSeconds
|
||||||
|
}
|
||||||
|
return previousSettings.OpsMetricsIntervalSeconds
|
||||||
|
}(),
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := h.settingService.UpdateSettings(c.Request.Context(), settings); err != nil {
|
if err := h.settingService.UpdateSettings(c.Request.Context(), settings); err != nil {
|
||||||
@@ -277,6 +331,7 @@ func (h *SettingHandler) UpdateSettings(c *gin.Context) {
|
|||||||
APIBaseURL: updatedSettings.APIBaseURL,
|
APIBaseURL: updatedSettings.APIBaseURL,
|
||||||
ContactInfo: updatedSettings.ContactInfo,
|
ContactInfo: updatedSettings.ContactInfo,
|
||||||
DocURL: updatedSettings.DocURL,
|
DocURL: updatedSettings.DocURL,
|
||||||
|
HomeContent: updatedSettings.HomeContent,
|
||||||
DefaultConcurrency: updatedSettings.DefaultConcurrency,
|
DefaultConcurrency: updatedSettings.DefaultConcurrency,
|
||||||
DefaultBalance: updatedSettings.DefaultBalance,
|
DefaultBalance: updatedSettings.DefaultBalance,
|
||||||
EnableModelFallback: updatedSettings.EnableModelFallback,
|
EnableModelFallback: updatedSettings.EnableModelFallback,
|
||||||
@@ -286,6 +341,10 @@ func (h *SettingHandler) UpdateSettings(c *gin.Context) {
|
|||||||
FallbackModelAntigravity: updatedSettings.FallbackModelAntigravity,
|
FallbackModelAntigravity: updatedSettings.FallbackModelAntigravity,
|
||||||
EnableIdentityPatch: updatedSettings.EnableIdentityPatch,
|
EnableIdentityPatch: updatedSettings.EnableIdentityPatch,
|
||||||
IdentityPatchPrompt: updatedSettings.IdentityPatchPrompt,
|
IdentityPatchPrompt: updatedSettings.IdentityPatchPrompt,
|
||||||
|
OpsMonitoringEnabled: updatedSettings.OpsMonitoringEnabled,
|
||||||
|
OpsRealtimeMonitoringEnabled: updatedSettings.OpsRealtimeMonitoringEnabled,
|
||||||
|
OpsQueryModeDefault: updatedSettings.OpsQueryModeDefault,
|
||||||
|
OpsMetricsIntervalSeconds: updatedSettings.OpsMetricsIntervalSeconds,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -377,6 +436,9 @@ func diffSettings(before *service.SystemSettings, after *service.SystemSettings,
|
|||||||
if before.DocURL != after.DocURL {
|
if before.DocURL != after.DocURL {
|
||||||
changed = append(changed, "doc_url")
|
changed = append(changed, "doc_url")
|
||||||
}
|
}
|
||||||
|
if before.HomeContent != after.HomeContent {
|
||||||
|
changed = append(changed, "home_content")
|
||||||
|
}
|
||||||
if before.DefaultConcurrency != after.DefaultConcurrency {
|
if before.DefaultConcurrency != after.DefaultConcurrency {
|
||||||
changed = append(changed, "default_concurrency")
|
changed = append(changed, "default_concurrency")
|
||||||
}
|
}
|
||||||
@@ -404,6 +466,18 @@ func diffSettings(before *service.SystemSettings, after *service.SystemSettings,
|
|||||||
if before.IdentityPatchPrompt != after.IdentityPatchPrompt {
|
if before.IdentityPatchPrompt != after.IdentityPatchPrompt {
|
||||||
changed = append(changed, "identity_patch_prompt")
|
changed = append(changed, "identity_patch_prompt")
|
||||||
}
|
}
|
||||||
|
if before.OpsMonitoringEnabled != after.OpsMonitoringEnabled {
|
||||||
|
changed = append(changed, "ops_monitoring_enabled")
|
||||||
|
}
|
||||||
|
if before.OpsRealtimeMonitoringEnabled != after.OpsRealtimeMonitoringEnabled {
|
||||||
|
changed = append(changed, "ops_realtime_monitoring_enabled")
|
||||||
|
}
|
||||||
|
if before.OpsQueryModeDefault != after.OpsQueryModeDefault {
|
||||||
|
changed = append(changed, "ops_query_mode_default")
|
||||||
|
}
|
||||||
|
if before.OpsMetricsIntervalSeconds != after.OpsMetricsIntervalSeconds {
|
||||||
|
changed = append(changed, "ops_metrics_interval_seconds")
|
||||||
|
}
|
||||||
return changed
|
return changed
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -580,3 +654,68 @@ func (h *SettingHandler) DeleteAdminAPIKey(c *gin.Context) {
|
|||||||
|
|
||||||
response.Success(c, gin.H{"message": "Admin API key deleted"})
|
response.Success(c, gin.H{"message": "Admin API key deleted"})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetStreamTimeoutSettings 获取流超时处理配置
|
||||||
|
// GET /api/v1/admin/settings/stream-timeout
|
||||||
|
func (h *SettingHandler) GetStreamTimeoutSettings(c *gin.Context) {
|
||||||
|
settings, err := h.settingService.GetStreamTimeoutSettings(c.Request.Context())
|
||||||
|
if err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
response.Success(c, dto.StreamTimeoutSettings{
|
||||||
|
Enabled: settings.Enabled,
|
||||||
|
Action: settings.Action,
|
||||||
|
TempUnschedMinutes: settings.TempUnschedMinutes,
|
||||||
|
ThresholdCount: settings.ThresholdCount,
|
||||||
|
ThresholdWindowMinutes: settings.ThresholdWindowMinutes,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateStreamTimeoutSettingsRequest 更新流超时配置请求
|
||||||
|
type UpdateStreamTimeoutSettingsRequest struct {
|
||||||
|
Enabled bool `json:"enabled"`
|
||||||
|
Action string `json:"action"`
|
||||||
|
TempUnschedMinutes int `json:"temp_unsched_minutes"`
|
||||||
|
ThresholdCount int `json:"threshold_count"`
|
||||||
|
ThresholdWindowMinutes int `json:"threshold_window_minutes"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateStreamTimeoutSettings 更新流超时处理配置
|
||||||
|
// PUT /api/v1/admin/settings/stream-timeout
|
||||||
|
func (h *SettingHandler) UpdateStreamTimeoutSettings(c *gin.Context) {
|
||||||
|
var req UpdateStreamTimeoutSettingsRequest
|
||||||
|
if err := c.ShouldBindJSON(&req); err != nil {
|
||||||
|
response.BadRequest(c, "Invalid request: "+err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
settings := &service.StreamTimeoutSettings{
|
||||||
|
Enabled: req.Enabled,
|
||||||
|
Action: req.Action,
|
||||||
|
TempUnschedMinutes: req.TempUnschedMinutes,
|
||||||
|
ThresholdCount: req.ThresholdCount,
|
||||||
|
ThresholdWindowMinutes: req.ThresholdWindowMinutes,
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := h.settingService.SetStreamTimeoutSettings(c.Request.Context(), settings); err != nil {
|
||||||
|
response.BadRequest(c, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// 重新获取设置返回
|
||||||
|
updatedSettings, err := h.settingService.GetStreamTimeoutSettings(c.Request.Context())
|
||||||
|
if err != nil {
|
||||||
|
response.ErrorFrom(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
response.Success(c, dto.StreamTimeoutSettings{
|
||||||
|
Enabled: updatedSettings.Enabled,
|
||||||
|
Action: updatedSettings.Action,
|
||||||
|
TempUnschedMinutes: updatedSettings.TempUnschedMinutes,
|
||||||
|
ThresholdCount: updatedSettings.ThresholdCount,
|
||||||
|
ThresholdWindowMinutes: updatedSettings.ThresholdWindowMinutes,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|||||||
@@ -27,16 +27,20 @@ func NewAPIKeyHandler(apiKeyService *service.APIKeyService) *APIKeyHandler {
|
|||||||
|
|
||||||
// CreateAPIKeyRequest represents the create API key request payload
|
// CreateAPIKeyRequest represents the create API key request payload
|
||||||
type CreateAPIKeyRequest struct {
|
type CreateAPIKeyRequest struct {
|
||||||
Name string `json:"name" binding:"required"`
|
Name string `json:"name" binding:"required"`
|
||||||
GroupID *int64 `json:"group_id"` // nullable
|
GroupID *int64 `json:"group_id"` // nullable
|
||||||
CustomKey *string `json:"custom_key"` // 可选的自定义key
|
CustomKey *string `json:"custom_key"` // 可选的自定义key
|
||||||
|
IPWhitelist []string `json:"ip_whitelist"` // IP 白名单
|
||||||
|
IPBlacklist []string `json:"ip_blacklist"` // IP 黑名单
|
||||||
}
|
}
|
||||||
|
|
||||||
// UpdateAPIKeyRequest represents the update API key request payload
|
// UpdateAPIKeyRequest represents the update API key request payload
|
||||||
type UpdateAPIKeyRequest struct {
|
type UpdateAPIKeyRequest struct {
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
GroupID *int64 `json:"group_id"`
|
GroupID *int64 `json:"group_id"`
|
||||||
Status string `json:"status" binding:"omitempty,oneof=active inactive"`
|
Status string `json:"status" binding:"omitempty,oneof=active inactive"`
|
||||||
|
IPWhitelist []string `json:"ip_whitelist"` // IP 白名单
|
||||||
|
IPBlacklist []string `json:"ip_blacklist"` // IP 黑名单
|
||||||
}
|
}
|
||||||
|
|
||||||
// List handles listing user's API keys with pagination
|
// List handles listing user's API keys with pagination
|
||||||
@@ -110,9 +114,11 @@ func (h *APIKeyHandler) Create(c *gin.Context) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
svcReq := service.CreateAPIKeyRequest{
|
svcReq := service.CreateAPIKeyRequest{
|
||||||
Name: req.Name,
|
Name: req.Name,
|
||||||
GroupID: req.GroupID,
|
GroupID: req.GroupID,
|
||||||
CustomKey: req.CustomKey,
|
CustomKey: req.CustomKey,
|
||||||
|
IPWhitelist: req.IPWhitelist,
|
||||||
|
IPBlacklist: req.IPBlacklist,
|
||||||
}
|
}
|
||||||
key, err := h.apiKeyService.Create(c.Request.Context(), subject.UserID, svcReq)
|
key, err := h.apiKeyService.Create(c.Request.Context(), subject.UserID, svcReq)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -144,7 +150,10 @@ func (h *APIKeyHandler) Update(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
svcReq := service.UpdateAPIKeyRequest{}
|
svcReq := service.UpdateAPIKeyRequest{
|
||||||
|
IPWhitelist: req.IPWhitelist,
|
||||||
|
IPBlacklist: req.IPBlacklist,
|
||||||
|
}
|
||||||
if req.Name != "" {
|
if req.Name != "" {
|
||||||
svcReq.Name = &req.Name
|
svcReq.Name = &req.Name
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -12,19 +12,21 @@ import (
|
|||||||
|
|
||||||
// AuthHandler handles authentication-related requests
|
// AuthHandler handles authentication-related requests
|
||||||
type AuthHandler struct {
|
type AuthHandler struct {
|
||||||
cfg *config.Config
|
cfg *config.Config
|
||||||
authService *service.AuthService
|
authService *service.AuthService
|
||||||
userService *service.UserService
|
userService *service.UserService
|
||||||
settingSvc *service.SettingService
|
settingSvc *service.SettingService
|
||||||
|
promoService *service.PromoService
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewAuthHandler creates a new AuthHandler
|
// NewAuthHandler creates a new AuthHandler
|
||||||
func NewAuthHandler(cfg *config.Config, authService *service.AuthService, userService *service.UserService, settingService *service.SettingService) *AuthHandler {
|
func NewAuthHandler(cfg *config.Config, authService *service.AuthService, userService *service.UserService, settingService *service.SettingService, promoService *service.PromoService) *AuthHandler {
|
||||||
return &AuthHandler{
|
return &AuthHandler{
|
||||||
cfg: cfg,
|
cfg: cfg,
|
||||||
authService: authService,
|
authService: authService,
|
||||||
userService: userService,
|
userService: userService,
|
||||||
settingSvc: settingService,
|
settingSvc: settingService,
|
||||||
|
promoService: promoService,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -34,6 +36,7 @@ type RegisterRequest struct {
|
|||||||
Password string `json:"password" binding:"required,min=6"`
|
Password string `json:"password" binding:"required,min=6"`
|
||||||
VerifyCode string `json:"verify_code"`
|
VerifyCode string `json:"verify_code"`
|
||||||
TurnstileToken string `json:"turnstile_token"`
|
TurnstileToken string `json:"turnstile_token"`
|
||||||
|
PromoCode string `json:"promo_code"` // 注册优惠码
|
||||||
}
|
}
|
||||||
|
|
||||||
// SendVerifyCodeRequest 发送验证码请求
|
// SendVerifyCodeRequest 发送验证码请求
|
||||||
@@ -79,7 +82,7 @@ func (h *AuthHandler) Register(c *gin.Context) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
token, user, err := h.authService.RegisterWithVerification(c.Request.Context(), req.Email, req.Password, req.VerifyCode)
|
token, user, err := h.authService.RegisterWithVerification(c.Request.Context(), req.Email, req.Password, req.VerifyCode, req.PromoCode)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
response.ErrorFrom(c, err)
|
response.ErrorFrom(c, err)
|
||||||
return
|
return
|
||||||
@@ -174,3 +177,63 @@ func (h *AuthHandler) GetCurrentUser(c *gin.Context) {
|
|||||||
|
|
||||||
response.Success(c, UserResponse{User: dto.UserFromService(user), RunMode: runMode})
|
response.Success(c, UserResponse{User: dto.UserFromService(user), RunMode: runMode})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ValidatePromoCodeRequest 验证优惠码请求
|
||||||
|
type ValidatePromoCodeRequest struct {
|
||||||
|
Code string `json:"code" binding:"required"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidatePromoCodeResponse 验证优惠码响应
|
||||||
|
type ValidatePromoCodeResponse struct {
|
||||||
|
Valid bool `json:"valid"`
|
||||||
|
BonusAmount float64 `json:"bonus_amount,omitempty"`
|
||||||
|
ErrorCode string `json:"error_code,omitempty"`
|
||||||
|
Message string `json:"message,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidatePromoCode 验证优惠码(公开接口,注册前调用)
|
||||||
|
// POST /api/v1/auth/validate-promo-code
|
||||||
|
func (h *AuthHandler) ValidatePromoCode(c *gin.Context) {
|
||||||
|
var req ValidatePromoCodeRequest
|
||||||
|
if err := c.ShouldBindJSON(&req); err != nil {
|
||||||
|
response.BadRequest(c, "Invalid request: "+err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
promoCode, err := h.promoService.ValidatePromoCode(c.Request.Context(), req.Code)
|
||||||
|
if err != nil {
|
||||||
|
// 根据错误类型返回对应的错误码
|
||||||
|
errorCode := "PROMO_CODE_INVALID"
|
||||||
|
switch err {
|
||||||
|
case service.ErrPromoCodeNotFound:
|
||||||
|
errorCode = "PROMO_CODE_NOT_FOUND"
|
||||||
|
case service.ErrPromoCodeExpired:
|
||||||
|
errorCode = "PROMO_CODE_EXPIRED"
|
||||||
|
case service.ErrPromoCodeDisabled:
|
||||||
|
errorCode = "PROMO_CODE_DISABLED"
|
||||||
|
case service.ErrPromoCodeMaxUsed:
|
||||||
|
errorCode = "PROMO_CODE_MAX_USED"
|
||||||
|
case service.ErrPromoCodeAlreadyUsed:
|
||||||
|
errorCode = "PROMO_CODE_ALREADY_USED"
|
||||||
|
}
|
||||||
|
|
||||||
|
response.Success(c, ValidatePromoCodeResponse{
|
||||||
|
Valid: false,
|
||||||
|
ErrorCode: errorCode,
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if promoCode == nil {
|
||||||
|
response.Success(c, ValidatePromoCodeResponse{
|
||||||
|
Valid: false,
|
||||||
|
ErrorCode: "PROMO_CODE_INVALID",
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
response.Success(c, ValidatePromoCodeResponse{
|
||||||
|
Valid: true,
|
||||||
|
BonusAmount: promoCode.BonusAmount,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|||||||
@@ -53,16 +53,18 @@ func APIKeyFromService(k *service.APIKey) *APIKey {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return &APIKey{
|
return &APIKey{
|
||||||
ID: k.ID,
|
ID: k.ID,
|
||||||
UserID: k.UserID,
|
UserID: k.UserID,
|
||||||
Key: k.Key,
|
Key: k.Key,
|
||||||
Name: k.Name,
|
Name: k.Name,
|
||||||
GroupID: k.GroupID,
|
GroupID: k.GroupID,
|
||||||
Status: k.Status,
|
Status: k.Status,
|
||||||
CreatedAt: k.CreatedAt,
|
IPWhitelist: k.IPWhitelist,
|
||||||
UpdatedAt: k.UpdatedAt,
|
IPBlacklist: k.IPBlacklist,
|
||||||
User: UserFromServiceShallow(k.User),
|
CreatedAt: k.CreatedAt,
|
||||||
Group: GroupFromServiceShallow(k.Group),
|
UpdatedAt: k.UpdatedAt,
|
||||||
|
User: UserFromServiceShallow(k.User),
|
||||||
|
Group: GroupFromServiceShallow(k.Group),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -250,11 +252,12 @@ func AccountSummaryFromService(a *service.Account) *AccountSummary {
|
|||||||
|
|
||||||
// usageLogFromServiceBase is a helper that converts service UsageLog to DTO.
|
// usageLogFromServiceBase is a helper that converts service UsageLog to DTO.
|
||||||
// The account parameter allows caller to control what Account info is included.
|
// The account parameter allows caller to control what Account info is included.
|
||||||
func usageLogFromServiceBase(l *service.UsageLog, account *AccountSummary) *UsageLog {
|
// The includeIPAddress parameter controls whether to include the IP address (admin-only).
|
||||||
|
func usageLogFromServiceBase(l *service.UsageLog, account *AccountSummary, includeIPAddress bool) *UsageLog {
|
||||||
if l == nil {
|
if l == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return &UsageLog{
|
result := &UsageLog{
|
||||||
ID: l.ID,
|
ID: l.ID,
|
||||||
UserID: l.UserID,
|
UserID: l.UserID,
|
||||||
APIKeyID: l.APIKeyID,
|
APIKeyID: l.APIKeyID,
|
||||||
@@ -290,21 +293,26 @@ func usageLogFromServiceBase(l *service.UsageLog, account *AccountSummary) *Usag
|
|||||||
Group: GroupFromServiceShallow(l.Group),
|
Group: GroupFromServiceShallow(l.Group),
|
||||||
Subscription: UserSubscriptionFromService(l.Subscription),
|
Subscription: UserSubscriptionFromService(l.Subscription),
|
||||||
}
|
}
|
||||||
|
// IP 地址仅对管理员可见
|
||||||
|
if includeIPAddress {
|
||||||
|
result.IPAddress = l.IPAddress
|
||||||
|
}
|
||||||
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
// UsageLogFromService converts a service UsageLog to DTO for regular users.
|
// UsageLogFromService converts a service UsageLog to DTO for regular users.
|
||||||
// It excludes Account details - users should not see account information.
|
// It excludes Account details and IP address - users should not see these.
|
||||||
func UsageLogFromService(l *service.UsageLog) *UsageLog {
|
func UsageLogFromService(l *service.UsageLog) *UsageLog {
|
||||||
return usageLogFromServiceBase(l, nil)
|
return usageLogFromServiceBase(l, nil, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
// UsageLogFromServiceAdmin converts a service UsageLog to DTO for admin users.
|
// UsageLogFromServiceAdmin converts a service UsageLog to DTO for admin users.
|
||||||
// It includes minimal Account info (ID, Name only).
|
// It includes minimal Account info (ID, Name only) and IP address.
|
||||||
func UsageLogFromServiceAdmin(l *service.UsageLog) *UsageLog {
|
func UsageLogFromServiceAdmin(l *service.UsageLog) *UsageLog {
|
||||||
if l == nil {
|
if l == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return usageLogFromServiceBase(l, AccountSummaryFromService(l.Account))
|
return usageLogFromServiceBase(l, AccountSummaryFromService(l.Account), true)
|
||||||
}
|
}
|
||||||
|
|
||||||
func SettingFromService(s *service.Setting) *Setting {
|
func SettingFromService(s *service.Setting) *Setting {
|
||||||
@@ -362,3 +370,35 @@ func BulkAssignResultFromService(r *service.BulkAssignResult) *BulkAssignResult
|
|||||||
Errors: r.Errors,
|
Errors: r.Errors,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func PromoCodeFromService(pc *service.PromoCode) *PromoCode {
|
||||||
|
if pc == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &PromoCode{
|
||||||
|
ID: pc.ID,
|
||||||
|
Code: pc.Code,
|
||||||
|
BonusAmount: pc.BonusAmount,
|
||||||
|
MaxUses: pc.MaxUses,
|
||||||
|
UsedCount: pc.UsedCount,
|
||||||
|
Status: pc.Status,
|
||||||
|
ExpiresAt: pc.ExpiresAt,
|
||||||
|
Notes: pc.Notes,
|
||||||
|
CreatedAt: pc.CreatedAt,
|
||||||
|
UpdatedAt: pc.UpdatedAt,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func PromoCodeUsageFromService(u *service.PromoCodeUsage) *PromoCodeUsage {
|
||||||
|
if u == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &PromoCodeUsage{
|
||||||
|
ID: u.ID,
|
||||||
|
PromoCodeID: u.PromoCodeID,
|
||||||
|
UserID: u.UserID,
|
||||||
|
BonusAmount: u.BonusAmount,
|
||||||
|
UsedAt: u.UsedAt,
|
||||||
|
User: UserFromServiceShallow(u.User),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -28,6 +28,7 @@ type SystemSettings struct {
|
|||||||
APIBaseURL string `json:"api_base_url"`
|
APIBaseURL string `json:"api_base_url"`
|
||||||
ContactInfo string `json:"contact_info"`
|
ContactInfo string `json:"contact_info"`
|
||||||
DocURL string `json:"doc_url"`
|
DocURL string `json:"doc_url"`
|
||||||
|
HomeContent string `json:"home_content"`
|
||||||
|
|
||||||
DefaultConcurrency int `json:"default_concurrency"`
|
DefaultConcurrency int `json:"default_concurrency"`
|
||||||
DefaultBalance float64 `json:"default_balance"`
|
DefaultBalance float64 `json:"default_balance"`
|
||||||
@@ -42,6 +43,12 @@ type SystemSettings struct {
|
|||||||
// Identity patch configuration (Claude -> Gemini)
|
// Identity patch configuration (Claude -> Gemini)
|
||||||
EnableIdentityPatch bool `json:"enable_identity_patch"`
|
EnableIdentityPatch bool `json:"enable_identity_patch"`
|
||||||
IdentityPatchPrompt string `json:"identity_patch_prompt"`
|
IdentityPatchPrompt string `json:"identity_patch_prompt"`
|
||||||
|
|
||||||
|
// Ops monitoring (vNext)
|
||||||
|
OpsMonitoringEnabled bool `json:"ops_monitoring_enabled"`
|
||||||
|
OpsRealtimeMonitoringEnabled bool `json:"ops_realtime_monitoring_enabled"`
|
||||||
|
OpsQueryModeDefault string `json:"ops_query_mode_default"`
|
||||||
|
OpsMetricsIntervalSeconds int `json:"ops_metrics_interval_seconds"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type PublicSettings struct {
|
type PublicSettings struct {
|
||||||
@@ -55,6 +62,16 @@ type PublicSettings struct {
|
|||||||
APIBaseURL string `json:"api_base_url"`
|
APIBaseURL string `json:"api_base_url"`
|
||||||
ContactInfo string `json:"contact_info"`
|
ContactInfo string `json:"contact_info"`
|
||||||
DocURL string `json:"doc_url"`
|
DocURL string `json:"doc_url"`
|
||||||
|
HomeContent string `json:"home_content"`
|
||||||
LinuxDoOAuthEnabled bool `json:"linuxdo_oauth_enabled"`
|
LinuxDoOAuthEnabled bool `json:"linuxdo_oauth_enabled"`
|
||||||
Version string `json:"version"`
|
Version string `json:"version"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// StreamTimeoutSettings 流超时处理配置 DTO
|
||||||
|
type StreamTimeoutSettings struct {
|
||||||
|
Enabled bool `json:"enabled"`
|
||||||
|
Action string `json:"action"`
|
||||||
|
TempUnschedMinutes int `json:"temp_unsched_minutes"`
|
||||||
|
ThresholdCount int `json:"threshold_count"`
|
||||||
|
ThresholdWindowMinutes int `json:"threshold_window_minutes"`
|
||||||
|
}
|
||||||
|
|||||||
@@ -20,14 +20,16 @@ type User struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type APIKey struct {
|
type APIKey struct {
|
||||||
ID int64 `json:"id"`
|
ID int64 `json:"id"`
|
||||||
UserID int64 `json:"user_id"`
|
UserID int64 `json:"user_id"`
|
||||||
Key string `json:"key"`
|
Key string `json:"key"`
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
GroupID *int64 `json:"group_id"`
|
GroupID *int64 `json:"group_id"`
|
||||||
Status string `json:"status"`
|
Status string `json:"status"`
|
||||||
CreatedAt time.Time `json:"created_at"`
|
IPWhitelist []string `json:"ip_whitelist"`
|
||||||
UpdatedAt time.Time `json:"updated_at"`
|
IPBlacklist []string `json:"ip_blacklist"`
|
||||||
|
CreatedAt time.Time `json:"created_at"`
|
||||||
|
UpdatedAt time.Time `json:"updated_at"`
|
||||||
|
|
||||||
User *User `json:"user,omitempty"`
|
User *User `json:"user,omitempty"`
|
||||||
Group *Group `json:"group,omitempty"`
|
Group *Group `json:"group,omitempty"`
|
||||||
@@ -187,6 +189,9 @@ type UsageLog struct {
|
|||||||
// User-Agent
|
// User-Agent
|
||||||
UserAgent *string `json:"user_agent"`
|
UserAgent *string `json:"user_agent"`
|
||||||
|
|
||||||
|
// IP 地址(仅管理员可见)
|
||||||
|
IPAddress *string `json:"ip_address,omitempty"`
|
||||||
|
|
||||||
CreatedAt time.Time `json:"created_at"`
|
CreatedAt time.Time `json:"created_at"`
|
||||||
|
|
||||||
User *User `json:"user,omitempty"`
|
User *User `json:"user,omitempty"`
|
||||||
@@ -245,3 +250,28 @@ type BulkAssignResult struct {
|
|||||||
Subscriptions []UserSubscription `json:"subscriptions"`
|
Subscriptions []UserSubscription `json:"subscriptions"`
|
||||||
Errors []string `json:"errors"`
|
Errors []string `json:"errors"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// PromoCode 注册优惠码
|
||||||
|
type PromoCode struct {
|
||||||
|
ID int64 `json:"id"`
|
||||||
|
Code string `json:"code"`
|
||||||
|
BonusAmount float64 `json:"bonus_amount"`
|
||||||
|
MaxUses int `json:"max_uses"`
|
||||||
|
UsedCount int `json:"used_count"`
|
||||||
|
Status string `json:"status"`
|
||||||
|
ExpiresAt *time.Time `json:"expires_at"`
|
||||||
|
Notes string `json:"notes"`
|
||||||
|
CreatedAt time.Time `json:"created_at"`
|
||||||
|
UpdatedAt time.Time `json:"updated_at"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// PromoCodeUsage 优惠码使用记录
|
||||||
|
type PromoCodeUsage struct {
|
||||||
|
ID int64 `json:"id"`
|
||||||
|
PromoCodeID int64 `json:"promo_code_id"`
|
||||||
|
UserID int64 `json:"user_id"`
|
||||||
|
BonusAmount float64 `json:"bonus_amount"`
|
||||||
|
UsedAt time.Time `json:"used_at"`
|
||||||
|
|
||||||
|
User *User `json:"user,omitempty"`
|
||||||
|
}
|
||||||
|
|||||||
@@ -15,6 +15,7 @@ import (
|
|||||||
"github.com/Wei-Shaw/sub2api/internal/pkg/antigravity"
|
"github.com/Wei-Shaw/sub2api/internal/pkg/antigravity"
|
||||||
"github.com/Wei-Shaw/sub2api/internal/pkg/claude"
|
"github.com/Wei-Shaw/sub2api/internal/pkg/claude"
|
||||||
pkgerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors"
|
pkgerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/pkg/ip"
|
||||||
"github.com/Wei-Shaw/sub2api/internal/pkg/openai"
|
"github.com/Wei-Shaw/sub2api/internal/pkg/openai"
|
||||||
middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware"
|
middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware"
|
||||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||||
@@ -88,6 +89,11 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// 检查是否为 Claude Code 客户端,设置到 context 中
|
||||||
|
SetClaudeCodeClientContext(c, body)
|
||||||
|
|
||||||
|
setOpsRequestContext(c, "", false, body)
|
||||||
|
|
||||||
parsedReq, err := service.ParseGatewayRequest(body)
|
parsedReq, err := service.ParseGatewayRequest(body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "Failed to parse request body")
|
h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "Failed to parse request body")
|
||||||
@@ -96,8 +102,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
|||||||
reqModel := parsedReq.Model
|
reqModel := parsedReq.Model
|
||||||
reqStream := parsedReq.Stream
|
reqStream := parsedReq.Stream
|
||||||
|
|
||||||
// 设置 Claude Code 客户端标识到 context(用于分组限制检查)
|
setOpsRequestContext(c, reqModel, reqStream, body)
|
||||||
SetClaudeCodeClientContext(c, body)
|
|
||||||
|
|
||||||
// 验证 model 必填
|
// 验证 model 必填
|
||||||
if reqModel == "" {
|
if reqModel == "" {
|
||||||
@@ -111,12 +116,10 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
|||||||
// 获取订阅信息(可能为nil)- 提前获取用于后续检查
|
// 获取订阅信息(可能为nil)- 提前获取用于后续检查
|
||||||
subscription, _ := middleware2.GetSubscriptionFromContext(c)
|
subscription, _ := middleware2.GetSubscriptionFromContext(c)
|
||||||
|
|
||||||
// 获取 User-Agent
|
|
||||||
userAgent := c.Request.UserAgent()
|
|
||||||
|
|
||||||
// 0. 检查wait队列是否已满
|
// 0. 检查wait队列是否已满
|
||||||
maxWait := service.CalculateMaxWait(subject.Concurrency)
|
maxWait := service.CalculateMaxWait(subject.Concurrency)
|
||||||
canWait, err := h.concurrencyHelper.IncrementWaitCount(c.Request.Context(), subject.UserID, maxWait)
|
canWait, err := h.concurrencyHelper.IncrementWaitCount(c.Request.Context(), subject.UserID, maxWait)
|
||||||
|
waitCounted := false
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("Increment wait count failed: %v", err)
|
log.Printf("Increment wait count failed: %v", err)
|
||||||
// On error, allow request to proceed
|
// On error, allow request to proceed
|
||||||
@@ -124,8 +127,15 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
|||||||
h.errorResponse(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later")
|
h.errorResponse(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// 确保在函数退出时减少wait计数
|
if err == nil && canWait {
|
||||||
defer h.concurrencyHelper.DecrementWaitCount(c.Request.Context(), subject.UserID)
|
waitCounted = true
|
||||||
|
}
|
||||||
|
// Ensure we decrement if we exit before acquiring the user slot.
|
||||||
|
defer func() {
|
||||||
|
if waitCounted {
|
||||||
|
h.concurrencyHelper.DecrementWaitCount(c.Request.Context(), subject.UserID)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
// 1. 首先获取用户并发槽位
|
// 1. 首先获取用户并发槽位
|
||||||
userReleaseFunc, err := h.concurrencyHelper.AcquireUserSlotWithWait(c, subject.UserID, subject.Concurrency, reqStream, &streamStarted)
|
userReleaseFunc, err := h.concurrencyHelper.AcquireUserSlotWithWait(c, subject.UserID, subject.Concurrency, reqStream, &streamStarted)
|
||||||
@@ -134,6 +144,11 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
|||||||
h.handleConcurrencyError(c, err, "user", streamStarted)
|
h.handleConcurrencyError(c, err, "user", streamStarted)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
// User slot acquired: no longer waiting in the queue.
|
||||||
|
if waitCounted {
|
||||||
|
h.concurrencyHelper.DecrementWaitCount(c.Request.Context(), subject.UserID)
|
||||||
|
waitCounted = false
|
||||||
|
}
|
||||||
// 在请求结束或 Context 取消时确保释放槽位,避免客户端断开造成泄漏
|
// 在请求结束或 Context 取消时确保释放槽位,避免客户端断开造成泄漏
|
||||||
userReleaseFunc = wrapReleaseOnDone(c.Request.Context(), userReleaseFunc)
|
userReleaseFunc = wrapReleaseOnDone(c.Request.Context(), userReleaseFunc)
|
||||||
if userReleaseFunc != nil {
|
if userReleaseFunc != nil {
|
||||||
@@ -180,6 +195,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
account := selection.Account
|
account := selection.Account
|
||||||
|
setOpsSelectedAccount(c, account.ID)
|
||||||
|
|
||||||
// 检查预热请求拦截(在账号选择后、转发前检查)
|
// 检查预热请求拦截(在账号选择后、转发前检查)
|
||||||
if account.IsInterceptWarmupEnabled() && isWarmupRequest(body) {
|
if account.IsInterceptWarmupEnabled() && isWarmupRequest(body) {
|
||||||
@@ -196,12 +212,12 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
|||||||
|
|
||||||
// 3. 获取账号并发槽位
|
// 3. 获取账号并发槽位
|
||||||
accountReleaseFunc := selection.ReleaseFunc
|
accountReleaseFunc := selection.ReleaseFunc
|
||||||
var accountWaitRelease func()
|
|
||||||
if !selection.Acquired {
|
if !selection.Acquired {
|
||||||
if selection.WaitPlan == nil {
|
if selection.WaitPlan == nil {
|
||||||
h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts", streamStarted)
|
h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts", streamStarted)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
accountWaitCounted := false
|
||||||
canWait, err := h.concurrencyHelper.IncrementAccountWaitCount(c.Request.Context(), account.ID, selection.WaitPlan.MaxWaiting)
|
canWait, err := h.concurrencyHelper.IncrementAccountWaitCount(c.Request.Context(), account.ID, selection.WaitPlan.MaxWaiting)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("Increment account wait count failed: %v", err)
|
log.Printf("Increment account wait count failed: %v", err)
|
||||||
@@ -209,12 +225,16 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
|||||||
log.Printf("Account wait queue full: account=%d", account.ID)
|
log.Printf("Account wait queue full: account=%d", account.ID)
|
||||||
h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later", streamStarted)
|
h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later", streamStarted)
|
||||||
return
|
return
|
||||||
} else {
|
}
|
||||||
// Only set release function if increment succeeded
|
if err == nil && canWait {
|
||||||
accountWaitRelease = func() {
|
accountWaitCounted = true
|
||||||
|
}
|
||||||
|
// Ensure the wait counter is decremented if we exit before acquiring the slot.
|
||||||
|
defer func() {
|
||||||
|
if accountWaitCounted {
|
||||||
h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID)
|
h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID)
|
||||||
}
|
}
|
||||||
}
|
}()
|
||||||
|
|
||||||
accountReleaseFunc, err = h.concurrencyHelper.AcquireAccountSlotWithWaitTimeout(
|
accountReleaseFunc, err = h.concurrencyHelper.AcquireAccountSlotWithWaitTimeout(
|
||||||
c,
|
c,
|
||||||
@@ -225,20 +245,21 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
|||||||
&streamStarted,
|
&streamStarted,
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if accountWaitRelease != nil {
|
|
||||||
accountWaitRelease()
|
|
||||||
}
|
|
||||||
log.Printf("Account concurrency acquire failed: %v", err)
|
log.Printf("Account concurrency acquire failed: %v", err)
|
||||||
h.handleConcurrencyError(c, err, "account", streamStarted)
|
h.handleConcurrencyError(c, err, "account", streamStarted)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
// Slot acquired: no longer waiting in queue.
|
||||||
|
if accountWaitCounted {
|
||||||
|
h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID)
|
||||||
|
accountWaitCounted = false
|
||||||
|
}
|
||||||
if err := h.gatewayService.BindStickySession(c.Request.Context(), apiKey.GroupID, sessionKey, account.ID); err != nil {
|
if err := h.gatewayService.BindStickySession(c.Request.Context(), apiKey.GroupID, sessionKey, account.ID); err != nil {
|
||||||
log.Printf("Bind sticky session failed: %v", err)
|
log.Printf("Bind sticky session failed: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// 账号槽位/等待计数需要在超时或断开时安全回收
|
// 账号槽位/等待计数需要在超时或断开时安全回收
|
||||||
accountReleaseFunc = wrapReleaseOnDone(c.Request.Context(), accountReleaseFunc)
|
accountReleaseFunc = wrapReleaseOnDone(c.Request.Context(), accountReleaseFunc)
|
||||||
accountWaitRelease = wrapReleaseOnDone(c.Request.Context(), accountWaitRelease)
|
|
||||||
|
|
||||||
// 转发请求 - 根据账号平台分流
|
// 转发请求 - 根据账号平台分流
|
||||||
var result *service.ForwardResult
|
var result *service.ForwardResult
|
||||||
@@ -250,9 +271,6 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
|||||||
if accountReleaseFunc != nil {
|
if accountReleaseFunc != nil {
|
||||||
accountReleaseFunc()
|
accountReleaseFunc()
|
||||||
}
|
}
|
||||||
if accountWaitRelease != nil {
|
|
||||||
accountWaitRelease()
|
|
||||||
}
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
var failoverErr *service.UpstreamFailoverError
|
var failoverErr *service.UpstreamFailoverError
|
||||||
if errors.As(err, &failoverErr) {
|
if errors.As(err, &failoverErr) {
|
||||||
@@ -272,8 +290,12 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// 捕获请求信息(用于异步记录,避免在 goroutine 中访问 gin.Context)
|
||||||
|
userAgent := c.GetHeader("User-Agent")
|
||||||
|
clientIP := ip.GetClientIP(c)
|
||||||
|
|
||||||
// 异步记录使用量(subscription已在函数开头获取)
|
// 异步记录使用量(subscription已在函数开头获取)
|
||||||
go func(result *service.ForwardResult, usedAccount *service.Account, ua string) {
|
go func(result *service.ForwardResult, usedAccount *service.Account, ua, clientIP string) {
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
if err := h.gatewayService.RecordUsage(ctx, &service.RecordUsageInput{
|
if err := h.gatewayService.RecordUsage(ctx, &service.RecordUsageInput{
|
||||||
@@ -283,10 +305,11 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
|||||||
Account: usedAccount,
|
Account: usedAccount,
|
||||||
Subscription: subscription,
|
Subscription: subscription,
|
||||||
UserAgent: ua,
|
UserAgent: ua,
|
||||||
|
IPAddress: clientIP,
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
log.Printf("Record usage failed: %v", err)
|
log.Printf("Record usage failed: %v", err)
|
||||||
}
|
}
|
||||||
}(result, account, userAgent)
|
}(result, account, userAgent, clientIP)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -308,6 +331,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
account := selection.Account
|
account := selection.Account
|
||||||
|
setOpsSelectedAccount(c, account.ID)
|
||||||
|
|
||||||
// 检查预热请求拦截(在账号选择后、转发前检查)
|
// 检查预热请求拦截(在账号选择后、转发前检查)
|
||||||
if account.IsInterceptWarmupEnabled() && isWarmupRequest(body) {
|
if account.IsInterceptWarmupEnabled() && isWarmupRequest(body) {
|
||||||
@@ -324,12 +348,12 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
|||||||
|
|
||||||
// 3. 获取账号并发槽位
|
// 3. 获取账号并发槽位
|
||||||
accountReleaseFunc := selection.ReleaseFunc
|
accountReleaseFunc := selection.ReleaseFunc
|
||||||
var accountWaitRelease func()
|
|
||||||
if !selection.Acquired {
|
if !selection.Acquired {
|
||||||
if selection.WaitPlan == nil {
|
if selection.WaitPlan == nil {
|
||||||
h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts", streamStarted)
|
h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts", streamStarted)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
accountWaitCounted := false
|
||||||
canWait, err := h.concurrencyHelper.IncrementAccountWaitCount(c.Request.Context(), account.ID, selection.WaitPlan.MaxWaiting)
|
canWait, err := h.concurrencyHelper.IncrementAccountWaitCount(c.Request.Context(), account.ID, selection.WaitPlan.MaxWaiting)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("Increment account wait count failed: %v", err)
|
log.Printf("Increment account wait count failed: %v", err)
|
||||||
@@ -337,12 +361,15 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
|||||||
log.Printf("Account wait queue full: account=%d", account.ID)
|
log.Printf("Account wait queue full: account=%d", account.ID)
|
||||||
h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later", streamStarted)
|
h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later", streamStarted)
|
||||||
return
|
return
|
||||||
} else {
|
}
|
||||||
// Only set release function if increment succeeded
|
if err == nil && canWait {
|
||||||
accountWaitRelease = func() {
|
accountWaitCounted = true
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if accountWaitCounted {
|
||||||
h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID)
|
h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID)
|
||||||
}
|
}
|
||||||
}
|
}()
|
||||||
|
|
||||||
accountReleaseFunc, err = h.concurrencyHelper.AcquireAccountSlotWithWaitTimeout(
|
accountReleaseFunc, err = h.concurrencyHelper.AcquireAccountSlotWithWaitTimeout(
|
||||||
c,
|
c,
|
||||||
@@ -353,20 +380,20 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
|||||||
&streamStarted,
|
&streamStarted,
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if accountWaitRelease != nil {
|
|
||||||
accountWaitRelease()
|
|
||||||
}
|
|
||||||
log.Printf("Account concurrency acquire failed: %v", err)
|
log.Printf("Account concurrency acquire failed: %v", err)
|
||||||
h.handleConcurrencyError(c, err, "account", streamStarted)
|
h.handleConcurrencyError(c, err, "account", streamStarted)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
if accountWaitCounted {
|
||||||
|
h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID)
|
||||||
|
accountWaitCounted = false
|
||||||
|
}
|
||||||
if err := h.gatewayService.BindStickySession(c.Request.Context(), apiKey.GroupID, sessionKey, account.ID); err != nil {
|
if err := h.gatewayService.BindStickySession(c.Request.Context(), apiKey.GroupID, sessionKey, account.ID); err != nil {
|
||||||
log.Printf("Bind sticky session failed: %v", err)
|
log.Printf("Bind sticky session failed: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// 账号槽位/等待计数需要在超时或断开时安全回收
|
// 账号槽位/等待计数需要在超时或断开时安全回收
|
||||||
accountReleaseFunc = wrapReleaseOnDone(c.Request.Context(), accountReleaseFunc)
|
accountReleaseFunc = wrapReleaseOnDone(c.Request.Context(), accountReleaseFunc)
|
||||||
accountWaitRelease = wrapReleaseOnDone(c.Request.Context(), accountWaitRelease)
|
|
||||||
|
|
||||||
// 转发请求 - 根据账号平台分流
|
// 转发请求 - 根据账号平台分流
|
||||||
var result *service.ForwardResult
|
var result *service.ForwardResult
|
||||||
@@ -378,9 +405,6 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
|||||||
if accountReleaseFunc != nil {
|
if accountReleaseFunc != nil {
|
||||||
accountReleaseFunc()
|
accountReleaseFunc()
|
||||||
}
|
}
|
||||||
if accountWaitRelease != nil {
|
|
||||||
accountWaitRelease()
|
|
||||||
}
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
var failoverErr *service.UpstreamFailoverError
|
var failoverErr *service.UpstreamFailoverError
|
||||||
if errors.As(err, &failoverErr) {
|
if errors.As(err, &failoverErr) {
|
||||||
@@ -400,8 +424,12 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// 捕获请求信息(用于异步记录,避免在 goroutine 中访问 gin.Context)
|
||||||
|
userAgent := c.GetHeader("User-Agent")
|
||||||
|
clientIP := ip.GetClientIP(c)
|
||||||
|
|
||||||
// 异步记录使用量(subscription已在函数开头获取)
|
// 异步记录使用量(subscription已在函数开头获取)
|
||||||
go func(result *service.ForwardResult, usedAccount *service.Account, ua string) {
|
go func(result *service.ForwardResult, usedAccount *service.Account, ua, clientIP string) {
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
if err := h.gatewayService.RecordUsage(ctx, &service.RecordUsageInput{
|
if err := h.gatewayService.RecordUsage(ctx, &service.RecordUsageInput{
|
||||||
@@ -411,10 +439,11 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
|||||||
Account: usedAccount,
|
Account: usedAccount,
|
||||||
Subscription: subscription,
|
Subscription: subscription,
|
||||||
UserAgent: ua,
|
UserAgent: ua,
|
||||||
|
IPAddress: clientIP,
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
log.Printf("Record usage failed: %v", err)
|
log.Printf("Record usage failed: %v", err)
|
||||||
}
|
}
|
||||||
}(result, account, userAgent)
|
}(result, account, userAgent, clientIP)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -680,21 +709,22 @@ func (h *GatewayHandler) CountTokens(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
setOpsRequestContext(c, "", false, body)
|
||||||
|
|
||||||
parsedReq, err := service.ParseGatewayRequest(body)
|
parsedReq, err := service.ParseGatewayRequest(body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "Failed to parse request body")
|
h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "Failed to parse request body")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// 设置 Claude Code 客户端标识到 context(用于分组限制检查)
|
|
||||||
SetClaudeCodeClientContext(c, body)
|
|
||||||
|
|
||||||
// 验证 model 必填
|
// 验证 model 必填
|
||||||
if parsedReq.Model == "" {
|
if parsedReq.Model == "" {
|
||||||
h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "model is required")
|
h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "model is required")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
setOpsRequestContext(c, parsedReq.Model, parsedReq.Stream, body)
|
||||||
|
|
||||||
// 获取订阅信息(可能为nil)
|
// 获取订阅信息(可能为nil)
|
||||||
subscription, _ := middleware2.GetSubscriptionFromContext(c)
|
subscription, _ := middleware2.GetSubscriptionFromContext(c)
|
||||||
|
|
||||||
@@ -715,6 +745,7 @@ func (h *GatewayHandler) CountTokens(c *gin.Context) {
|
|||||||
h.errorResponse(c, http.StatusServiceUnavailable, "api_error", "No available accounts: "+err.Error())
|
h.errorResponse(c, http.StatusServiceUnavailable, "api_error", "No available accounts: "+err.Error())
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
setOpsSelectedAccount(c, account.ID)
|
||||||
|
|
||||||
// 转发请求(不记录使用量)
|
// 转发请求(不记录使用量)
|
||||||
if err := h.gatewayService.ForwardCountTokens(c.Request.Context(), c, account, parsedReq); err != nil {
|
if err := h.gatewayService.ForwardCountTokens(c.Request.Context(), c, account, parsedReq); err != nil {
|
||||||
|
|||||||
@@ -161,25 +161,32 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
setOpsRequestContext(c, modelName, stream, body)
|
||||||
|
|
||||||
// Get subscription (may be nil)
|
// Get subscription (may be nil)
|
||||||
subscription, _ := middleware.GetSubscriptionFromContext(c)
|
subscription, _ := middleware.GetSubscriptionFromContext(c)
|
||||||
|
|
||||||
// 获取 User-Agent
|
|
||||||
userAgent := c.Request.UserAgent()
|
|
||||||
|
|
||||||
// For Gemini native API, do not send Claude-style ping frames.
|
// For Gemini native API, do not send Claude-style ping frames.
|
||||||
geminiConcurrency := NewConcurrencyHelper(h.concurrencyHelper.concurrencyService, SSEPingFormatNone, 0)
|
geminiConcurrency := NewConcurrencyHelper(h.concurrencyHelper.concurrencyService, SSEPingFormatNone, 0)
|
||||||
|
|
||||||
// 0) wait queue check
|
// 0) wait queue check
|
||||||
maxWait := service.CalculateMaxWait(authSubject.Concurrency)
|
maxWait := service.CalculateMaxWait(authSubject.Concurrency)
|
||||||
canWait, err := geminiConcurrency.IncrementWaitCount(c.Request.Context(), authSubject.UserID, maxWait)
|
canWait, err := geminiConcurrency.IncrementWaitCount(c.Request.Context(), authSubject.UserID, maxWait)
|
||||||
|
waitCounted := false
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("Increment wait count failed: %v", err)
|
log.Printf("Increment wait count failed: %v", err)
|
||||||
} else if !canWait {
|
} else if !canWait {
|
||||||
googleError(c, http.StatusTooManyRequests, "Too many pending requests, please retry later")
|
googleError(c, http.StatusTooManyRequests, "Too many pending requests, please retry later")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
defer geminiConcurrency.DecrementWaitCount(c.Request.Context(), authSubject.UserID)
|
if err == nil && canWait {
|
||||||
|
waitCounted = true
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if waitCounted {
|
||||||
|
geminiConcurrency.DecrementWaitCount(c.Request.Context(), authSubject.UserID)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
// 1) user concurrency slot
|
// 1) user concurrency slot
|
||||||
streamStarted := false
|
streamStarted := false
|
||||||
@@ -188,6 +195,10 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) {
|
|||||||
googleError(c, http.StatusTooManyRequests, err.Error())
|
googleError(c, http.StatusTooManyRequests, err.Error())
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
if waitCounted {
|
||||||
|
geminiConcurrency.DecrementWaitCount(c.Request.Context(), authSubject.UserID)
|
||||||
|
waitCounted = false
|
||||||
|
}
|
||||||
// 确保请求取消时也会释放槽位,避免长连接被动中断造成泄漏
|
// 确保请求取消时也会释放槽位,避免长连接被动中断造成泄漏
|
||||||
userReleaseFunc = wrapReleaseOnDone(c.Request.Context(), userReleaseFunc)
|
userReleaseFunc = wrapReleaseOnDone(c.Request.Context(), userReleaseFunc)
|
||||||
if userReleaseFunc != nil {
|
if userReleaseFunc != nil {
|
||||||
@@ -203,10 +214,6 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) {
|
|||||||
|
|
||||||
// 3) select account (sticky session based on request body)
|
// 3) select account (sticky session based on request body)
|
||||||
parsedReq, _ := service.ParseGatewayRequest(body)
|
parsedReq, _ := service.ParseGatewayRequest(body)
|
||||||
|
|
||||||
// 设置 Claude Code 客户端标识到 context(用于分组限制检查)
|
|
||||||
SetClaudeCodeClientContext(c, body)
|
|
||||||
|
|
||||||
sessionHash := h.gatewayService.GenerateSessionHash(parsedReq)
|
sessionHash := h.gatewayService.GenerateSessionHash(parsedReq)
|
||||||
sessionKey := sessionHash
|
sessionKey := sessionHash
|
||||||
if sessionHash != "" {
|
if sessionHash != "" {
|
||||||
@@ -228,15 +235,16 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
account := selection.Account
|
account := selection.Account
|
||||||
|
setOpsSelectedAccount(c, account.ID)
|
||||||
|
|
||||||
// 4) account concurrency slot
|
// 4) account concurrency slot
|
||||||
accountReleaseFunc := selection.ReleaseFunc
|
accountReleaseFunc := selection.ReleaseFunc
|
||||||
var accountWaitRelease func()
|
|
||||||
if !selection.Acquired {
|
if !selection.Acquired {
|
||||||
if selection.WaitPlan == nil {
|
if selection.WaitPlan == nil {
|
||||||
googleError(c, http.StatusServiceUnavailable, "No available Gemini accounts")
|
googleError(c, http.StatusServiceUnavailable, "No available Gemini accounts")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
accountWaitCounted := false
|
||||||
canWait, err := geminiConcurrency.IncrementAccountWaitCount(c.Request.Context(), account.ID, selection.WaitPlan.MaxWaiting)
|
canWait, err := geminiConcurrency.IncrementAccountWaitCount(c.Request.Context(), account.ID, selection.WaitPlan.MaxWaiting)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("Increment account wait count failed: %v", err)
|
log.Printf("Increment account wait count failed: %v", err)
|
||||||
@@ -244,12 +252,15 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) {
|
|||||||
log.Printf("Account wait queue full: account=%d", account.ID)
|
log.Printf("Account wait queue full: account=%d", account.ID)
|
||||||
googleError(c, http.StatusTooManyRequests, "Too many pending requests, please retry later")
|
googleError(c, http.StatusTooManyRequests, "Too many pending requests, please retry later")
|
||||||
return
|
return
|
||||||
} else {
|
}
|
||||||
// Only set release function if increment succeeded
|
if err == nil && canWait {
|
||||||
accountWaitRelease = func() {
|
accountWaitCounted = true
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if accountWaitCounted {
|
||||||
geminiConcurrency.DecrementAccountWaitCount(c.Request.Context(), account.ID)
|
geminiConcurrency.DecrementAccountWaitCount(c.Request.Context(), account.ID)
|
||||||
}
|
}
|
||||||
}
|
}()
|
||||||
|
|
||||||
accountReleaseFunc, err = geminiConcurrency.AcquireAccountSlotWithWaitTimeout(
|
accountReleaseFunc, err = geminiConcurrency.AcquireAccountSlotWithWaitTimeout(
|
||||||
c,
|
c,
|
||||||
@@ -260,19 +271,19 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) {
|
|||||||
&streamStarted,
|
&streamStarted,
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if accountWaitRelease != nil {
|
|
||||||
accountWaitRelease()
|
|
||||||
}
|
|
||||||
googleError(c, http.StatusTooManyRequests, err.Error())
|
googleError(c, http.StatusTooManyRequests, err.Error())
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
if accountWaitCounted {
|
||||||
|
geminiConcurrency.DecrementAccountWaitCount(c.Request.Context(), account.ID)
|
||||||
|
accountWaitCounted = false
|
||||||
|
}
|
||||||
if err := h.gatewayService.BindStickySession(c.Request.Context(), apiKey.GroupID, sessionKey, account.ID); err != nil {
|
if err := h.gatewayService.BindStickySession(c.Request.Context(), apiKey.GroupID, sessionKey, account.ID); err != nil {
|
||||||
log.Printf("Bind sticky session failed: %v", err)
|
log.Printf("Bind sticky session failed: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// 账号槽位/等待计数需要在超时或断开时安全回收
|
// 账号槽位/等待计数需要在超时或断开时安全回收
|
||||||
accountReleaseFunc = wrapReleaseOnDone(c.Request.Context(), accountReleaseFunc)
|
accountReleaseFunc = wrapReleaseOnDone(c.Request.Context(), accountReleaseFunc)
|
||||||
accountWaitRelease = wrapReleaseOnDone(c.Request.Context(), accountWaitRelease)
|
|
||||||
|
|
||||||
// 5) forward (根据平台分流)
|
// 5) forward (根据平台分流)
|
||||||
var result *service.ForwardResult
|
var result *service.ForwardResult
|
||||||
@@ -284,9 +295,6 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) {
|
|||||||
if accountReleaseFunc != nil {
|
if accountReleaseFunc != nil {
|
||||||
accountReleaseFunc()
|
accountReleaseFunc()
|
||||||
}
|
}
|
||||||
if accountWaitRelease != nil {
|
|
||||||
accountWaitRelease()
|
|
||||||
}
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
var failoverErr *service.UpstreamFailoverError
|
var failoverErr *service.UpstreamFailoverError
|
||||||
if errors.As(err, &failoverErr) {
|
if errors.As(err, &failoverErr) {
|
||||||
@@ -306,8 +314,12 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// 捕获请求信息(用于异步记录,避免在 goroutine 中访问 gin.Context)
|
||||||
|
userAgent := c.GetHeader("User-Agent")
|
||||||
|
clientIP := c.ClientIP()
|
||||||
|
|
||||||
// 6) record usage async
|
// 6) record usage async
|
||||||
go func(result *service.ForwardResult, usedAccount *service.Account, ua string) {
|
go func(result *service.ForwardResult, usedAccount *service.Account, ua, ip string) {
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
if err := h.gatewayService.RecordUsage(ctx, &service.RecordUsageInput{
|
if err := h.gatewayService.RecordUsage(ctx, &service.RecordUsageInput{
|
||||||
@@ -317,10 +329,11 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) {
|
|||||||
Account: usedAccount,
|
Account: usedAccount,
|
||||||
Subscription: subscription,
|
Subscription: subscription,
|
||||||
UserAgent: ua,
|
UserAgent: ua,
|
||||||
|
IPAddress: ip,
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
log.Printf("Record usage failed: %v", err)
|
log.Printf("Record usage failed: %v", err)
|
||||||
}
|
}
|
||||||
}(result, account, userAgent)
|
}(result, account, userAgent, clientIP)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -16,7 +16,9 @@ type AdminHandlers struct {
|
|||||||
AntigravityOAuth *admin.AntigravityOAuthHandler
|
AntigravityOAuth *admin.AntigravityOAuthHandler
|
||||||
Proxy *admin.ProxyHandler
|
Proxy *admin.ProxyHandler
|
||||||
Redeem *admin.RedeemHandler
|
Redeem *admin.RedeemHandler
|
||||||
|
Promo *admin.PromoHandler
|
||||||
Setting *admin.SettingHandler
|
Setting *admin.SettingHandler
|
||||||
|
Ops *admin.OpsHandler
|
||||||
System *admin.SystemHandler
|
System *admin.SystemHandler
|
||||||
Subscription *admin.SubscriptionHandler
|
Subscription *admin.SubscriptionHandler
|
||||||
Usage *admin.UsageHandler
|
Usage *admin.UsageHandler
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
"log"
|
"log"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/Wei-Shaw/sub2api/internal/config"
|
"github.com/Wei-Shaw/sub2api/internal/config"
|
||||||
@@ -75,6 +76,8 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
setOpsRequestContext(c, "", false, body)
|
||||||
|
|
||||||
// Parse request body to map for potential modification
|
// Parse request body to map for potential modification
|
||||||
var reqBody map[string]any
|
var reqBody map[string]any
|
||||||
if err := json.Unmarshal(body, &reqBody); err != nil {
|
if err := json.Unmarshal(body, &reqBody); err != nil {
|
||||||
@@ -92,18 +95,24 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// For non-Codex CLI requests, set default instructions
|
|
||||||
userAgent := c.GetHeader("User-Agent")
|
userAgent := c.GetHeader("User-Agent")
|
||||||
if !openai.IsCodexCLIRequest(userAgent) {
|
if !openai.IsCodexCLIRequest(userAgent) {
|
||||||
reqBody["instructions"] = openai.DefaultInstructions
|
existingInstructions, _ := reqBody["instructions"].(string)
|
||||||
// Re-serialize body
|
if strings.TrimSpace(existingInstructions) == "" {
|
||||||
body, err = json.Marshal(reqBody)
|
if instructions := strings.TrimSpace(service.GetOpenCodeInstructions()); instructions != "" {
|
||||||
if err != nil {
|
reqBody["instructions"] = instructions
|
||||||
h.errorResponse(c, http.StatusInternalServerError, "api_error", "Failed to process request")
|
// Re-serialize body
|
||||||
return
|
body, err = json.Marshal(reqBody)
|
||||||
|
if err != nil {
|
||||||
|
h.errorResponse(c, http.StatusInternalServerError, "api_error", "Failed to process request")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
setOpsRequestContext(c, reqModel, reqStream, body)
|
||||||
|
|
||||||
// Track if we've started streaming (for error handling)
|
// Track if we've started streaming (for error handling)
|
||||||
streamStarted := false
|
streamStarted := false
|
||||||
|
|
||||||
@@ -113,6 +122,7 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) {
|
|||||||
// 0. Check if wait queue is full
|
// 0. Check if wait queue is full
|
||||||
maxWait := service.CalculateMaxWait(subject.Concurrency)
|
maxWait := service.CalculateMaxWait(subject.Concurrency)
|
||||||
canWait, err := h.concurrencyHelper.IncrementWaitCount(c.Request.Context(), subject.UserID, maxWait)
|
canWait, err := h.concurrencyHelper.IncrementWaitCount(c.Request.Context(), subject.UserID, maxWait)
|
||||||
|
waitCounted := false
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("Increment wait count failed: %v", err)
|
log.Printf("Increment wait count failed: %v", err)
|
||||||
// On error, allow request to proceed
|
// On error, allow request to proceed
|
||||||
@@ -120,8 +130,14 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) {
|
|||||||
h.errorResponse(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later")
|
h.errorResponse(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// Ensure wait count is decremented when function exits
|
if err == nil && canWait {
|
||||||
defer h.concurrencyHelper.DecrementWaitCount(c.Request.Context(), subject.UserID)
|
waitCounted = true
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if waitCounted {
|
||||||
|
h.concurrencyHelper.DecrementWaitCount(c.Request.Context(), subject.UserID)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
// 1. First acquire user concurrency slot
|
// 1. First acquire user concurrency slot
|
||||||
userReleaseFunc, err := h.concurrencyHelper.AcquireUserSlotWithWait(c, subject.UserID, subject.Concurrency, reqStream, &streamStarted)
|
userReleaseFunc, err := h.concurrencyHelper.AcquireUserSlotWithWait(c, subject.UserID, subject.Concurrency, reqStream, &streamStarted)
|
||||||
@@ -130,6 +146,11 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) {
|
|||||||
h.handleConcurrencyError(c, err, "user", streamStarted)
|
h.handleConcurrencyError(c, err, "user", streamStarted)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
// User slot acquired: no longer waiting.
|
||||||
|
if waitCounted {
|
||||||
|
h.concurrencyHelper.DecrementWaitCount(c.Request.Context(), subject.UserID)
|
||||||
|
waitCounted = false
|
||||||
|
}
|
||||||
// 确保请求取消时也会释放槽位,避免长连接被动中断造成泄漏
|
// 确保请求取消时也会释放槽位,避免长连接被动中断造成泄漏
|
||||||
userReleaseFunc = wrapReleaseOnDone(c.Request.Context(), userReleaseFunc)
|
userReleaseFunc = wrapReleaseOnDone(c.Request.Context(), userReleaseFunc)
|
||||||
if userReleaseFunc != nil {
|
if userReleaseFunc != nil {
|
||||||
@@ -167,15 +188,16 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) {
|
|||||||
}
|
}
|
||||||
account := selection.Account
|
account := selection.Account
|
||||||
log.Printf("[OpenAI Handler] Selected account: id=%d name=%s", account.ID, account.Name)
|
log.Printf("[OpenAI Handler] Selected account: id=%d name=%s", account.ID, account.Name)
|
||||||
|
setOpsSelectedAccount(c, account.ID)
|
||||||
|
|
||||||
// 3. Acquire account concurrency slot
|
// 3. Acquire account concurrency slot
|
||||||
accountReleaseFunc := selection.ReleaseFunc
|
accountReleaseFunc := selection.ReleaseFunc
|
||||||
var accountWaitRelease func()
|
|
||||||
if !selection.Acquired {
|
if !selection.Acquired {
|
||||||
if selection.WaitPlan == nil {
|
if selection.WaitPlan == nil {
|
||||||
h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts", streamStarted)
|
h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts", streamStarted)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
accountWaitCounted := false
|
||||||
canWait, err := h.concurrencyHelper.IncrementAccountWaitCount(c.Request.Context(), account.ID, selection.WaitPlan.MaxWaiting)
|
canWait, err := h.concurrencyHelper.IncrementAccountWaitCount(c.Request.Context(), account.ID, selection.WaitPlan.MaxWaiting)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("Increment account wait count failed: %v", err)
|
log.Printf("Increment account wait count failed: %v", err)
|
||||||
@@ -183,12 +205,15 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) {
|
|||||||
log.Printf("Account wait queue full: account=%d", account.ID)
|
log.Printf("Account wait queue full: account=%d", account.ID)
|
||||||
h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later", streamStarted)
|
h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later", streamStarted)
|
||||||
return
|
return
|
||||||
} else {
|
}
|
||||||
// Only set release function if increment succeeded
|
if err == nil && canWait {
|
||||||
accountWaitRelease = func() {
|
accountWaitCounted = true
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if accountWaitCounted {
|
||||||
h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID)
|
h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID)
|
||||||
}
|
}
|
||||||
}
|
}()
|
||||||
|
|
||||||
accountReleaseFunc, err = h.concurrencyHelper.AcquireAccountSlotWithWaitTimeout(
|
accountReleaseFunc, err = h.concurrencyHelper.AcquireAccountSlotWithWaitTimeout(
|
||||||
c,
|
c,
|
||||||
@@ -199,29 +224,26 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) {
|
|||||||
&streamStarted,
|
&streamStarted,
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if accountWaitRelease != nil {
|
|
||||||
accountWaitRelease()
|
|
||||||
}
|
|
||||||
log.Printf("Account concurrency acquire failed: %v", err)
|
log.Printf("Account concurrency acquire failed: %v", err)
|
||||||
h.handleConcurrencyError(c, err, "account", streamStarted)
|
h.handleConcurrencyError(c, err, "account", streamStarted)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
if accountWaitCounted {
|
||||||
|
h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID)
|
||||||
|
accountWaitCounted = false
|
||||||
|
}
|
||||||
if err := h.gatewayService.BindStickySession(c.Request.Context(), apiKey.GroupID, sessionHash, account.ID); err != nil {
|
if err := h.gatewayService.BindStickySession(c.Request.Context(), apiKey.GroupID, sessionHash, account.ID); err != nil {
|
||||||
log.Printf("Bind sticky session failed: %v", err)
|
log.Printf("Bind sticky session failed: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// 账号槽位/等待计数需要在超时或断开时安全回收
|
// 账号槽位/等待计数需要在超时或断开时安全回收
|
||||||
accountReleaseFunc = wrapReleaseOnDone(c.Request.Context(), accountReleaseFunc)
|
accountReleaseFunc = wrapReleaseOnDone(c.Request.Context(), accountReleaseFunc)
|
||||||
accountWaitRelease = wrapReleaseOnDone(c.Request.Context(), accountWaitRelease)
|
|
||||||
|
|
||||||
// Forward request
|
// Forward request
|
||||||
result, err := h.gatewayService.Forward(c.Request.Context(), c, account, body)
|
result, err := h.gatewayService.Forward(c.Request.Context(), c, account, body)
|
||||||
if accountReleaseFunc != nil {
|
if accountReleaseFunc != nil {
|
||||||
accountReleaseFunc()
|
accountReleaseFunc()
|
||||||
}
|
}
|
||||||
if accountWaitRelease != nil {
|
|
||||||
accountWaitRelease()
|
|
||||||
}
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
var failoverErr *service.UpstreamFailoverError
|
var failoverErr *service.UpstreamFailoverError
|
||||||
if errors.As(err, &failoverErr) {
|
if errors.As(err, &failoverErr) {
|
||||||
@@ -241,8 +263,12 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// 捕获请求信息(用于异步记录,避免在 goroutine 中访问 gin.Context)
|
||||||
|
userAgent := c.GetHeader("User-Agent")
|
||||||
|
clientIP := c.ClientIP()
|
||||||
|
|
||||||
// Async record usage
|
// Async record usage
|
||||||
go func(result *service.OpenAIForwardResult, usedAccount *service.Account, ua string) {
|
go func(result *service.OpenAIForwardResult, usedAccount *service.Account, ua, ip string) {
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
if err := h.gatewayService.RecordUsage(ctx, &service.OpenAIRecordUsageInput{
|
if err := h.gatewayService.RecordUsage(ctx, &service.OpenAIRecordUsageInput{
|
||||||
@@ -252,10 +278,11 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) {
|
|||||||
Account: usedAccount,
|
Account: usedAccount,
|
||||||
Subscription: subscription,
|
Subscription: subscription,
|
||||||
UserAgent: ua,
|
UserAgent: ua,
|
||||||
|
IPAddress: ip,
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
log.Printf("Record usage failed: %v", err)
|
log.Printf("Record usage failed: %v", err)
|
||||||
}
|
}
|
||||||
}(result, account, userAgent)
|
}(result, account, userAgent, clientIP)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
954
backend/internal/handler/ops_error_logger.go
Normal file
954
backend/internal/handler/ops_error_logger.go
Normal file
@@ -0,0 +1,954 @@
|
|||||||
|
package handler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"log"
|
||||||
|
"runtime"
|
||||||
|
"runtime/debug"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey"
|
||||||
|
middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||||
|
"github.com/gin-gonic/gin"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
opsModelKey = "ops_model"
|
||||||
|
opsStreamKey = "ops_stream"
|
||||||
|
opsRequestBodyKey = "ops_request_body"
|
||||||
|
opsAccountIDKey = "ops_account_id"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
opsErrorLogTimeout = 5 * time.Second
|
||||||
|
opsErrorLogDrainTimeout = 10 * time.Second
|
||||||
|
|
||||||
|
opsErrorLogMinWorkerCount = 4
|
||||||
|
opsErrorLogMaxWorkerCount = 32
|
||||||
|
|
||||||
|
opsErrorLogQueueSizePerWorker = 128
|
||||||
|
opsErrorLogMinQueueSize = 256
|
||||||
|
opsErrorLogMaxQueueSize = 8192
|
||||||
|
)
|
||||||
|
|
||||||
|
type opsErrorLogJob struct {
|
||||||
|
ops *service.OpsService
|
||||||
|
entry *service.OpsInsertErrorLogInput
|
||||||
|
requestBody []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
opsErrorLogOnce sync.Once
|
||||||
|
opsErrorLogQueue chan opsErrorLogJob
|
||||||
|
|
||||||
|
opsErrorLogStopOnce sync.Once
|
||||||
|
opsErrorLogWorkersWg sync.WaitGroup
|
||||||
|
opsErrorLogMu sync.RWMutex
|
||||||
|
opsErrorLogStopping bool
|
||||||
|
opsErrorLogQueueLen atomic.Int64
|
||||||
|
opsErrorLogEnqueued atomic.Int64
|
||||||
|
opsErrorLogDropped atomic.Int64
|
||||||
|
opsErrorLogProcessed atomic.Int64
|
||||||
|
|
||||||
|
opsErrorLogLastDropLogAt atomic.Int64
|
||||||
|
|
||||||
|
opsErrorLogShutdownCh = make(chan struct{})
|
||||||
|
opsErrorLogShutdownOnce sync.Once
|
||||||
|
opsErrorLogDrained atomic.Bool
|
||||||
|
)
|
||||||
|
|
||||||
|
func startOpsErrorLogWorkers() {
|
||||||
|
opsErrorLogMu.Lock()
|
||||||
|
defer opsErrorLogMu.Unlock()
|
||||||
|
|
||||||
|
if opsErrorLogStopping {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
workerCount, queueSize := opsErrorLogConfig()
|
||||||
|
opsErrorLogQueue = make(chan opsErrorLogJob, queueSize)
|
||||||
|
opsErrorLogQueueLen.Store(0)
|
||||||
|
|
||||||
|
opsErrorLogWorkersWg.Add(workerCount)
|
||||||
|
for i := 0; i < workerCount; i++ {
|
||||||
|
go func() {
|
||||||
|
defer opsErrorLogWorkersWg.Done()
|
||||||
|
for job := range opsErrorLogQueue {
|
||||||
|
opsErrorLogQueueLen.Add(-1)
|
||||||
|
if job.ops == nil || job.entry == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
func() {
|
||||||
|
defer func() {
|
||||||
|
if r := recover(); r != nil {
|
||||||
|
log.Printf("[OpsErrorLogger] worker panic: %v\n%s", r, debug.Stack())
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), opsErrorLogTimeout)
|
||||||
|
_ = job.ops.RecordError(ctx, job.entry, job.requestBody)
|
||||||
|
cancel()
|
||||||
|
opsErrorLogProcessed.Add(1)
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func enqueueOpsErrorLog(ops *service.OpsService, entry *service.OpsInsertErrorLogInput, requestBody []byte) {
|
||||||
|
if ops == nil || entry == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case <-opsErrorLogShutdownCh:
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
|
opsErrorLogMu.RLock()
|
||||||
|
stopping := opsErrorLogStopping
|
||||||
|
opsErrorLogMu.RUnlock()
|
||||||
|
if stopping {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
opsErrorLogOnce.Do(startOpsErrorLogWorkers)
|
||||||
|
|
||||||
|
opsErrorLogMu.RLock()
|
||||||
|
defer opsErrorLogMu.RUnlock()
|
||||||
|
if opsErrorLogStopping || opsErrorLogQueue == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case opsErrorLogQueue <- opsErrorLogJob{ops: ops, entry: entry, requestBody: requestBody}:
|
||||||
|
opsErrorLogQueueLen.Add(1)
|
||||||
|
opsErrorLogEnqueued.Add(1)
|
||||||
|
default:
|
||||||
|
// Queue is full; drop to avoid blocking request handling.
|
||||||
|
opsErrorLogDropped.Add(1)
|
||||||
|
maybeLogOpsErrorLogDrop()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func StopOpsErrorLogWorkers() bool {
|
||||||
|
opsErrorLogStopOnce.Do(func() {
|
||||||
|
opsErrorLogShutdownOnce.Do(func() {
|
||||||
|
close(opsErrorLogShutdownCh)
|
||||||
|
})
|
||||||
|
opsErrorLogDrained.Store(stopOpsErrorLogWorkers())
|
||||||
|
})
|
||||||
|
return opsErrorLogDrained.Load()
|
||||||
|
}
|
||||||
|
|
||||||
|
func stopOpsErrorLogWorkers() bool {
|
||||||
|
opsErrorLogMu.Lock()
|
||||||
|
opsErrorLogStopping = true
|
||||||
|
ch := opsErrorLogQueue
|
||||||
|
if ch != nil {
|
||||||
|
close(ch)
|
||||||
|
}
|
||||||
|
opsErrorLogQueue = nil
|
||||||
|
opsErrorLogMu.Unlock()
|
||||||
|
|
||||||
|
if ch == nil {
|
||||||
|
opsErrorLogQueueLen.Store(0)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
done := make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
opsErrorLogWorkersWg.Wait()
|
||||||
|
close(done)
|
||||||
|
}()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-done:
|
||||||
|
opsErrorLogQueueLen.Store(0)
|
||||||
|
return true
|
||||||
|
case <-time.After(opsErrorLogDrainTimeout):
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func OpsErrorLogQueueLength() int64 {
|
||||||
|
return opsErrorLogQueueLen.Load()
|
||||||
|
}
|
||||||
|
|
||||||
|
func OpsErrorLogQueueCapacity() int {
|
||||||
|
opsErrorLogMu.RLock()
|
||||||
|
ch := opsErrorLogQueue
|
||||||
|
opsErrorLogMu.RUnlock()
|
||||||
|
if ch == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return cap(ch)
|
||||||
|
}
|
||||||
|
|
||||||
|
func OpsErrorLogDroppedTotal() int64 {
|
||||||
|
return opsErrorLogDropped.Load()
|
||||||
|
}
|
||||||
|
|
||||||
|
func OpsErrorLogEnqueuedTotal() int64 {
|
||||||
|
return opsErrorLogEnqueued.Load()
|
||||||
|
}
|
||||||
|
|
||||||
|
func OpsErrorLogProcessedTotal() int64 {
|
||||||
|
return opsErrorLogProcessed.Load()
|
||||||
|
}
|
||||||
|
|
||||||
|
func maybeLogOpsErrorLogDrop() {
|
||||||
|
now := time.Now().Unix()
|
||||||
|
|
||||||
|
for {
|
||||||
|
last := opsErrorLogLastDropLogAt.Load()
|
||||||
|
if last != 0 && now-last < 60 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if opsErrorLogLastDropLogAt.CompareAndSwap(last, now) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
queued := opsErrorLogQueueLen.Load()
|
||||||
|
queueCap := OpsErrorLogQueueCapacity()
|
||||||
|
|
||||||
|
log.Printf(
|
||||||
|
"[OpsErrorLogger] queue is full; dropping logs (queued=%d cap=%d enqueued_total=%d dropped_total=%d processed_total=%d)",
|
||||||
|
queued,
|
||||||
|
queueCap,
|
||||||
|
opsErrorLogEnqueued.Load(),
|
||||||
|
opsErrorLogDropped.Load(),
|
||||||
|
opsErrorLogProcessed.Load(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func opsErrorLogConfig() (workerCount int, queueSize int) {
|
||||||
|
workerCount = runtime.GOMAXPROCS(0) * 2
|
||||||
|
if workerCount < opsErrorLogMinWorkerCount {
|
||||||
|
workerCount = opsErrorLogMinWorkerCount
|
||||||
|
}
|
||||||
|
if workerCount > opsErrorLogMaxWorkerCount {
|
||||||
|
workerCount = opsErrorLogMaxWorkerCount
|
||||||
|
}
|
||||||
|
|
||||||
|
queueSize = workerCount * opsErrorLogQueueSizePerWorker
|
||||||
|
if queueSize < opsErrorLogMinQueueSize {
|
||||||
|
queueSize = opsErrorLogMinQueueSize
|
||||||
|
}
|
||||||
|
if queueSize > opsErrorLogMaxQueueSize {
|
||||||
|
queueSize = opsErrorLogMaxQueueSize
|
||||||
|
}
|
||||||
|
|
||||||
|
return workerCount, queueSize
|
||||||
|
}
|
||||||
|
|
||||||
|
func setOpsRequestContext(c *gin.Context, model string, stream bool, requestBody []byte) {
|
||||||
|
if c == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
c.Set(opsModelKey, model)
|
||||||
|
c.Set(opsStreamKey, stream)
|
||||||
|
if len(requestBody) > 0 {
|
||||||
|
c.Set(opsRequestBodyKey, requestBody)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func setOpsSelectedAccount(c *gin.Context, accountID int64) {
|
||||||
|
if c == nil || accountID <= 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
c.Set(opsAccountIDKey, accountID)
|
||||||
|
}
|
||||||
|
|
||||||
|
type opsCaptureWriter struct {
|
||||||
|
gin.ResponseWriter
|
||||||
|
limit int
|
||||||
|
buf bytes.Buffer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *opsCaptureWriter) Write(b []byte) (int, error) {
|
||||||
|
if w.Status() >= 400 && w.limit > 0 && w.buf.Len() < w.limit {
|
||||||
|
remaining := w.limit - w.buf.Len()
|
||||||
|
if len(b) > remaining {
|
||||||
|
_, _ = w.buf.Write(b[:remaining])
|
||||||
|
} else {
|
||||||
|
_, _ = w.buf.Write(b)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return w.ResponseWriter.Write(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *opsCaptureWriter) WriteString(s string) (int, error) {
|
||||||
|
if w.Status() >= 400 && w.limit > 0 && w.buf.Len() < w.limit {
|
||||||
|
remaining := w.limit - w.buf.Len()
|
||||||
|
if len(s) > remaining {
|
||||||
|
_, _ = w.buf.WriteString(s[:remaining])
|
||||||
|
} else {
|
||||||
|
_, _ = w.buf.WriteString(s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return w.ResponseWriter.WriteString(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpsErrorLoggerMiddleware records error responses (status >= 400) into ops_error_logs.
//
// Notes:
//   - It buffers response bodies only when status >= 400 to avoid overhead for successful traffic.
//   - Streaming errors after the response has started (SSE) may still need explicit logging.
//   - For successful responses (status < 400) it still persists upstream error
//     attempts (retries/failover) found on the context, so upstream instability
//     "covered" by retries remains observable.
func OpsErrorLoggerMiddleware(ops *service.OpsService) gin.HandlerFunc {
	return func(c *gin.Context) {
		// Swap in the capture writer before running the handler chain; it only
		// buffers bytes once the status is >= 400 (capped at 64KB).
		w := &opsCaptureWriter{ResponseWriter: c.Writer, limit: 64 * 1024}
		c.Writer = w
		c.Next()

		if ops == nil {
			return
		}
		if !ops.IsMonitoringEnabled(c.Request.Context()) {
			return
		}

		status := c.Writer.Status()
		if status < 400 {
			// Even when the client request succeeds, we still want to persist upstream error attempts
			// (retries/failover) so ops can observe upstream instability that gets "covered" by retries.
			var events []*service.OpsUpstreamErrorEvent
			if v, ok := c.Get(service.OpsUpstreamErrorsKey); ok {
				if arr, ok := v.([]*service.OpsUpstreamErrorEvent); ok && len(arr) > 0 {
					events = arr
				}
			}
			// Also accept single upstream fields set by gateway services (rare for successful requests).
			hasUpstreamContext := len(events) > 0
			if !hasUpstreamContext {
				if v, ok := c.Get(service.OpsUpstreamStatusCodeKey); ok {
					switch t := v.(type) {
					case int:
						hasUpstreamContext = t > 0
					case int64:
						hasUpstreamContext = t > 0
					}
				}
			}
			if !hasUpstreamContext {
				if v, ok := c.Get(service.OpsUpstreamErrorMessageKey); ok {
					if s, ok := v.(string); ok && strings.TrimSpace(s) != "" {
						hasUpstreamContext = true
					}
				}
			}
			if !hasUpstreamContext {
				if v, ok := c.Get(service.OpsUpstreamErrorDetailKey); ok {
					if s, ok := v.(string); ok && strings.TrimSpace(s) != "" {
						hasUpstreamContext = true
					}
				}
			}
			// Nothing upstream-related on the context: a plain success, skip.
			if !hasUpstreamContext {
				return
			}

			apiKey, _ := middleware2.GetAPIKeyFromContext(c)
			clientRequestID, _ := c.Request.Context().Value(ctxkey.ClientRequestID).(string)

			// Request metadata previously stashed by setOpsRequestContext /
			// setOpsSelectedAccount; all lookups are best-effort.
			model, _ := c.Get(opsModelKey)
			streamV, _ := c.Get(opsStreamKey)
			accountIDV, _ := c.Get(opsAccountIDKey)

			var modelName string
			if s, ok := model.(string); ok {
				modelName = s
			}
			stream := false
			if b, ok := streamV.(bool); ok {
				stream = b
			}

			// Prefer showing the account that experienced the upstream error (if we have events),
			// otherwise fall back to the final selected account (best-effort).
			var accountID *int64
			if len(events) > 0 {
				if last := events[len(events)-1]; last != nil && last.AccountID > 0 {
					v := last.AccountID
					accountID = &v
				}
			}
			if accountID == nil {
				if v, ok := accountIDV.(int64); ok && v > 0 {
					accountID = &v
				}
			}

			fallbackPlatform := guessPlatformFromPath(c.Request.URL.Path)
			platform := resolveOpsPlatform(apiKey, fallbackPlatform)

			requestID := c.Writer.Header().Get("X-Request-Id")
			if requestID == "" {
				requestID = c.Writer.Header().Get("x-request-id")
			}

			// Best-effort backfill single upstream fields from the last event (if present).
			var upstreamStatusCode *int
			var upstreamErrorMessage *string
			var upstreamErrorDetail *string
			if len(events) > 0 {
				last := events[len(events)-1]
				if last != nil {
					if last.UpstreamStatusCode > 0 {
						code := last.UpstreamStatusCode
						upstreamStatusCode = &code
					}
					if msg := strings.TrimSpace(last.Message); msg != "" {
						upstreamErrorMessage = &msg
					}
					if detail := strings.TrimSpace(last.Detail); detail != "" {
						upstreamErrorDetail = &detail
					}
				}
			}

			// Fall back to the single context keys when the event list did not
			// provide a value.
			if upstreamStatusCode == nil {
				if v, ok := c.Get(service.OpsUpstreamStatusCodeKey); ok {
					switch t := v.(type) {
					case int:
						if t > 0 {
							code := t
							upstreamStatusCode = &code
						}
					case int64:
						if t > 0 {
							code := int(t)
							upstreamStatusCode = &code
						}
					}
				}
			}
			if upstreamErrorMessage == nil {
				if v, ok := c.Get(service.OpsUpstreamErrorMessageKey); ok {
					if s, ok := v.(string); ok && strings.TrimSpace(s) != "" {
						msg := strings.TrimSpace(s)
						upstreamErrorMessage = &msg
					}
				}
			}
			if upstreamErrorDetail == nil {
				if v, ok := c.Get(service.OpsUpstreamErrorDetailKey); ok {
					if s, ok := v.(string); ok && strings.TrimSpace(s) != "" {
						detail := strings.TrimSpace(s)
						upstreamErrorDetail = &detail
					}
				}
			}

			// If we still have nothing meaningful, skip.
			if upstreamStatusCode == nil && upstreamErrorMessage == nil && upstreamErrorDetail == nil && len(events) == 0 {
				return
			}

			effectiveUpstreamStatus := 0
			if upstreamStatusCode != nil {
				effectiveUpstreamStatus = *upstreamStatusCode
			}

			// Synthesize a human-readable summary, e.g.
			// "Recovered upstream error 503: overloaded".
			recoveredMsg := "Recovered upstream error"
			if effectiveUpstreamStatus > 0 {
				recoveredMsg += " " + strconvItoa(effectiveUpstreamStatus)
			}
			if upstreamErrorMessage != nil && strings.TrimSpace(*upstreamErrorMessage) != "" {
				recoveredMsg += ": " + strings.TrimSpace(*upstreamErrorMessage)
			}
			recoveredMsg = truncateString(recoveredMsg, 2048)

			entry := &service.OpsInsertErrorLogInput{
				RequestID:       requestID,
				ClientRequestID: clientRequestID,

				AccountID: accountID,
				Platform:  platform,
				Model:     modelName,
				RequestPath: func() string {
					if c.Request != nil && c.Request.URL != nil {
						return c.Request.URL.Path
					}
					return ""
				}(),
				Stream:    stream,
				UserAgent: c.GetHeader("User-Agent"),

				ErrorPhase: "upstream",
				ErrorType:  "upstream_error",
				// Severity/retryability should reflect the upstream failure, not the final client status (200).
				Severity:          classifyOpsSeverity("upstream_error", effectiveUpstreamStatus),
				StatusCode:        status,
				IsBusinessLimited: false,

				ErrorMessage: recoveredMsg,
				ErrorBody:    "",

				ErrorSource: "upstream_http",
				ErrorOwner:  "provider",

				UpstreamStatusCode:   upstreamStatusCode,
				UpstreamErrorMessage: upstreamErrorMessage,
				UpstreamErrorDetail:  upstreamErrorDetail,
				UpstreamErrors:       events,

				IsRetryable: classifyOpsIsRetryable("upstream_error", effectiveUpstreamStatus),
				RetryCount:  0,
				CreatedAt:   time.Now(),
			}

			if apiKey != nil {
				entry.APIKeyID = &apiKey.ID
				if apiKey.User != nil {
					entry.UserID = &apiKey.User.ID
				}
				if apiKey.GroupID != nil {
					entry.GroupID = apiKey.GroupID
				}
				// Prefer group platform if present (more stable than inferring from path).
				if apiKey.Group != nil && apiKey.Group.Platform != "" {
					entry.Platform = apiKey.Group.Platform
				}
			}

			var clientIP string
			if ip := strings.TrimSpace(c.ClientIP()); ip != "" {
				clientIP = ip
				entry.ClientIP = &clientIP
			}

			var requestBody []byte
			if v, ok := c.Get(opsRequestBodyKey); ok {
				if b, ok := v.([]byte); ok && len(b) > 0 {
					requestBody = b
				}
			}
			// Store request headers/body only when an upstream error occurred to keep overhead minimal.
			entry.RequestHeadersJSON = extractOpsRetryRequestHeaders(c)

			enqueueOpsErrorLog(ops, entry, requestBody)
			return
		}

		// --- status >= 400: the client actually received an error. ---
		body := w.buf.Bytes()
		parsed := parseOpsErrorResponse(body)

		apiKey, _ := middleware2.GetAPIKeyFromContext(c)

		clientRequestID, _ := c.Request.Context().Value(ctxkey.ClientRequestID).(string)

		model, _ := c.Get(opsModelKey)
		streamV, _ := c.Get(opsStreamKey)
		accountIDV, _ := c.Get(opsAccountIDKey)

		var modelName string
		if s, ok := model.(string); ok {
			modelName = s
		}
		stream := false
		if b, ok := streamV.(bool); ok {
			stream = b
		}
		var accountID *int64
		if v, ok := accountIDV.(int64); ok && v > 0 {
			accountID = &v
		}

		fallbackPlatform := guessPlatformFromPath(c.Request.URL.Path)
		platform := resolveOpsPlatform(apiKey, fallbackPlatform)

		requestID := c.Writer.Header().Get("X-Request-Id")
		if requestID == "" {
			requestID = c.Writer.Header().Get("x-request-id")
		}

		// Classify the error from the parsed body + final status.
		phase := classifyOpsPhase(parsed.ErrorType, parsed.Message, parsed.Code)
		isBusinessLimited := classifyOpsIsBusinessLimited(parsed.ErrorType, phase, parsed.Code, status, parsed.Message)

		errorOwner := classifyOpsErrorOwner(phase, parsed.Message)
		errorSource := classifyOpsErrorSource(phase, parsed.Message)

		entry := &service.OpsInsertErrorLogInput{
			RequestID:       requestID,
			ClientRequestID: clientRequestID,

			AccountID: accountID,
			Platform:  platform,
			Model:     modelName,
			RequestPath: func() string {
				if c.Request != nil && c.Request.URL != nil {
					return c.Request.URL.Path
				}
				return ""
			}(),
			Stream:    stream,
			UserAgent: c.GetHeader("User-Agent"),

			ErrorPhase:        phase,
			ErrorType:         normalizeOpsErrorType(parsed.ErrorType, parsed.Code),
			Severity:          classifyOpsSeverity(parsed.ErrorType, status),
			StatusCode:        status,
			IsBusinessLimited: isBusinessLimited,

			ErrorMessage: parsed.Message,
			// Keep the full captured error body (capture is already capped at 64KB) so the
			// service layer can sanitize JSON before truncating for storage.
			ErrorBody:   string(body),
			ErrorSource: errorSource,
			ErrorOwner:  errorOwner,

			IsRetryable: classifyOpsIsRetryable(parsed.ErrorType, status),
			RetryCount:  0,
			CreatedAt:   time.Now(),
		}

		// Capture upstream error context set by gateway services (if present).
		// This does NOT affect the client response; it enriches Ops troubleshooting data.
		{
			if v, ok := c.Get(service.OpsUpstreamStatusCodeKey); ok {
				switch t := v.(type) {
				case int:
					if t > 0 {
						code := t
						entry.UpstreamStatusCode = &code
					}
				case int64:
					if t > 0 {
						code := int(t)
						entry.UpstreamStatusCode = &code
					}
				}
			}
			if v, ok := c.Get(service.OpsUpstreamErrorMessageKey); ok {
				if s, ok := v.(string); ok {
					if msg := strings.TrimSpace(s); msg != "" {
						entry.UpstreamErrorMessage = &msg
					}
				}
			}
			if v, ok := c.Get(service.OpsUpstreamErrorDetailKey); ok {
				if s, ok := v.(string); ok {
					if detail := strings.TrimSpace(s); detail != "" {
						entry.UpstreamErrorDetail = &detail
					}
				}
			}
			if v, ok := c.Get(service.OpsUpstreamErrorsKey); ok {
				if events, ok := v.([]*service.OpsUpstreamErrorEvent); ok && len(events) > 0 {
					entry.UpstreamErrors = events
					// Best-effort backfill the single upstream fields from the last event when missing.
					last := events[len(events)-1]
					if last != nil {
						if entry.UpstreamStatusCode == nil && last.UpstreamStatusCode > 0 {
							code := last.UpstreamStatusCode
							entry.UpstreamStatusCode = &code
						}
						if entry.UpstreamErrorMessage == nil && strings.TrimSpace(last.Message) != "" {
							msg := strings.TrimSpace(last.Message)
							entry.UpstreamErrorMessage = &msg
						}
						if entry.UpstreamErrorDetail == nil && strings.TrimSpace(last.Detail) != "" {
							detail := strings.TrimSpace(last.Detail)
							entry.UpstreamErrorDetail = &detail
						}
					}
				}
			}
		}

		if apiKey != nil {
			entry.APIKeyID = &apiKey.ID
			if apiKey.User != nil {
				entry.UserID = &apiKey.User.ID
			}
			if apiKey.GroupID != nil {
				entry.GroupID = apiKey.GroupID
			}
			// Prefer group platform if present (more stable than inferring from path).
			if apiKey.Group != nil && apiKey.Group.Platform != "" {
				entry.Platform = apiKey.Group.Platform
			}
		}

		var clientIP string
		if ip := strings.TrimSpace(c.ClientIP()); ip != "" {
			clientIP = ip
			entry.ClientIP = &clientIP
		}

		var requestBody []byte
		if v, ok := c.Get(opsRequestBodyKey); ok {
			if b, ok := v.([]byte); ok && len(b) > 0 {
				requestBody = b
			}
		}
		// Persist only a minimal, whitelisted set of request headers to improve retry fidelity.
		// Do NOT store Authorization/Cookie/etc.
		entry.RequestHeadersJSON = extractOpsRetryRequestHeaders(c)

		enqueueOpsErrorLog(ops, entry, requestBody)
	}
}
|
||||||
|
|
||||||
|
// opsRetryRequestHeaderAllowlist lists the only request headers persisted for
// retry fidelity. Sensitive headers (Authorization, Cookie, ...) must never
// be added here.
var opsRetryRequestHeaderAllowlist = []string{
	"anthropic-beta",
	"anthropic-version",
}
|
||||||
|
|
||||||
|
func extractOpsRetryRequestHeaders(c *gin.Context) *string {
|
||||||
|
if c == nil || c.Request == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
headers := make(map[string]string, 4)
|
||||||
|
for _, key := range opsRetryRequestHeaderAllowlist {
|
||||||
|
v := strings.TrimSpace(c.GetHeader(key))
|
||||||
|
if v == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Keep headers small even if a client sends something unexpected.
|
||||||
|
headers[key] = truncateString(v, 512)
|
||||||
|
}
|
||||||
|
if len(headers) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
raw, err := json.Marshal(headers)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
s := string(raw)
|
||||||
|
return &s
|
||||||
|
}
|
||||||
|
|
||||||
|
// parsedOpsError is the normalized view of an error-response body extracted
// by parseOpsErrorResponse.
type parsedOpsError struct {
	// ErrorType is the provider error type (e.g. "rate_limit_error"); may be empty.
	ErrorType string
	// Message is the human-readable error message (possibly the raw body, truncated).
	Message string
	// Code is an application error code (e.g. "INSUFFICIENT_BALANCE") or a
	// numeric provider code rendered as a string; may be empty.
	Code string
}
|
||||||
|
|
||||||
|
func parseOpsErrorResponse(body []byte) parsedOpsError {
|
||||||
|
if len(body) == 0 {
|
||||||
|
return parsedOpsError{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fast path: attempt to decode into a generic map.
|
||||||
|
var m map[string]any
|
||||||
|
if err := json.Unmarshal(body, &m); err != nil {
|
||||||
|
return parsedOpsError{Message: truncateString(string(body), 1024)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Claude/OpenAI-style gateway error: { type:"error", error:{ type, message } }
|
||||||
|
if errObj, ok := m["error"].(map[string]any); ok {
|
||||||
|
t, _ := errObj["type"].(string)
|
||||||
|
msg, _ := errObj["message"].(string)
|
||||||
|
// Gemini googleError also uses "error": { code, message, status }
|
||||||
|
if msg == "" {
|
||||||
|
if v, ok := errObj["message"]; ok {
|
||||||
|
msg, _ = v.(string)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if t == "" {
|
||||||
|
// Gemini error does not have "type" field.
|
||||||
|
t = "api_error"
|
||||||
|
}
|
||||||
|
// For gemini error, capture numeric code as string for business-limited mapping if needed.
|
||||||
|
var code string
|
||||||
|
if v, ok := errObj["code"]; ok {
|
||||||
|
switch n := v.(type) {
|
||||||
|
case float64:
|
||||||
|
code = strconvItoa(int(n))
|
||||||
|
case int:
|
||||||
|
code = strconvItoa(n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return parsedOpsError{ErrorType: t, Message: msg, Code: code}
|
||||||
|
}
|
||||||
|
|
||||||
|
// APIKeyAuth-style: { code:"INSUFFICIENT_BALANCE", message:"..." }
|
||||||
|
code, _ := m["code"].(string)
|
||||||
|
msg, _ := m["message"].(string)
|
||||||
|
if code != "" || msg != "" {
|
||||||
|
return parsedOpsError{ErrorType: "api_error", Message: msg, Code: code}
|
||||||
|
}
|
||||||
|
|
||||||
|
return parsedOpsError{Message: truncateString(string(body), 1024)}
|
||||||
|
}
|
||||||
|
|
||||||
|
func resolveOpsPlatform(apiKey *service.APIKey, fallback string) string {
|
||||||
|
if apiKey != nil && apiKey.Group != nil && apiKey.Group.Platform != "" {
|
||||||
|
return apiKey.Group.Platform
|
||||||
|
}
|
||||||
|
return fallback
|
||||||
|
}
|
||||||
|
|
||||||
|
func guessPlatformFromPath(path string) string {
|
||||||
|
p := strings.ToLower(path)
|
||||||
|
switch {
|
||||||
|
case strings.HasPrefix(p, "/antigravity/"):
|
||||||
|
return service.PlatformAntigravity
|
||||||
|
case strings.HasPrefix(p, "/v1beta/"):
|
||||||
|
return service.PlatformGemini
|
||||||
|
case strings.Contains(p, "/responses"):
|
||||||
|
return service.PlatformOpenAI
|
||||||
|
default:
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// normalizeOpsErrorType maps an (errType, code) pair to a canonical error
// type. A non-empty errType wins; otherwise known business codes map to
// billing/subscription errors and everything else becomes "api_error".
func normalizeOpsErrorType(errType string, code string) string {
	if errType != "" {
		return errType
	}
	trimmed := strings.TrimSpace(code)
	if trimmed == "INSUFFICIENT_BALANCE" {
		return "billing_error"
	}
	if trimmed == "USAGE_LIMIT_EXCEEDED" || trimmed == "SUBSCRIPTION_NOT_FOUND" || trimmed == "SUBSCRIPTION_INVALID" {
		return "subscription_error"
	}
	return "api_error"
}
|
||||||
|
|
||||||
|
// classifyOpsPhase buckets an error into a pipeline phase (auth, billing,
// concurrency, upstream, response, scheduling, internal) from its type,
// message, and application code. Known business codes always win.
func classifyOpsPhase(errType, message, code string) string {
	lowerMsg := strings.ToLower(message)

	trimmedCode := strings.TrimSpace(code)
	if trimmedCode == "INSUFFICIENT_BALANCE" || trimmedCode == "USAGE_LIMIT_EXCEEDED" ||
		trimmedCode == "SUBSCRIPTION_NOT_FOUND" || trimmedCode == "SUBSCRIPTION_INVALID" {
		return "billing"
	}

	if errType == "authentication_error" {
		return "auth"
	}
	if errType == "billing_error" || errType == "subscription_error" {
		return "billing"
	}
	if errType == "rate_limit_error" {
		// Queue/concurrency-pressure rate limits are our own, not upstream's.
		for _, hint := range []string{"concurrency", "pending", "queue"} {
			if strings.Contains(lowerMsg, hint) {
				return "concurrency"
			}
		}
		return "upstream"
	}
	if errType == "invalid_request_error" {
		return "response"
	}
	if errType == "upstream_error" || errType == "overloaded_error" {
		return "upstream"
	}
	if errType == "api_error" && strings.Contains(lowerMsg, "no available accounts") {
		return "scheduling"
	}
	return "internal"
}
|
||||||
|
|
||||||
|
// classifyOpsSeverity grades an error P1..P3. Client-caused categories are
// always P3; otherwise 5xx and 429 are P1, other 4xx are P2, and anything
// below 400 is P3.
func classifyOpsSeverity(errType string, status int) string {
	switch errType {
	case "invalid_request_error", "authentication_error", "billing_error", "subscription_error":
		return "P3"
	}
	switch {
	case status >= 500 || status == 429:
		return "P1"
	case status >= 400:
		return "P2"
	default:
		return "P3"
	}
}
|
||||||
|
|
||||||
|
// classifyOpsIsRetryable reports whether retrying could plausibly succeed
// for the given error type and status code.
func classifyOpsIsRetryable(errType string, statusCode int) bool {
	switch errType {
	case "timeout_error", "rate_limit_error":
		// Timeouts and rate limits may be transient (upstream or queue).
		return true
	case "authentication_error", "invalid_request_error", "billing_error", "subscription_error":
		// Deterministic client/account problems: a retry cannot help.
		return false
	case "upstream_error", "overloaded_error":
		// 529 is Anthropic's "overloaded" status.
		return statusCode >= 500 || statusCode == 429 || statusCode == 529
	}
	return statusCode >= 500
}
|
||||||
|
|
||||||
|
// classifyOpsIsBusinessLimited reports whether the error is a user-level
// business limit (balance/usage/subscription/concurrency) rather than a real
// failure. SLA/error-rate metrics exclude business-limited errors.
func classifyOpsIsBusinessLimited(errType, phase, code string, status int, message string) bool {
	_ = status // accepted for signature symmetry; unused by the current rules

	switch strings.TrimSpace(code) {
	case "INSUFFICIENT_BALANCE", "USAGE_LIMIT_EXCEEDED", "SUBSCRIPTION_NOT_FOUND", "SUBSCRIPTION_INVALID":
		return true
	}
	if phase == "billing" || phase == "concurrency" {
		return true
	}
	// Upstream rate limits are provider pressure, not a business limit.
	if errType == "rate_limit_error" && strings.Contains(strings.ToLower(message), "upstream") {
		return false
	}
	return false
}
|
||||||
|
|
||||||
|
// classifyOpsErrorOwner attributes an error to the provider, the client, or
// sub2api itself, based on the phase with a message heuristic as fallback.
func classifyOpsErrorOwner(phase string, message string) string {
	if phase == "upstream" || phase == "network" {
		return "provider"
	}
	if phase == "billing" || phase == "concurrency" || phase == "auth" || phase == "response" {
		return "client"
	}
	// Unknown phase: mentions of "upstream" in the message point at the provider.
	if strings.Contains(strings.ToLower(message), "upstream") {
		return "provider"
	}
	return "sub2api"
}
|
||||||
|
|
||||||
|
// classifyOpsErrorSource maps a phase to an error-source label, falling back
// to a message heuristic (mentions of "upstream") and then "internal".
func classifyOpsErrorSource(phase string, message string) string {
	switch phase {
	case "upstream":
		return "upstream_http"
	case "network":
		return "upstream_network"
	case "billing", "concurrency":
		// These phases double as their own source labels.
		return phase
	}
	if strings.Contains(strings.ToLower(message), "upstream") {
		return "upstream_http"
	}
	return "internal"
}
|
||||||
|
|
||||||
|
// truncateString returns s limited to at most max bytes. When the byte cut
// would split a multi-byte UTF-8 sequence, the trailing bytes of the
// incomplete rune are dropped so valid input stays valid after truncation.
// max <= 0 yields "".
func truncateString(s string, max int) string {
	if max <= 0 {
		return ""
	}
	if len(s) <= max {
		return s
	}
	cut := s[:max]
	// Trim the bytes of a rune the cut split in half. DecodeLastRuneInString
	// yields (RuneError, 1) for an incomplete or invalid trailing sequence, so
	// we drop one byte at a time until the string ends on a complete rune.
	// This is O(1) per step, unlike re-validating the whole prefix with
	// utf8.ValidString on every iteration, and it never discards data before
	// the cut point when the input already contained invalid bytes.
	for len(cut) > 0 {
		r, size := utf8.DecodeLastRuneInString(cut)
		if r != utf8.RuneError || size > 1 {
			break
		}
		cut = cut[:len(cut)-1]
	}
	return cut
}
|
||||||
|
|
||||||
|
// strconvItoa renders v in base 10; a thin local alias for strconv.Itoa
// kept for call-site brevity.
func strconvItoa(v int) string {
	rendered := strconv.Itoa(v)
	return rendered
}
|
||||||
@@ -42,6 +42,7 @@ func (h *SettingHandler) GetPublicSettings(c *gin.Context) {
|
|||||||
APIBaseURL: settings.APIBaseURL,
|
APIBaseURL: settings.APIBaseURL,
|
||||||
ContactInfo: settings.ContactInfo,
|
ContactInfo: settings.ContactInfo,
|
||||||
DocURL: settings.DocURL,
|
DocURL: settings.DocURL,
|
||||||
|
HomeContent: settings.HomeContent,
|
||||||
LinuxDoOAuthEnabled: settings.LinuxDoOAuthEnabled,
|
LinuxDoOAuthEnabled: settings.LinuxDoOAuthEnabled,
|
||||||
Version: h.version,
|
Version: h.version,
|
||||||
})
|
})
|
||||||
|
|||||||
@@ -19,7 +19,9 @@ func ProvideAdminHandlers(
|
|||||||
antigravityOAuthHandler *admin.AntigravityOAuthHandler,
|
antigravityOAuthHandler *admin.AntigravityOAuthHandler,
|
||||||
proxyHandler *admin.ProxyHandler,
|
proxyHandler *admin.ProxyHandler,
|
||||||
redeemHandler *admin.RedeemHandler,
|
redeemHandler *admin.RedeemHandler,
|
||||||
|
promoHandler *admin.PromoHandler,
|
||||||
settingHandler *admin.SettingHandler,
|
settingHandler *admin.SettingHandler,
|
||||||
|
opsHandler *admin.OpsHandler,
|
||||||
systemHandler *admin.SystemHandler,
|
systemHandler *admin.SystemHandler,
|
||||||
subscriptionHandler *admin.SubscriptionHandler,
|
subscriptionHandler *admin.SubscriptionHandler,
|
||||||
usageHandler *admin.UsageHandler,
|
usageHandler *admin.UsageHandler,
|
||||||
@@ -36,7 +38,9 @@ func ProvideAdminHandlers(
|
|||||||
AntigravityOAuth: antigravityOAuthHandler,
|
AntigravityOAuth: antigravityOAuthHandler,
|
||||||
Proxy: proxyHandler,
|
Proxy: proxyHandler,
|
||||||
Redeem: redeemHandler,
|
Redeem: redeemHandler,
|
||||||
|
Promo: promoHandler,
|
||||||
Setting: settingHandler,
|
Setting: settingHandler,
|
||||||
|
Ops: opsHandler,
|
||||||
System: systemHandler,
|
System: systemHandler,
|
||||||
Subscription: subscriptionHandler,
|
Subscription: subscriptionHandler,
|
||||||
Usage: usageHandler,
|
Usage: usageHandler,
|
||||||
@@ -105,7 +109,9 @@ var ProviderSet = wire.NewSet(
|
|||||||
admin.NewAntigravityOAuthHandler,
|
admin.NewAntigravityOAuthHandler,
|
||||||
admin.NewProxyHandler,
|
admin.NewProxyHandler,
|
||||||
admin.NewRedeemHandler,
|
admin.NewRedeemHandler,
|
||||||
|
admin.NewPromoHandler,
|
||||||
admin.NewSettingHandler,
|
admin.NewSettingHandler,
|
||||||
|
admin.NewOpsHandler,
|
||||||
ProvideSystemHandler,
|
ProvideSystemHandler,
|
||||||
admin.NewSubscriptionHandler,
|
admin.NewSubscriptionHandler,
|
||||||
admin.NewUsageHandler,
|
admin.NewUsageHandler,
|
||||||
|
|||||||
111
backend/internal/middleware/rate_limiter.go
Normal file
111
backend/internal/middleware/rate_limiter.go
Normal file
@@ -0,0 +1,111 @@
|
|||||||
|
package middleware
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/gin-gonic/gin"
|
||||||
|
"github.com/redis/go-redis/v9"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RateLimitFailureMode selects how the limiter behaves when Redis fails.
type RateLimitFailureMode int
|
||||||
|
|
||||||
|
const (
	// RateLimitFailOpen admits requests when Redis is unavailable (default).
	RateLimitFailOpen RateLimitFailureMode = iota
	// RateLimitFailClose rejects requests when Redis is unavailable.
	RateLimitFailClose
)
|
||||||
|
|
||||||
|
// RateLimitOptions carries optional rate-limiter configuration.
type RateLimitOptions struct {
	// FailureMode decides fail-open vs fail-close on Redis errors.
	FailureMode RateLimitFailureMode
}
|
||||||
|
|
||||||
|
// rateLimitScript atomically increments the counter key and (re)arms its
// expiry when the key is new (count == 1) or has lost its TTL (PTTL == -1),
// then returns the post-increment count.
var rateLimitScript = redis.NewScript(`
local current = redis.call('INCR', KEYS[1])
local ttl = redis.call('PTTL', KEYS[1])
if current == 1 or ttl == -1 then
redis.call('PEXPIRE', KEYS[1], ARGV[1])
end
return current
`)
|
||||||
|
|
||||||
|
// rateLimitRun executes the limiter script against Redis; it is a variable
// so tests can override the execution logic.
var rateLimitRun = func(ctx context.Context, client *redis.Client, key string, windowMillis int64) (int64, error) {
	return rateLimitScript.Run(ctx, client, []string{key}, windowMillis).Int64()
}
|
||||||
|
|
||||||
|
// RateLimiter is a Redis-backed fixed-window, per-client-IP rate limiter.
type RateLimiter struct {
	// redis is the shared client used for all counter operations.
	redis *redis.Client
	// prefix namespaces every limiter key (default "rate_limit:").
	prefix string
}
|
||||||
|
|
||||||
|
// NewRateLimiter 创建速率限制器实例
|
||||||
|
func NewRateLimiter(redisClient *redis.Client) *RateLimiter {
|
||||||
|
return &RateLimiter{
|
||||||
|
redis: redisClient,
|
||||||
|
prefix: "rate_limit:",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Limit returns a rate-limiting middleware with default options (fail-open).
//
//	key:    identifier for this limit class
//	limit:  maximum requests allowed per window
//	window: length of the counting window
func (r *RateLimiter) Limit(key string, limit int, window time.Duration) gin.HandlerFunc {
	return r.LimitWithOptions(key, limit, window, RateLimitOptions{})
}
|
||||||
|
|
||||||
|
// LimitWithOptions 返回速率限制中间件(带可选配置)
|
||||||
|
func (r *RateLimiter) LimitWithOptions(key string, limit int, window time.Duration, opts RateLimitOptions) gin.HandlerFunc {
|
||||||
|
failureMode := opts.FailureMode
|
||||||
|
if failureMode != RateLimitFailClose {
|
||||||
|
failureMode = RateLimitFailOpen
|
||||||
|
}
|
||||||
|
|
||||||
|
return func(c *gin.Context) {
|
||||||
|
ip := c.ClientIP()
|
||||||
|
redisKey := r.prefix + key + ":" + ip
|
||||||
|
|
||||||
|
ctx := c.Request.Context()
|
||||||
|
|
||||||
|
windowMillis := windowTTLMillis(window)
|
||||||
|
|
||||||
|
// 使用 Lua 脚本原子操作增加计数并设置过期
|
||||||
|
count, err := rateLimitRun(ctx, r.redis, redisKey, windowMillis)
|
||||||
|
if err != nil {
|
||||||
|
if failureMode == RateLimitFailClose {
|
||||||
|
abortRateLimit(c)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Redis 错误时放行,避免影响正常服务
|
||||||
|
c.Next()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// 超过限制
|
||||||
|
if count > int64(limit) {
|
||||||
|
abortRateLimit(c)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Next()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func windowTTLMillis(window time.Duration) int64 {
|
||||||
|
ttl := window.Milliseconds()
|
||||||
|
if ttl < 1 {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
return ttl
|
||||||
|
}
|
||||||
|
|
||||||
|
func abortRateLimit(c *gin.Context) {
|
||||||
|
c.AbortWithStatusJSON(http.StatusTooManyRequests, gin.H{
|
||||||
|
"error": "rate limit exceeded",
|
||||||
|
"message": "Too many requests, please try again later",
|
||||||
|
})
|
||||||
|
}
|
||||||
114
backend/internal/middleware/rate_limiter_integration_test.go
Normal file
114
backend/internal/middleware/rate_limiter_integration_test.go
Normal file
@@ -0,0 +1,114 @@
|
|||||||
|
//go:build integration
|
||||||
|
|
||||||
|
package middleware
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/gin-gonic/gin"
|
||||||
|
"github.com/redis/go-redis/v9"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
tcredis "github.com/testcontainers/testcontainers-go/modules/redis"
|
||||||
|
)
|
||||||
|
|
||||||
|
// redisImageTag pins the Redis container image used by integration tests.
const redisImageTag = "redis:8.4-alpine"
|
||||||
|
|
||||||
|
// TestRateLimiterSetsTTLAndDoesNotRefresh verifies that the first request
// arms the window TTL and that later requests do not re-arm it — i.e. the
// limiter is a fixed window, not a sliding one.
func TestRateLimiterSetsTTLAndDoesNotRefresh(t *testing.T) {
	gin.SetMode(gin.TestMode)

	ctx := context.Background()
	rdb := startRedis(t, ctx)
	limiter := NewRateLimiter(rdb)

	router := gin.New()
	router.Use(limiter.Limit("ttl-test", 10, 2*time.Second))
	router.GET("/test", func(c *gin.Context) {
		c.JSON(http.StatusOK, gin.H{"ok": true})
	})

	recorder := performRequest(router)
	require.Equal(t, http.StatusOK, recorder.Code)

	// First hit must have set a TTL within (0, window].
	redisKey := limiter.prefix + "ttl-test:127.0.0.1"
	ttlBefore, err := rdb.PTTL(ctx, redisKey).Result()
	require.NoError(t, err)
	require.Greater(t, ttlBefore, time.Duration(0))
	require.LessOrEqual(t, ttlBefore, 2*time.Second)

	// Let a measurable amount of time elapse so a refreshed TTL would be detectable.
	time.Sleep(50 * time.Millisecond)

	recorder = performRequest(router)
	require.Equal(t, http.StatusOK, recorder.Code)

	// TTL must have decayed, proving the second request did not re-arm it.
	ttlAfter, err := rdb.PTTL(ctx, redisKey).Result()
	require.NoError(t, err)
	require.Less(t, ttlAfter, ttlBefore)
}
|
||||||
|
|
||||||
|
// TestRateLimiterFixesMissingTTL verifies the Lua script's self-healing:
// a pre-existing counter key with no TTL (PTTL == -1) gets an expiry
// attached on the next request, so it cannot leak forever.
func TestRateLimiterFixesMissingTTL(t *testing.T) {
	gin.SetMode(gin.TestMode)

	ctx := context.Background()
	rdb := startRedis(t, ctx)
	limiter := NewRateLimiter(rdb)

	router := gin.New()
	router.Use(limiter.Limit("ttl-missing", 10, 2*time.Second))
	router.GET("/test", func(c *gin.Context) {
		c.JSON(http.StatusOK, gin.H{"ok": true})
	})

	// Plant a counter with no expiry, as if a previous PEXPIRE was lost.
	redisKey := limiter.prefix + "ttl-missing:127.0.0.1"
	require.NoError(t, rdb.Set(ctx, redisKey, 5, 0).Err())

	// PTTL is negative (-1) for a key without a TTL.
	ttlBefore, err := rdb.PTTL(ctx, redisKey).Result()
	require.NoError(t, err)
	require.Less(t, ttlBefore, time.Duration(0))

	recorder := performRequest(router)
	require.Equal(t, http.StatusOK, recorder.Code)

	// The request must have attached a positive TTL.
	ttlAfter, err := rdb.PTTL(ctx, redisKey).Result()
	require.NoError(t, err)
	require.Greater(t, ttlAfter, time.Duration(0))
}
|
||||||
|
|
||||||
|
func performRequest(router *gin.Engine) *httptest.ResponseRecorder {
|
||||||
|
req := httptest.NewRequest(http.MethodGet, "/test", nil)
|
||||||
|
req.RemoteAddr = "127.0.0.1:1234"
|
||||||
|
recorder := httptest.NewRecorder()
|
||||||
|
router.ServeHTTP(recorder, req)
|
||||||
|
return recorder
|
||||||
|
}
|
||||||
|
|
||||||
|
func startRedis(t *testing.T, ctx context.Context) *redis.Client {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
redisContainer, err := tcredis.Run(ctx, redisImageTag)
|
||||||
|
require.NoError(t, err)
|
||||||
|
t.Cleanup(func() {
|
||||||
|
_ = redisContainer.Terminate(ctx)
|
||||||
|
})
|
||||||
|
|
||||||
|
redisHost, err := redisContainer.Host(ctx)
|
||||||
|
require.NoError(t, err)
|
||||||
|
redisPort, err := redisContainer.MappedPort(ctx, "6379/tcp")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
rdb := redis.NewClient(&redis.Options{
|
||||||
|
Addr: fmt.Sprintf("%s:%d", redisHost, redisPort.Int()),
|
||||||
|
DB: 0,
|
||||||
|
})
|
||||||
|
require.NoError(t, rdb.Ping(ctx).Err())
|
||||||
|
|
||||||
|
t.Cleanup(func() {
|
||||||
|
_ = rdb.Close()
|
||||||
|
})
|
||||||
|
|
||||||
|
return rdb
|
||||||
|
}
|
||||||
100
backend/internal/middleware/rate_limiter_test.go
Normal file
100
backend/internal/middleware/rate_limiter_test.go
Normal file
@@ -0,0 +1,100 @@
|
|||||||
|
package middleware
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/gin-gonic/gin"
|
||||||
|
"github.com/redis/go-redis/v9"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestWindowTTLMillis(t *testing.T) {
|
||||||
|
require.Equal(t, int64(1), windowTTLMillis(500*time.Microsecond))
|
||||||
|
require.Equal(t, int64(1), windowTTLMillis(1500*time.Microsecond))
|
||||||
|
require.Equal(t, int64(2), windowTTLMillis(2500*time.Microsecond))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRateLimiterFailureModes(t *testing.T) {
|
||||||
|
gin.SetMode(gin.TestMode)
|
||||||
|
|
||||||
|
rdb := redis.NewClient(&redis.Options{
|
||||||
|
Addr: "127.0.0.1:1",
|
||||||
|
DialTimeout: 50 * time.Millisecond,
|
||||||
|
ReadTimeout: 50 * time.Millisecond,
|
||||||
|
WriteTimeout: 50 * time.Millisecond,
|
||||||
|
})
|
||||||
|
t.Cleanup(func() {
|
||||||
|
_ = rdb.Close()
|
||||||
|
})
|
||||||
|
|
||||||
|
limiter := NewRateLimiter(rdb)
|
||||||
|
|
||||||
|
failOpenRouter := gin.New()
|
||||||
|
failOpenRouter.Use(limiter.Limit("test", 1, time.Second))
|
||||||
|
failOpenRouter.GET("/test", func(c *gin.Context) {
|
||||||
|
c.JSON(http.StatusOK, gin.H{"ok": true})
|
||||||
|
})
|
||||||
|
|
||||||
|
req := httptest.NewRequest(http.MethodGet, "/test", nil)
|
||||||
|
req.RemoteAddr = "127.0.0.1:1234"
|
||||||
|
recorder := httptest.NewRecorder()
|
||||||
|
failOpenRouter.ServeHTTP(recorder, req)
|
||||||
|
require.Equal(t, http.StatusOK, recorder.Code)
|
||||||
|
|
||||||
|
failCloseRouter := gin.New()
|
||||||
|
failCloseRouter.Use(limiter.LimitWithOptions("test", 1, time.Second, RateLimitOptions{
|
||||||
|
FailureMode: RateLimitFailClose,
|
||||||
|
}))
|
||||||
|
failCloseRouter.GET("/test", func(c *gin.Context) {
|
||||||
|
c.JSON(http.StatusOK, gin.H{"ok": true})
|
||||||
|
})
|
||||||
|
|
||||||
|
req = httptest.NewRequest(http.MethodGet, "/test", nil)
|
||||||
|
req.RemoteAddr = "127.0.0.1:1234"
|
||||||
|
recorder = httptest.NewRecorder()
|
||||||
|
failCloseRouter.ServeHTTP(recorder, req)
|
||||||
|
require.Equal(t, http.StatusTooManyRequests, recorder.Code)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRateLimiterSuccessAndLimit(t *testing.T) {
|
||||||
|
gin.SetMode(gin.TestMode)
|
||||||
|
|
||||||
|
originalRun := rateLimitRun
|
||||||
|
counts := []int64{1, 2}
|
||||||
|
callIndex := 0
|
||||||
|
rateLimitRun = func(ctx context.Context, client *redis.Client, key string, windowMillis int64) (int64, error) {
|
||||||
|
if callIndex >= len(counts) {
|
||||||
|
return counts[len(counts)-1], nil
|
||||||
|
}
|
||||||
|
value := counts[callIndex]
|
||||||
|
callIndex++
|
||||||
|
return value, nil
|
||||||
|
}
|
||||||
|
t.Cleanup(func() {
|
||||||
|
rateLimitRun = originalRun
|
||||||
|
})
|
||||||
|
|
||||||
|
limiter := NewRateLimiter(redis.NewClient(&redis.Options{Addr: "127.0.0.1:1"}))
|
||||||
|
|
||||||
|
router := gin.New()
|
||||||
|
router.Use(limiter.Limit("test", 1, time.Second))
|
||||||
|
router.GET("/test", func(c *gin.Context) {
|
||||||
|
c.JSON(http.StatusOK, gin.H{"ok": true})
|
||||||
|
})
|
||||||
|
|
||||||
|
req := httptest.NewRequest(http.MethodGet, "/test", nil)
|
||||||
|
req.RemoteAddr = "127.0.0.1:1234"
|
||||||
|
recorder := httptest.NewRecorder()
|
||||||
|
router.ServeHTTP(recorder, req)
|
||||||
|
require.Equal(t, http.StatusOK, recorder.Code)
|
||||||
|
|
||||||
|
req = httptest.NewRequest(http.MethodGet, "/test", nil)
|
||||||
|
req.RemoteAddr = "127.0.0.1:1234"
|
||||||
|
recorder = httptest.NewRecorder()
|
||||||
|
router.ServeHTTP(recorder, req)
|
||||||
|
require.Equal(t, http.StatusTooManyRequests, recorder.Code)
|
||||||
|
}
|
||||||
@@ -7,6 +7,15 @@ type Key string
|
|||||||
const (
|
const (
|
||||||
// ForcePlatform 强制平台(用于 /antigravity 路由),由 middleware.ForcePlatform 设置
|
// ForcePlatform 强制平台(用于 /antigravity 路由),由 middleware.ForcePlatform 设置
|
||||||
ForcePlatform Key = "ctx_force_platform"
|
ForcePlatform Key = "ctx_force_platform"
|
||||||
// IsClaudeCodeClient 是否为 Claude Code 客户端,由中间件设置
|
|
||||||
|
// ClientRequestID 客户端请求的唯一标识,用于追踪请求全生命周期(用于 Ops 监控与排障)。
|
||||||
|
ClientRequestID Key = "ctx_client_request_id"
|
||||||
|
|
||||||
|
// RetryCount 表示当前请求在网关层的重试次数(用于 Ops 记录与排障)。
|
||||||
|
RetryCount Key = "ctx_retry_count"
|
||||||
|
|
||||||
|
// IsClaudeCodeClient 标识当前请求是否来自 Claude Code 客户端
|
||||||
IsClaudeCodeClient Key = "ctx_is_claude_code_client"
|
IsClaudeCodeClient Key = "ctx_is_claude_code_client"
|
||||||
|
// Group 认证后的分组信息,由 API Key 认证中间件设置
|
||||||
|
Group Key = "ctx_group"
|
||||||
)
|
)
|
||||||
|
|||||||
168
backend/internal/pkg/ip/ip.go
Normal file
168
backend/internal/pkg/ip/ip.go
Normal file
@@ -0,0 +1,168 @@
|
|||||||
|
// Package ip 提供客户端 IP 地址提取工具。
|
||||||
|
package ip
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/gin-gonic/gin"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetClientIP 从 Gin Context 中提取客户端真实 IP 地址。
|
||||||
|
// 按以下优先级检查 Header:
|
||||||
|
// 1. CF-Connecting-IP (Cloudflare)
|
||||||
|
// 2. X-Real-IP (Nginx)
|
||||||
|
// 3. X-Forwarded-For (取第一个非私有 IP)
|
||||||
|
// 4. c.ClientIP() (Gin 内置方法)
|
||||||
|
func GetClientIP(c *gin.Context) string {
|
||||||
|
// 1. Cloudflare
|
||||||
|
if ip := c.GetHeader("CF-Connecting-IP"); ip != "" {
|
||||||
|
return normalizeIP(ip)
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2. Nginx X-Real-IP
|
||||||
|
if ip := c.GetHeader("X-Real-IP"); ip != "" {
|
||||||
|
return normalizeIP(ip)
|
||||||
|
}
|
||||||
|
|
||||||
|
// 3. X-Forwarded-For (多个 IP 时取第一个公网 IP)
|
||||||
|
if xff := c.GetHeader("X-Forwarded-For"); xff != "" {
|
||||||
|
ips := strings.Split(xff, ",")
|
||||||
|
for _, ip := range ips {
|
||||||
|
ip = strings.TrimSpace(ip)
|
||||||
|
if ip != "" && !isPrivateIP(ip) {
|
||||||
|
return normalizeIP(ip)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// 如果都是私有 IP,返回第一个
|
||||||
|
if len(ips) > 0 {
|
||||||
|
return normalizeIP(strings.TrimSpace(ips[0]))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// 4. Gin 内置方法
|
||||||
|
return normalizeIP(c.ClientIP())
|
||||||
|
}
|
||||||
|
|
||||||
|
// normalizeIP 规范化 IP 地址,去除端口号和空格。
|
||||||
|
func normalizeIP(ip string) string {
|
||||||
|
ip = strings.TrimSpace(ip)
|
||||||
|
// 移除端口号(如 "192.168.1.1:8080" -> "192.168.1.1")
|
||||||
|
if host, _, err := net.SplitHostPort(ip); err == nil {
|
||||||
|
return host
|
||||||
|
}
|
||||||
|
return ip
|
||||||
|
}
|
||||||
|
|
||||||
|
// isPrivateIP 检查 IP 是否为私有地址。
|
||||||
|
func isPrivateIP(ipStr string) bool {
|
||||||
|
ip := net.ParseIP(ipStr)
|
||||||
|
if ip == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// 私有 IP 范围
|
||||||
|
privateBlocks := []string{
|
||||||
|
"10.0.0.0/8",
|
||||||
|
"172.16.0.0/12",
|
||||||
|
"192.168.0.0/16",
|
||||||
|
"127.0.0.0/8",
|
||||||
|
"::1/128",
|
||||||
|
"fc00::/7",
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, block := range privateBlocks {
|
||||||
|
_, cidr, err := net.ParseCIDR(block)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if cidr.Contains(ip) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// MatchesPattern 检查 IP 是否匹配指定的模式(支持单个 IP 或 CIDR)。
|
||||||
|
// pattern 可以是:
|
||||||
|
// - 单个 IP: "192.168.1.100"
|
||||||
|
// - CIDR 范围: "192.168.1.0/24"
|
||||||
|
func MatchesPattern(clientIP, pattern string) bool {
|
||||||
|
ip := net.ParseIP(clientIP)
|
||||||
|
if ip == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// 尝试解析为 CIDR
|
||||||
|
if strings.Contains(pattern, "/") {
|
||||||
|
_, cidr, err := net.ParseCIDR(pattern)
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return cidr.Contains(ip)
|
||||||
|
}
|
||||||
|
|
||||||
|
// 作为单个 IP 处理
|
||||||
|
patternIP := net.ParseIP(pattern)
|
||||||
|
if patternIP == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return ip.Equal(patternIP)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MatchesAnyPattern 检查 IP 是否匹配任意一个模式。
|
||||||
|
func MatchesAnyPattern(clientIP string, patterns []string) bool {
|
||||||
|
for _, pattern := range patterns {
|
||||||
|
if MatchesPattern(clientIP, pattern) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// CheckIPRestriction 检查 IP 是否被 API Key 的 IP 限制允许。
|
||||||
|
// 返回值:(是否允许, 拒绝原因)
|
||||||
|
// 逻辑:
|
||||||
|
// 1. 先检查黑名单,如果在黑名单中则直接拒绝
|
||||||
|
// 2. 如果白名单不为空,IP 必须在白名单中
|
||||||
|
// 3. 如果白名单为空,允许访问(除非被黑名单拒绝)
|
||||||
|
func CheckIPRestriction(clientIP string, whitelist, blacklist []string) (bool, string) {
|
||||||
|
// 规范化 IP
|
||||||
|
clientIP = normalizeIP(clientIP)
|
||||||
|
if clientIP == "" {
|
||||||
|
return false, "access denied"
|
||||||
|
}
|
||||||
|
|
||||||
|
// 1. 检查黑名单
|
||||||
|
if len(blacklist) > 0 && MatchesAnyPattern(clientIP, blacklist) {
|
||||||
|
return false, "access denied"
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2. 检查白名单(如果设置了白名单,IP 必须在其中)
|
||||||
|
if len(whitelist) > 0 && !MatchesAnyPattern(clientIP, whitelist) {
|
||||||
|
return false, "access denied"
|
||||||
|
}
|
||||||
|
|
||||||
|
return true, ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidateIPPattern 验证 IP 或 CIDR 格式是否有效。
|
||||||
|
func ValidateIPPattern(pattern string) bool {
|
||||||
|
if strings.Contains(pattern, "/") {
|
||||||
|
_, _, err := net.ParseCIDR(pattern)
|
||||||
|
return err == nil
|
||||||
|
}
|
||||||
|
return net.ParseIP(pattern) != nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidateIPPatterns 验证多个 IP 或 CIDR 格式。
|
||||||
|
// 返回无效的模式列表。
|
||||||
|
func ValidateIPPatterns(patterns []string) []string {
|
||||||
|
var invalid []string
|
||||||
|
for _, p := range patterns {
|
||||||
|
if !ValidateIPPattern(p) {
|
||||||
|
invalid = append(invalid, p)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return invalid
|
||||||
|
}
|
||||||
@@ -9,6 +9,12 @@ type DashboardStats struct {
|
|||||||
TotalUsers int64 `json:"total_users"`
|
TotalUsers int64 `json:"total_users"`
|
||||||
TodayNewUsers int64 `json:"today_new_users"` // 今日新增用户数
|
TodayNewUsers int64 `json:"today_new_users"` // 今日新增用户数
|
||||||
ActiveUsers int64 `json:"active_users"` // 今日有请求的用户数
|
ActiveUsers int64 `json:"active_users"` // 今日有请求的用户数
|
||||||
|
// 小时活跃用户数(UTC 当前小时)
|
||||||
|
HourlyActiveUsers int64 `json:"hourly_active_users"`
|
||||||
|
|
||||||
|
// 预聚合新鲜度
|
||||||
|
StatsUpdatedAt string `json:"stats_updated_at"`
|
||||||
|
StatsStale bool `json:"stats_stale"`
|
||||||
|
|
||||||
// API Key 统计
|
// API Key 统计
|
||||||
TotalAPIKeys int64 `json:"total_api_keys"`
|
TotalAPIKeys int64 `json:"total_api_keys"`
|
||||||
|
|||||||
@@ -15,6 +15,7 @@ import (
|
|||||||
"database/sql"
|
"database/sql"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
|
"log"
|
||||||
"strconv"
|
"strconv"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -115,6 +116,9 @@ func (r *accountRepository) Create(ctx context.Context, account *service.Account
|
|||||||
account.ID = created.ID
|
account.ID = created.ID
|
||||||
account.CreatedAt = created.CreatedAt
|
account.CreatedAt = created.CreatedAt
|
||||||
account.UpdatedAt = created.UpdatedAt
|
account.UpdatedAt = created.UpdatedAt
|
||||||
|
if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &account.ID, nil, buildSchedulerGroupPayload(account.GroupIDs)); err != nil {
|
||||||
|
log.Printf("[SchedulerOutbox] enqueue account create failed: account=%d err=%v", account.ID, err)
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -341,10 +345,17 @@ func (r *accountRepository) Update(ctx context.Context, account *service.Account
|
|||||||
return translatePersistenceError(err, service.ErrAccountNotFound, nil)
|
return translatePersistenceError(err, service.ErrAccountNotFound, nil)
|
||||||
}
|
}
|
||||||
account.UpdatedAt = updated.UpdatedAt
|
account.UpdatedAt = updated.UpdatedAt
|
||||||
|
if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &account.ID, nil, buildSchedulerGroupPayload(account.GroupIDs)); err != nil {
|
||||||
|
log.Printf("[SchedulerOutbox] enqueue account update failed: account=%d err=%v", account.ID, err)
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *accountRepository) Delete(ctx context.Context, id int64) error {
|
func (r *accountRepository) Delete(ctx context.Context, id int64) error {
|
||||||
|
groupIDs, err := r.loadAccountGroupIDs(ctx, id)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
// 使用事务保证账号与关联分组的删除原子性
|
// 使用事务保证账号与关联分组的删除原子性
|
||||||
tx, err := r.client.Tx(ctx)
|
tx, err := r.client.Tx(ctx)
|
||||||
if err != nil && !errors.Is(err, dbent.ErrTxStarted) {
|
if err != nil && !errors.Is(err, dbent.ErrTxStarted) {
|
||||||
@@ -368,7 +379,12 @@ func (r *accountRepository) Delete(ctx context.Context, id int64) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if tx != nil {
|
if tx != nil {
|
||||||
return tx.Commit()
|
if err := tx.Commit(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, buildSchedulerGroupPayload(groupIDs)); err != nil {
|
||||||
|
log.Printf("[SchedulerOutbox] enqueue account delete failed: account=%d err=%v", id, err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -455,7 +471,18 @@ func (r *accountRepository) UpdateLastUsed(ctx context.Context, id int64) error
|
|||||||
Where(dbaccount.IDEQ(id)).
|
Where(dbaccount.IDEQ(id)).
|
||||||
SetLastUsedAt(now).
|
SetLastUsedAt(now).
|
||||||
Save(ctx)
|
Save(ctx)
|
||||||
return err
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
payload := map[string]any{
|
||||||
|
"last_used": map[string]int64{
|
||||||
|
strconv.FormatInt(id, 10): now.Unix(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountLastUsed, &id, nil, payload); err != nil {
|
||||||
|
log.Printf("[SchedulerOutbox] enqueue last used failed: account=%d err=%v", id, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *accountRepository) BatchUpdateLastUsed(ctx context.Context, updates map[int64]time.Time) error {
|
func (r *accountRepository) BatchUpdateLastUsed(ctx context.Context, updates map[int64]time.Time) error {
|
||||||
@@ -479,7 +506,18 @@ func (r *accountRepository) BatchUpdateLastUsed(ctx context.Context, updates map
|
|||||||
args = append(args, pq.Array(ids))
|
args = append(args, pq.Array(ids))
|
||||||
|
|
||||||
_, err := r.sql.ExecContext(ctx, caseSQL, args...)
|
_, err := r.sql.ExecContext(ctx, caseSQL, args...)
|
||||||
return err
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
lastUsedPayload := make(map[string]int64, len(updates))
|
||||||
|
for id, ts := range updates {
|
||||||
|
lastUsedPayload[strconv.FormatInt(id, 10)] = ts.Unix()
|
||||||
|
}
|
||||||
|
payload := map[string]any{"last_used": lastUsedPayload}
|
||||||
|
if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountLastUsed, nil, nil, payload); err != nil {
|
||||||
|
log.Printf("[SchedulerOutbox] enqueue batch last used failed: err=%v", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *accountRepository) SetError(ctx context.Context, id int64, errorMsg string) error {
|
func (r *accountRepository) SetError(ctx context.Context, id int64, errorMsg string) error {
|
||||||
@@ -488,7 +526,13 @@ func (r *accountRepository) SetError(ctx context.Context, id int64, errorMsg str
|
|||||||
SetStatus(service.StatusError).
|
SetStatus(service.StatusError).
|
||||||
SetErrorMessage(errorMsg).
|
SetErrorMessage(errorMsg).
|
||||||
Save(ctx)
|
Save(ctx)
|
||||||
return err
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil {
|
||||||
|
log.Printf("[SchedulerOutbox] enqueue set error failed: account=%d err=%v", id, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *accountRepository) AddToGroup(ctx context.Context, accountID, groupID int64, priority int) error {
|
func (r *accountRepository) AddToGroup(ctx context.Context, accountID, groupID int64, priority int) error {
|
||||||
@@ -497,7 +541,14 @@ func (r *accountRepository) AddToGroup(ctx context.Context, accountID, groupID i
|
|||||||
SetGroupID(groupID).
|
SetGroupID(groupID).
|
||||||
SetPriority(priority).
|
SetPriority(priority).
|
||||||
Save(ctx)
|
Save(ctx)
|
||||||
return err
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
payload := buildSchedulerGroupPayload([]int64{groupID})
|
||||||
|
if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountGroupsChanged, &accountID, nil, payload); err != nil {
|
||||||
|
log.Printf("[SchedulerOutbox] enqueue add to group failed: account=%d group=%d err=%v", accountID, groupID, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *accountRepository) RemoveFromGroup(ctx context.Context, accountID, groupID int64) error {
|
func (r *accountRepository) RemoveFromGroup(ctx context.Context, accountID, groupID int64) error {
|
||||||
@@ -507,7 +558,14 @@ func (r *accountRepository) RemoveFromGroup(ctx context.Context, accountID, grou
|
|||||||
dbaccountgroup.GroupIDEQ(groupID),
|
dbaccountgroup.GroupIDEQ(groupID),
|
||||||
).
|
).
|
||||||
Exec(ctx)
|
Exec(ctx)
|
||||||
return err
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
payload := buildSchedulerGroupPayload([]int64{groupID})
|
||||||
|
if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountGroupsChanged, &accountID, nil, payload); err != nil {
|
||||||
|
log.Printf("[SchedulerOutbox] enqueue remove from group failed: account=%d group=%d err=%v", accountID, groupID, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *accountRepository) GetGroups(ctx context.Context, accountID int64) ([]service.Group, error) {
|
func (r *accountRepository) GetGroups(ctx context.Context, accountID int64) ([]service.Group, error) {
|
||||||
@@ -528,6 +586,10 @@ func (r *accountRepository) GetGroups(ctx context.Context, accountID int64) ([]s
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *accountRepository) BindGroups(ctx context.Context, accountID int64, groupIDs []int64) error {
|
func (r *accountRepository) BindGroups(ctx context.Context, accountID int64, groupIDs []int64) error {
|
||||||
|
existingGroupIDs, err := r.loadAccountGroupIDs(ctx, accountID)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
// 使用事务保证删除旧绑定与创建新绑定的原子性
|
// 使用事务保证删除旧绑定与创建新绑定的原子性
|
||||||
tx, err := r.client.Tx(ctx)
|
tx, err := r.client.Tx(ctx)
|
||||||
if err != nil && !errors.Is(err, dbent.ErrTxStarted) {
|
if err != nil && !errors.Is(err, dbent.ErrTxStarted) {
|
||||||
@@ -568,7 +630,13 @@ func (r *accountRepository) BindGroups(ctx context.Context, accountID int64, gro
|
|||||||
}
|
}
|
||||||
|
|
||||||
if tx != nil {
|
if tx != nil {
|
||||||
return tx.Commit()
|
if err := tx.Commit(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
payload := buildSchedulerGroupPayload(mergeGroupIDs(existingGroupIDs, groupIDs))
|
||||||
|
if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountGroupsChanged, &accountID, nil, payload); err != nil {
|
||||||
|
log.Printf("[SchedulerOutbox] enqueue bind groups failed: account=%d err=%v", accountID, err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -672,7 +740,50 @@ func (r *accountRepository) SetRateLimited(ctx context.Context, id int64, resetA
|
|||||||
SetRateLimitedAt(now).
|
SetRateLimitedAt(now).
|
||||||
SetRateLimitResetAt(resetAt).
|
SetRateLimitResetAt(resetAt).
|
||||||
Save(ctx)
|
Save(ctx)
|
||||||
return err
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil {
|
||||||
|
log.Printf("[SchedulerOutbox] enqueue rate limit failed: account=%d err=%v", id, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *accountRepository) SetAntigravityQuotaScopeLimit(ctx context.Context, id int64, scope service.AntigravityQuotaScope, resetAt time.Time) error {
|
||||||
|
now := time.Now().UTC()
|
||||||
|
payload := map[string]string{
|
||||||
|
"rate_limited_at": now.Format(time.RFC3339),
|
||||||
|
"rate_limit_reset_at": resetAt.UTC().Format(time.RFC3339),
|
||||||
|
}
|
||||||
|
raw, err := json.Marshal(payload)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
path := "{antigravity_quota_scopes," + string(scope) + "}"
|
||||||
|
client := clientFromContext(ctx, r.client)
|
||||||
|
result, err := client.ExecContext(
|
||||||
|
ctx,
|
||||||
|
"UPDATE accounts SET extra = jsonb_set(COALESCE(extra, '{}'::jsonb), $1::text[], $2::jsonb, true), updated_at = NOW() WHERE id = $3 AND deleted_at IS NULL",
|
||||||
|
path,
|
||||||
|
raw,
|
||||||
|
id,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
affected, err := result.RowsAffected()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if affected == 0 {
|
||||||
|
return service.ErrAccountNotFound
|
||||||
|
}
|
||||||
|
if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil {
|
||||||
|
log.Printf("[SchedulerOutbox] enqueue quota scope failed: account=%d err=%v", id, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *accountRepository) SetOverloaded(ctx context.Context, id int64, until time.Time) error {
|
func (r *accountRepository) SetOverloaded(ctx context.Context, id int64, until time.Time) error {
|
||||||
@@ -680,7 +791,13 @@ func (r *accountRepository) SetOverloaded(ctx context.Context, id int64, until t
|
|||||||
Where(dbaccount.IDEQ(id)).
|
Where(dbaccount.IDEQ(id)).
|
||||||
SetOverloadUntil(until).
|
SetOverloadUntil(until).
|
||||||
Save(ctx)
|
Save(ctx)
|
||||||
return err
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil {
|
||||||
|
log.Printf("[SchedulerOutbox] enqueue overload failed: account=%d err=%v", id, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *accountRepository) SetTempUnschedulable(ctx context.Context, id int64, until time.Time, reason string) error {
|
func (r *accountRepository) SetTempUnschedulable(ctx context.Context, id int64, until time.Time, reason string) error {
|
||||||
@@ -693,7 +810,13 @@ func (r *accountRepository) SetTempUnschedulable(ctx context.Context, id int64,
|
|||||||
AND deleted_at IS NULL
|
AND deleted_at IS NULL
|
||||||
AND (temp_unschedulable_until IS NULL OR temp_unschedulable_until < $1)
|
AND (temp_unschedulable_until IS NULL OR temp_unschedulable_until < $1)
|
||||||
`, until, reason, id)
|
`, until, reason, id)
|
||||||
return err
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil {
|
||||||
|
log.Printf("[SchedulerOutbox] enqueue temp unschedulable failed: account=%d err=%v", id, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *accountRepository) ClearTempUnschedulable(ctx context.Context, id int64) error {
|
func (r *accountRepository) ClearTempUnschedulable(ctx context.Context, id int64) error {
|
||||||
@@ -705,7 +828,13 @@ func (r *accountRepository) ClearTempUnschedulable(ctx context.Context, id int64
|
|||||||
WHERE id = $1
|
WHERE id = $1
|
||||||
AND deleted_at IS NULL
|
AND deleted_at IS NULL
|
||||||
`, id)
|
`, id)
|
||||||
return err
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil {
|
||||||
|
log.Printf("[SchedulerOutbox] enqueue clear temp unschedulable failed: account=%d err=%v", id, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *accountRepository) ClearRateLimit(ctx context.Context, id int64) error {
|
func (r *accountRepository) ClearRateLimit(ctx context.Context, id int64) error {
|
||||||
@@ -715,7 +844,37 @@ func (r *accountRepository) ClearRateLimit(ctx context.Context, id int64) error
|
|||||||
ClearRateLimitResetAt().
|
ClearRateLimitResetAt().
|
||||||
ClearOverloadUntil().
|
ClearOverloadUntil().
|
||||||
Save(ctx)
|
Save(ctx)
|
||||||
return err
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil {
|
||||||
|
log.Printf("[SchedulerOutbox] enqueue clear rate limit failed: account=%d err=%v", id, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *accountRepository) ClearAntigravityQuotaScopes(ctx context.Context, id int64) error {
|
||||||
|
client := clientFromContext(ctx, r.client)
|
||||||
|
result, err := client.ExecContext(
|
||||||
|
ctx,
|
||||||
|
"UPDATE accounts SET extra = COALESCE(extra, '{}'::jsonb) - 'antigravity_quota_scopes', updated_at = NOW() WHERE id = $1 AND deleted_at IS NULL",
|
||||||
|
id,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
affected, err := result.RowsAffected()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if affected == 0 {
|
||||||
|
return service.ErrAccountNotFound
|
||||||
|
}
|
||||||
|
if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil {
|
||||||
|
log.Printf("[SchedulerOutbox] enqueue clear quota scopes failed: account=%d err=%v", id, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *accountRepository) UpdateSessionWindow(ctx context.Context, id int64, start, end *time.Time, status string) error {
|
func (r *accountRepository) UpdateSessionWindow(ctx context.Context, id int64, start, end *time.Time, status string) error {
|
||||||
@@ -737,7 +896,13 @@ func (r *accountRepository) SetSchedulable(ctx context.Context, id int64, schedu
|
|||||||
Where(dbaccount.IDEQ(id)).
|
Where(dbaccount.IDEQ(id)).
|
||||||
SetSchedulable(schedulable).
|
SetSchedulable(schedulable).
|
||||||
Save(ctx)
|
Save(ctx)
|
||||||
return err
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil {
|
||||||
|
log.Printf("[SchedulerOutbox] enqueue schedulable change failed: account=%d err=%v", id, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *accountRepository) AutoPauseExpiredAccounts(ctx context.Context, now time.Time) (int64, error) {
|
func (r *accountRepository) AutoPauseExpiredAccounts(ctx context.Context, now time.Time) (int64, error) {
|
||||||
@@ -758,6 +923,11 @@ func (r *accountRepository) AutoPauseExpiredAccounts(ctx context.Context, now ti
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
if rows > 0 {
|
||||||
|
if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventFullRebuild, nil, nil, nil); err != nil {
|
||||||
|
log.Printf("[SchedulerOutbox] enqueue auto pause rebuild failed: err=%v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
return rows, nil
|
return rows, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -789,6 +959,9 @@ func (r *accountRepository) UpdateExtra(ctx context.Context, id int64, updates m
|
|||||||
if affected == 0 {
|
if affected == 0 {
|
||||||
return service.ErrAccountNotFound
|
return service.ErrAccountNotFound
|
||||||
}
|
}
|
||||||
|
if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil {
|
||||||
|
log.Printf("[SchedulerOutbox] enqueue extra update failed: account=%d err=%v", id, err)
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -873,6 +1046,12 @@ func (r *accountRepository) BulkUpdate(ctx context.Context, ids []int64, updates
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
if rows > 0 {
|
||||||
|
payload := map[string]any{"account_ids": ids}
|
||||||
|
if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountBulkChanged, nil, nil, payload); err != nil {
|
||||||
|
log.Printf("[SchedulerOutbox] enqueue bulk update failed: err=%v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
return rows, nil
|
return rows, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1115,6 +1294,54 @@ func (r *accountRepository) loadAccountGroups(ctx context.Context, accountIDs []
|
|||||||
return groupsByAccount, groupIDsByAccount, accountGroupsByAccount, nil
|
return groupsByAccount, groupIDsByAccount, accountGroupsByAccount, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (r *accountRepository) loadAccountGroupIDs(ctx context.Context, accountID int64) ([]int64, error) {
|
||||||
|
entries, err := r.client.AccountGroup.
|
||||||
|
Query().
|
||||||
|
Where(dbaccountgroup.AccountIDEQ(accountID)).
|
||||||
|
All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
ids := make([]int64, 0, len(entries))
|
||||||
|
for _, entry := range entries {
|
||||||
|
ids = append(ids, entry.GroupID)
|
||||||
|
}
|
||||||
|
return ids, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func mergeGroupIDs(a []int64, b []int64) []int64 {
|
||||||
|
seen := make(map[int64]struct{}, len(a)+len(b))
|
||||||
|
out := make([]int64, 0, len(a)+len(b))
|
||||||
|
for _, id := range a {
|
||||||
|
if id <= 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if _, ok := seen[id]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
seen[id] = struct{}{}
|
||||||
|
out = append(out, id)
|
||||||
|
}
|
||||||
|
for _, id := range b {
|
||||||
|
if id <= 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if _, ok := seen[id]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
seen[id] = struct{}{}
|
||||||
|
out = append(out, id)
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildSchedulerGroupPayload(groupIDs []int64) map[string]any {
|
||||||
|
if len(groupIDs) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return map[string]any{"group_ids": groupIDs}
|
||||||
|
}
|
||||||
|
|
||||||
func accountEntityToService(m *dbent.Account) *service.Account {
|
func accountEntityToService(m *dbent.Account) *service.Account {
|
||||||
if m == nil {
|
if m == nil {
|
||||||
return nil
|
return nil
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ package repository
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"time"
|
"time"
|
||||||
@@ -13,6 +14,7 @@ import (
|
|||||||
const (
|
const (
|
||||||
apiKeyRateLimitKeyPrefix = "apikey:ratelimit:"
|
apiKeyRateLimitKeyPrefix = "apikey:ratelimit:"
|
||||||
apiKeyRateLimitDuration = 24 * time.Hour
|
apiKeyRateLimitDuration = 24 * time.Hour
|
||||||
|
apiKeyAuthCachePrefix = "apikey:auth:"
|
||||||
)
|
)
|
||||||
|
|
||||||
// apiKeyRateLimitKey generates the Redis key for API key creation rate limiting.
|
// apiKeyRateLimitKey generates the Redis key for API key creation rate limiting.
|
||||||
@@ -20,6 +22,10 @@ func apiKeyRateLimitKey(userID int64) string {
|
|||||||
return fmt.Sprintf("%s%d", apiKeyRateLimitKeyPrefix, userID)
|
return fmt.Sprintf("%s%d", apiKeyRateLimitKeyPrefix, userID)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func apiKeyAuthCacheKey(key string) string {
|
||||||
|
return fmt.Sprintf("%s%s", apiKeyAuthCachePrefix, key)
|
||||||
|
}
|
||||||
|
|
||||||
type apiKeyCache struct {
|
type apiKeyCache struct {
|
||||||
rdb *redis.Client
|
rdb *redis.Client
|
||||||
}
|
}
|
||||||
@@ -58,3 +64,30 @@ func (c *apiKeyCache) IncrementDailyUsage(ctx context.Context, apiKey string) er
|
|||||||
func (c *apiKeyCache) SetDailyUsageExpiry(ctx context.Context, apiKey string, ttl time.Duration) error {
|
func (c *apiKeyCache) SetDailyUsageExpiry(ctx context.Context, apiKey string, ttl time.Duration) error {
|
||||||
return c.rdb.Expire(ctx, apiKey, ttl).Err()
|
return c.rdb.Expire(ctx, apiKey, ttl).Err()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *apiKeyCache) GetAuthCache(ctx context.Context, key string) (*service.APIKeyAuthCacheEntry, error) {
|
||||||
|
val, err := c.rdb.Get(ctx, apiKeyAuthCacheKey(key)).Bytes()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var entry service.APIKeyAuthCacheEntry
|
||||||
|
if err := json.Unmarshal(val, &entry); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &entry, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *apiKeyCache) SetAuthCache(ctx context.Context, key string, entry *service.APIKeyAuthCacheEntry, ttl time.Duration) error {
|
||||||
|
if entry == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
payload, err := json.Marshal(entry)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return c.rdb.Set(ctx, apiKeyAuthCacheKey(key), payload, ttl).Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *apiKeyCache) DeleteAuthCache(ctx context.Context, key string) error {
|
||||||
|
return c.rdb.Del(ctx, apiKeyAuthCacheKey(key)).Err()
|
||||||
|
}
|
||||||
|
|||||||
@@ -6,7 +6,9 @@ import (
|
|||||||
|
|
||||||
dbent "github.com/Wei-Shaw/sub2api/ent"
|
dbent "github.com/Wei-Shaw/sub2api/ent"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
"github.com/Wei-Shaw/sub2api/ent/schema/mixins"
|
"github.com/Wei-Shaw/sub2api/ent/schema/mixins"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||||
|
|
||||||
"github.com/Wei-Shaw/sub2api/internal/pkg/pagination"
|
"github.com/Wei-Shaw/sub2api/internal/pkg/pagination"
|
||||||
@@ -26,13 +28,21 @@ func (r *apiKeyRepository) activeQuery() *dbent.APIKeyQuery {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *apiKeyRepository) Create(ctx context.Context, key *service.APIKey) error {
|
func (r *apiKeyRepository) Create(ctx context.Context, key *service.APIKey) error {
|
||||||
created, err := r.client.APIKey.Create().
|
builder := r.client.APIKey.Create().
|
||||||
SetUserID(key.UserID).
|
SetUserID(key.UserID).
|
||||||
SetKey(key.Key).
|
SetKey(key.Key).
|
||||||
SetName(key.Name).
|
SetName(key.Name).
|
||||||
SetStatus(key.Status).
|
SetStatus(key.Status).
|
||||||
SetNillableGroupID(key.GroupID).
|
SetNillableGroupID(key.GroupID)
|
||||||
Save(ctx)
|
|
||||||
|
if len(key.IPWhitelist) > 0 {
|
||||||
|
builder.SetIPWhitelist(key.IPWhitelist)
|
||||||
|
}
|
||||||
|
if len(key.IPBlacklist) > 0 {
|
||||||
|
builder.SetIPBlacklist(key.IPBlacklist)
|
||||||
|
}
|
||||||
|
|
||||||
|
created, err := builder.Save(ctx)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
key.ID = created.ID
|
key.ID = created.ID
|
||||||
key.CreatedAt = created.CreatedAt
|
key.CreatedAt = created.CreatedAt
|
||||||
@@ -56,23 +66,23 @@ func (r *apiKeyRepository) GetByID(ctx context.Context, id int64) (*service.APIK
|
|||||||
return apiKeyEntityToService(m), nil
|
return apiKeyEntityToService(m), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetOwnerID 根据 API Key ID 获取其所有者(用户)的 ID。
|
// GetKeyAndOwnerID 根据 API Key ID 获取其 key 与所有者(用户)ID。
|
||||||
// 相比 GetByID,此方法性能更优,因为:
|
// 相比 GetByID,此方法性能更优,因为:
|
||||||
// - 使用 Select() 只查询 user_id 字段,减少数据传输量
|
// - 使用 Select() 只查询必要字段,减少数据传输量
|
||||||
// - 不加载完整的 API Key 实体及其关联数据(User、Group 等)
|
// - 不加载完整的 API Key 实体及其关联数据(User、Group 等)
|
||||||
// - 适用于权限验证等只需用户 ID 的场景(如删除前的所有权检查)
|
// - 适用于删除等只需 key 与用户 ID 的场景
|
||||||
func (r *apiKeyRepository) GetOwnerID(ctx context.Context, id int64) (int64, error) {
|
func (r *apiKeyRepository) GetKeyAndOwnerID(ctx context.Context, id int64) (string, int64, error) {
|
||||||
m, err := r.activeQuery().
|
m, err := r.activeQuery().
|
||||||
Where(apikey.IDEQ(id)).
|
Where(apikey.IDEQ(id)).
|
||||||
Select(apikey.FieldUserID).
|
Select(apikey.FieldKey, apikey.FieldUserID).
|
||||||
Only(ctx)
|
Only(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if dbent.IsNotFound(err) {
|
if dbent.IsNotFound(err) {
|
||||||
return 0, service.ErrAPIKeyNotFound
|
return "", 0, service.ErrAPIKeyNotFound
|
||||||
}
|
}
|
||||||
return 0, err
|
return "", 0, err
|
||||||
}
|
}
|
||||||
return m.UserID, nil
|
return m.Key, m.UserID, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *apiKeyRepository) GetByKey(ctx context.Context, key string) (*service.APIKey, error) {
|
func (r *apiKeyRepository) GetByKey(ctx context.Context, key string) (*service.APIKey, error) {
|
||||||
@@ -90,6 +100,54 @@ func (r *apiKeyRepository) GetByKey(ctx context.Context, key string) (*service.A
|
|||||||
return apiKeyEntityToService(m), nil
|
return apiKeyEntityToService(m), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (r *apiKeyRepository) GetByKeyForAuth(ctx context.Context, key string) (*service.APIKey, error) {
|
||||||
|
m, err := r.activeQuery().
|
||||||
|
Where(apikey.KeyEQ(key)).
|
||||||
|
Select(
|
||||||
|
apikey.FieldID,
|
||||||
|
apikey.FieldUserID,
|
||||||
|
apikey.FieldGroupID,
|
||||||
|
apikey.FieldStatus,
|
||||||
|
apikey.FieldIPWhitelist,
|
||||||
|
apikey.FieldIPBlacklist,
|
||||||
|
).
|
||||||
|
WithUser(func(q *dbent.UserQuery) {
|
||||||
|
q.Select(
|
||||||
|
user.FieldID,
|
||||||
|
user.FieldStatus,
|
||||||
|
user.FieldRole,
|
||||||
|
user.FieldBalance,
|
||||||
|
user.FieldConcurrency,
|
||||||
|
)
|
||||||
|
}).
|
||||||
|
WithGroup(func(q *dbent.GroupQuery) {
|
||||||
|
q.Select(
|
||||||
|
group.FieldID,
|
||||||
|
group.FieldName,
|
||||||
|
group.FieldPlatform,
|
||||||
|
group.FieldStatus,
|
||||||
|
group.FieldSubscriptionType,
|
||||||
|
group.FieldRateMultiplier,
|
||||||
|
group.FieldDailyLimitUsd,
|
||||||
|
group.FieldWeeklyLimitUsd,
|
||||||
|
group.FieldMonthlyLimitUsd,
|
||||||
|
group.FieldImagePrice1k,
|
||||||
|
group.FieldImagePrice2k,
|
||||||
|
group.FieldImagePrice4k,
|
||||||
|
group.FieldClaudeCodeOnly,
|
||||||
|
group.FieldFallbackGroupID,
|
||||||
|
)
|
||||||
|
}).
|
||||||
|
Only(ctx)
|
||||||
|
if err != nil {
|
||||||
|
if dbent.IsNotFound(err) {
|
||||||
|
return nil, service.ErrAPIKeyNotFound
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return apiKeyEntityToService(m), nil
|
||||||
|
}
|
||||||
|
|
||||||
func (r *apiKeyRepository) Update(ctx context.Context, key *service.APIKey) error {
|
func (r *apiKeyRepository) Update(ctx context.Context, key *service.APIKey) error {
|
||||||
// 使用原子操作:将软删除检查与更新合并到同一语句,避免竞态条件。
|
// 使用原子操作:将软删除检查与更新合并到同一语句,避免竞态条件。
|
||||||
// 之前的实现先检查 Exist 再 UpdateOneID,若在两步之间发生软删除,
|
// 之前的实现先检查 Exist 再 UpdateOneID,若在两步之间发生软删除,
|
||||||
@@ -108,6 +166,18 @@ func (r *apiKeyRepository) Update(ctx context.Context, key *service.APIKey) erro
|
|||||||
builder.ClearGroupID()
|
builder.ClearGroupID()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IP 限制字段
|
||||||
|
if len(key.IPWhitelist) > 0 {
|
||||||
|
builder.SetIPWhitelist(key.IPWhitelist)
|
||||||
|
} else {
|
||||||
|
builder.ClearIPWhitelist()
|
||||||
|
}
|
||||||
|
if len(key.IPBlacklist) > 0 {
|
||||||
|
builder.SetIPBlacklist(key.IPBlacklist)
|
||||||
|
} else {
|
||||||
|
builder.ClearIPBlacklist()
|
||||||
|
}
|
||||||
|
|
||||||
affected, err := builder.Save(ctx)
|
affected, err := builder.Save(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -263,19 +333,43 @@ func (r *apiKeyRepository) CountByGroupID(ctx context.Context, groupID int64) (i
|
|||||||
return int64(count), err
|
return int64(count), err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (r *apiKeyRepository) ListKeysByUserID(ctx context.Context, userID int64) ([]string, error) {
|
||||||
|
keys, err := r.activeQuery().
|
||||||
|
Where(apikey.UserIDEQ(userID)).
|
||||||
|
Select(apikey.FieldKey).
|
||||||
|
Strings(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return keys, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *apiKeyRepository) ListKeysByGroupID(ctx context.Context, groupID int64) ([]string, error) {
|
||||||
|
keys, err := r.activeQuery().
|
||||||
|
Where(apikey.GroupIDEQ(groupID)).
|
||||||
|
Select(apikey.FieldKey).
|
||||||
|
Strings(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return keys, nil
|
||||||
|
}
|
||||||
|
|
||||||
func apiKeyEntityToService(m *dbent.APIKey) *service.APIKey {
|
func apiKeyEntityToService(m *dbent.APIKey) *service.APIKey {
|
||||||
if m == nil {
|
if m == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
out := &service.APIKey{
|
out := &service.APIKey{
|
||||||
ID: m.ID,
|
ID: m.ID,
|
||||||
UserID: m.UserID,
|
UserID: m.UserID,
|
||||||
Key: m.Key,
|
Key: m.Key,
|
||||||
Name: m.Name,
|
Name: m.Name,
|
||||||
Status: m.Status,
|
Status: m.Status,
|
||||||
CreatedAt: m.CreatedAt,
|
IPWhitelist: m.IPWhitelist,
|
||||||
UpdatedAt: m.UpdatedAt,
|
IPBlacklist: m.IPBlacklist,
|
||||||
GroupID: m.GroupID,
|
CreatedAt: m.CreatedAt,
|
||||||
|
UpdatedAt: m.UpdatedAt,
|
||||||
|
GroupID: m.GroupID,
|
||||||
}
|
}
|
||||||
if m.Edges.User != nil {
|
if m.Edges.User != nil {
|
||||||
out.User = userEntityToService(m.Edges.User)
|
out.User = userEntityToService(m.Edges.User)
|
||||||
@@ -317,6 +411,7 @@ func groupEntityToService(g *dbent.Group) *service.Group {
|
|||||||
RateMultiplier: g.RateMultiplier,
|
RateMultiplier: g.RateMultiplier,
|
||||||
IsExclusive: g.IsExclusive,
|
IsExclusive: g.IsExclusive,
|
||||||
Status: g.Status,
|
Status: g.Status,
|
||||||
|
Hydrated: true,
|
||||||
SubscriptionType: g.SubscriptionType,
|
SubscriptionType: g.SubscriptionType,
|
||||||
DailyLimitUSD: g.DailyLimitUsd,
|
DailyLimitUSD: g.DailyLimitUsd,
|
||||||
WeeklyLimitUSD: g.WeeklyLimitUsd,
|
WeeklyLimitUSD: g.WeeklyLimitUsd,
|
||||||
|
|||||||
@@ -93,7 +93,7 @@ var (
|
|||||||
return redis.call('ZCARD', key)
|
return redis.call('ZCARD', key)
|
||||||
`)
|
`)
|
||||||
|
|
||||||
// incrementWaitScript - only sets TTL on first creation to avoid refreshing
|
// incrementWaitScript - refreshes TTL on each increment to keep queue depth accurate
|
||||||
// KEYS[1] = wait queue key
|
// KEYS[1] = wait queue key
|
||||||
// ARGV[1] = maxWait
|
// ARGV[1] = maxWait
|
||||||
// ARGV[2] = TTL in seconds
|
// ARGV[2] = TTL in seconds
|
||||||
@@ -111,15 +111,13 @@ var (
|
|||||||
|
|
||||||
local newVal = redis.call('INCR', KEYS[1])
|
local newVal = redis.call('INCR', KEYS[1])
|
||||||
|
|
||||||
-- Only set TTL on first creation to avoid refreshing zombie data
|
-- Refresh TTL so long-running traffic doesn't expire active queue counters.
|
||||||
if newVal == 1 then
|
redis.call('EXPIRE', KEYS[1], ARGV[2])
|
||||||
redis.call('EXPIRE', KEYS[1], ARGV[2])
|
|
||||||
end
|
|
||||||
|
|
||||||
return 1
|
return 1
|
||||||
`)
|
`)
|
||||||
|
|
||||||
// incrementAccountWaitScript - account-level wait queue count
|
// incrementAccountWaitScript - account-level wait queue count (refresh TTL on each increment)
|
||||||
incrementAccountWaitScript = redis.NewScript(`
|
incrementAccountWaitScript = redis.NewScript(`
|
||||||
local current = redis.call('GET', KEYS[1])
|
local current = redis.call('GET', KEYS[1])
|
||||||
if current == false then
|
if current == false then
|
||||||
@@ -134,10 +132,8 @@ var (
|
|||||||
|
|
||||||
local newVal = redis.call('INCR', KEYS[1])
|
local newVal = redis.call('INCR', KEYS[1])
|
||||||
|
|
||||||
-- Only set TTL on first creation to avoid refreshing zombie data
|
-- Refresh TTL so long-running traffic doesn't expire active queue counters.
|
||||||
if newVal == 1 then
|
redis.call('EXPIRE', KEYS[1], ARGV[2])
|
||||||
redis.call('EXPIRE', KEYS[1], ARGV[2])
|
|
||||||
end
|
|
||||||
|
|
||||||
return 1
|
return 1
|
||||||
`)
|
`)
|
||||||
|
|||||||
387
backend/internal/repository/dashboard_aggregation_repo.go
Normal file
387
backend/internal/repository/dashboard_aggregation_repo.go
Normal file
@@ -0,0 +1,387 @@
|
|||||||
|
package repository
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||||
|
"github.com/lib/pq"
|
||||||
|
)
|
||||||
|
|
||||||
|
type dashboardAggregationRepository struct {
|
||||||
|
sql sqlExecutor
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDashboardAggregationRepository 创建仪表盘预聚合仓储。
|
||||||
|
func NewDashboardAggregationRepository(sqlDB *sql.DB) service.DashboardAggregationRepository {
|
||||||
|
if sqlDB == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if !isPostgresDriver(sqlDB) {
|
||||||
|
log.Printf("[DashboardAggregation] 检测到非 PostgreSQL 驱动,已自动禁用预聚合")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return newDashboardAggregationRepositoryWithSQL(sqlDB)
|
||||||
|
}
|
||||||
|
|
||||||
|
func newDashboardAggregationRepositoryWithSQL(sqlq sqlExecutor) *dashboardAggregationRepository {
|
||||||
|
return &dashboardAggregationRepository{sql: sqlq}
|
||||||
|
}
|
||||||
|
|
||||||
|
func isPostgresDriver(db *sql.DB) bool {
|
||||||
|
if db == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
_, ok := db.Driver().(*pq.Driver)
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *dashboardAggregationRepository) AggregateRange(ctx context.Context, start, end time.Time) error {
|
||||||
|
startUTC := start.UTC()
|
||||||
|
endUTC := end.UTC()
|
||||||
|
if !endUTC.After(startUTC) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
hourStart := startUTC.Truncate(time.Hour)
|
||||||
|
hourEnd := endUTC.Truncate(time.Hour)
|
||||||
|
if endUTC.After(hourEnd) {
|
||||||
|
hourEnd = hourEnd.Add(time.Hour)
|
||||||
|
}
|
||||||
|
|
||||||
|
dayStart := truncateToDayUTC(startUTC)
|
||||||
|
dayEnd := truncateToDayUTC(endUTC)
|
||||||
|
if endUTC.After(dayEnd) {
|
||||||
|
dayEnd = dayEnd.Add(24 * time.Hour)
|
||||||
|
}
|
||||||
|
|
||||||
|
// 以桶边界聚合,允许覆盖 end 所在桶的剩余区间。
|
||||||
|
if err := r.insertHourlyActiveUsers(ctx, hourStart, hourEnd); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := r.insertDailyActiveUsers(ctx, hourStart, hourEnd); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := r.upsertHourlyAggregates(ctx, hourStart, hourEnd); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := r.upsertDailyAggregates(ctx, dayStart, dayEnd); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *dashboardAggregationRepository) GetAggregationWatermark(ctx context.Context) (time.Time, error) {
|
||||||
|
var ts time.Time
|
||||||
|
query := "SELECT last_aggregated_at FROM usage_dashboard_aggregation_watermark WHERE id = 1"
|
||||||
|
if err := scanSingleRow(ctx, r.sql, query, nil, &ts); err != nil {
|
||||||
|
if err == sql.ErrNoRows {
|
||||||
|
return time.Unix(0, 0).UTC(), nil
|
||||||
|
}
|
||||||
|
return time.Time{}, err
|
||||||
|
}
|
||||||
|
return ts.UTC(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *dashboardAggregationRepository) UpdateAggregationWatermark(ctx context.Context, aggregatedAt time.Time) error {
|
||||||
|
query := `
|
||||||
|
INSERT INTO usage_dashboard_aggregation_watermark (id, last_aggregated_at, updated_at)
|
||||||
|
VALUES (1, $1, NOW())
|
||||||
|
ON CONFLICT (id)
|
||||||
|
DO UPDATE SET last_aggregated_at = EXCLUDED.last_aggregated_at, updated_at = EXCLUDED.updated_at
|
||||||
|
`
|
||||||
|
_, err := r.sql.ExecContext(ctx, query, aggregatedAt.UTC())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *dashboardAggregationRepository) CleanupAggregates(ctx context.Context, hourlyCutoff, dailyCutoff time.Time) error {
|
||||||
|
hourlyCutoffUTC := hourlyCutoff.UTC()
|
||||||
|
dailyCutoffUTC := dailyCutoff.UTC()
|
||||||
|
if _, err := r.sql.ExecContext(ctx, "DELETE FROM usage_dashboard_hourly WHERE bucket_start < $1", hourlyCutoffUTC); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := r.sql.ExecContext(ctx, "DELETE FROM usage_dashboard_hourly_users WHERE bucket_start < $1", hourlyCutoffUTC); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := r.sql.ExecContext(ctx, "DELETE FROM usage_dashboard_daily WHERE bucket_date < $1::date", dailyCutoffUTC); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := r.sql.ExecContext(ctx, "DELETE FROM usage_dashboard_daily_users WHERE bucket_date < $1::date", dailyCutoffUTC); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *dashboardAggregationRepository) CleanupUsageLogs(ctx context.Context, cutoff time.Time) error {
|
||||||
|
isPartitioned, err := r.isUsageLogsPartitioned(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if isPartitioned {
|
||||||
|
return r.dropUsageLogsPartitions(ctx, cutoff)
|
||||||
|
}
|
||||||
|
_, err = r.sql.ExecContext(ctx, "DELETE FROM usage_logs WHERE created_at < $1", cutoff.UTC())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *dashboardAggregationRepository) EnsureUsageLogsPartitions(ctx context.Context, now time.Time) error {
|
||||||
|
isPartitioned, err := r.isUsageLogsPartitioned(ctx)
|
||||||
|
if err != nil || !isPartitioned {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
monthStart := truncateToMonthUTC(now)
|
||||||
|
prevMonth := monthStart.AddDate(0, -1, 0)
|
||||||
|
nextMonth := monthStart.AddDate(0, 1, 0)
|
||||||
|
|
||||||
|
for _, m := range []time.Time{prevMonth, monthStart, nextMonth} {
|
||||||
|
if err := r.createUsageLogsPartition(ctx, m); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *dashboardAggregationRepository) insertHourlyActiveUsers(ctx context.Context, start, end time.Time) error {
|
||||||
|
query := `
|
||||||
|
INSERT INTO usage_dashboard_hourly_users (bucket_start, user_id)
|
||||||
|
SELECT DISTINCT
|
||||||
|
date_trunc('hour', created_at AT TIME ZONE 'UTC') AT TIME ZONE 'UTC' AS bucket_start,
|
||||||
|
user_id
|
||||||
|
FROM usage_logs
|
||||||
|
WHERE created_at >= $1 AND created_at < $2
|
||||||
|
ON CONFLICT DO NOTHING
|
||||||
|
`
|
||||||
|
_, err := r.sql.ExecContext(ctx, query, start.UTC(), end.UTC())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *dashboardAggregationRepository) insertDailyActiveUsers(ctx context.Context, start, end time.Time) error {
|
||||||
|
query := `
|
||||||
|
INSERT INTO usage_dashboard_daily_users (bucket_date, user_id)
|
||||||
|
SELECT DISTINCT
|
||||||
|
(bucket_start AT TIME ZONE 'UTC')::date AS bucket_date,
|
||||||
|
user_id
|
||||||
|
FROM usage_dashboard_hourly_users
|
||||||
|
WHERE bucket_start >= $1 AND bucket_start < $2
|
||||||
|
ON CONFLICT DO NOTHING
|
||||||
|
`
|
||||||
|
_, err := r.sql.ExecContext(ctx, query, start.UTC(), end.UTC())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *dashboardAggregationRepository) upsertHourlyAggregates(ctx context.Context, start, end time.Time) error {
|
||||||
|
query := `
|
||||||
|
WITH hourly AS (
|
||||||
|
SELECT
|
||||||
|
date_trunc('hour', created_at AT TIME ZONE 'UTC') AT TIME ZONE 'UTC' AS bucket_start,
|
||||||
|
COUNT(*) AS total_requests,
|
||||||
|
COALESCE(SUM(input_tokens), 0) AS input_tokens,
|
||||||
|
COALESCE(SUM(output_tokens), 0) AS output_tokens,
|
||||||
|
COALESCE(SUM(cache_creation_tokens), 0) AS cache_creation_tokens,
|
||||||
|
COALESCE(SUM(cache_read_tokens), 0) AS cache_read_tokens,
|
||||||
|
COALESCE(SUM(total_cost), 0) AS total_cost,
|
||||||
|
COALESCE(SUM(actual_cost), 0) AS actual_cost,
|
||||||
|
COALESCE(SUM(COALESCE(duration_ms, 0)), 0) AS total_duration_ms
|
||||||
|
FROM usage_logs
|
||||||
|
WHERE created_at >= $1 AND created_at < $2
|
||||||
|
GROUP BY 1
|
||||||
|
),
|
||||||
|
user_counts AS (
|
||||||
|
SELECT bucket_start, COUNT(*) AS active_users
|
||||||
|
FROM usage_dashboard_hourly_users
|
||||||
|
WHERE bucket_start >= $1 AND bucket_start < $2
|
||||||
|
GROUP BY bucket_start
|
||||||
|
)
|
||||||
|
INSERT INTO usage_dashboard_hourly (
|
||||||
|
bucket_start,
|
||||||
|
total_requests,
|
||||||
|
input_tokens,
|
||||||
|
output_tokens,
|
||||||
|
cache_creation_tokens,
|
||||||
|
cache_read_tokens,
|
||||||
|
total_cost,
|
||||||
|
actual_cost,
|
||||||
|
total_duration_ms,
|
||||||
|
active_users,
|
||||||
|
computed_at
|
||||||
|
)
|
||||||
|
SELECT
|
||||||
|
hourly.bucket_start,
|
||||||
|
hourly.total_requests,
|
||||||
|
hourly.input_tokens,
|
||||||
|
hourly.output_tokens,
|
||||||
|
hourly.cache_creation_tokens,
|
||||||
|
hourly.cache_read_tokens,
|
||||||
|
hourly.total_cost,
|
||||||
|
hourly.actual_cost,
|
||||||
|
hourly.total_duration_ms,
|
||||||
|
COALESCE(user_counts.active_users, 0) AS active_users,
|
||||||
|
NOW()
|
||||||
|
FROM hourly
|
||||||
|
LEFT JOIN user_counts ON user_counts.bucket_start = hourly.bucket_start
|
||||||
|
ON CONFLICT (bucket_start)
|
||||||
|
DO UPDATE SET
|
||||||
|
total_requests = EXCLUDED.total_requests,
|
||||||
|
input_tokens = EXCLUDED.input_tokens,
|
||||||
|
output_tokens = EXCLUDED.output_tokens,
|
||||||
|
cache_creation_tokens = EXCLUDED.cache_creation_tokens,
|
||||||
|
cache_read_tokens = EXCLUDED.cache_read_tokens,
|
||||||
|
total_cost = EXCLUDED.total_cost,
|
||||||
|
actual_cost = EXCLUDED.actual_cost,
|
||||||
|
total_duration_ms = EXCLUDED.total_duration_ms,
|
||||||
|
active_users = EXCLUDED.active_users,
|
||||||
|
computed_at = EXCLUDED.computed_at
|
||||||
|
`
|
||||||
|
_, err := r.sql.ExecContext(ctx, query, start.UTC(), end.UTC())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *dashboardAggregationRepository) upsertDailyAggregates(ctx context.Context, start, end time.Time) error {
|
||||||
|
query := `
|
||||||
|
WITH daily AS (
|
||||||
|
SELECT
|
||||||
|
(bucket_start AT TIME ZONE 'UTC')::date AS bucket_date,
|
||||||
|
COALESCE(SUM(total_requests), 0) AS total_requests,
|
||||||
|
COALESCE(SUM(input_tokens), 0) AS input_tokens,
|
||||||
|
COALESCE(SUM(output_tokens), 0) AS output_tokens,
|
||||||
|
COALESCE(SUM(cache_creation_tokens), 0) AS cache_creation_tokens,
|
||||||
|
COALESCE(SUM(cache_read_tokens), 0) AS cache_read_tokens,
|
||||||
|
COALESCE(SUM(total_cost), 0) AS total_cost,
|
||||||
|
COALESCE(SUM(actual_cost), 0) AS actual_cost,
|
||||||
|
COALESCE(SUM(total_duration_ms), 0) AS total_duration_ms
|
||||||
|
FROM usage_dashboard_hourly
|
||||||
|
WHERE bucket_start >= $1 AND bucket_start < $2
|
||||||
|
GROUP BY (bucket_start AT TIME ZONE 'UTC')::date
|
||||||
|
),
|
||||||
|
user_counts AS (
|
||||||
|
SELECT bucket_date, COUNT(*) AS active_users
|
||||||
|
FROM usage_dashboard_daily_users
|
||||||
|
WHERE bucket_date >= $3::date AND bucket_date < $4::date
|
||||||
|
GROUP BY bucket_date
|
||||||
|
)
|
||||||
|
INSERT INTO usage_dashboard_daily (
|
||||||
|
bucket_date,
|
||||||
|
total_requests,
|
||||||
|
input_tokens,
|
||||||
|
output_tokens,
|
||||||
|
cache_creation_tokens,
|
||||||
|
cache_read_tokens,
|
||||||
|
total_cost,
|
||||||
|
actual_cost,
|
||||||
|
total_duration_ms,
|
||||||
|
active_users,
|
||||||
|
computed_at
|
||||||
|
)
|
||||||
|
SELECT
|
||||||
|
daily.bucket_date,
|
||||||
|
daily.total_requests,
|
||||||
|
daily.input_tokens,
|
||||||
|
daily.output_tokens,
|
||||||
|
daily.cache_creation_tokens,
|
||||||
|
daily.cache_read_tokens,
|
||||||
|
daily.total_cost,
|
||||||
|
daily.actual_cost,
|
||||||
|
daily.total_duration_ms,
|
||||||
|
COALESCE(user_counts.active_users, 0) AS active_users,
|
||||||
|
NOW()
|
||||||
|
FROM daily
|
||||||
|
LEFT JOIN user_counts ON user_counts.bucket_date = daily.bucket_date
|
||||||
|
ON CONFLICT (bucket_date)
|
||||||
|
DO UPDATE SET
|
||||||
|
total_requests = EXCLUDED.total_requests,
|
||||||
|
input_tokens = EXCLUDED.input_tokens,
|
||||||
|
output_tokens = EXCLUDED.output_tokens,
|
||||||
|
cache_creation_tokens = EXCLUDED.cache_creation_tokens,
|
||||||
|
cache_read_tokens = EXCLUDED.cache_read_tokens,
|
||||||
|
total_cost = EXCLUDED.total_cost,
|
||||||
|
actual_cost = EXCLUDED.actual_cost,
|
||||||
|
total_duration_ms = EXCLUDED.total_duration_ms,
|
||||||
|
active_users = EXCLUDED.active_users,
|
||||||
|
computed_at = EXCLUDED.computed_at
|
||||||
|
`
|
||||||
|
_, err := r.sql.ExecContext(ctx, query, start.UTC(), end.UTC(), start.UTC(), end.UTC())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *dashboardAggregationRepository) isUsageLogsPartitioned(ctx context.Context) (bool, error) {
|
||||||
|
query := `
|
||||||
|
SELECT EXISTS(
|
||||||
|
SELECT 1
|
||||||
|
FROM pg_partitioned_table pt
|
||||||
|
JOIN pg_class c ON c.oid = pt.partrelid
|
||||||
|
WHERE c.relname = 'usage_logs'
|
||||||
|
)
|
||||||
|
`
|
||||||
|
var partitioned bool
|
||||||
|
if err := scanSingleRow(ctx, r.sql, query, nil, &partitioned); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
return partitioned, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *dashboardAggregationRepository) dropUsageLogsPartitions(ctx context.Context, cutoff time.Time) error {
|
||||||
|
rows, err := r.sql.QueryContext(ctx, `
|
||||||
|
SELECT c.relname
|
||||||
|
FROM pg_inherits
|
||||||
|
JOIN pg_class c ON c.oid = pg_inherits.inhrelid
|
||||||
|
JOIN pg_class p ON p.oid = pg_inherits.inhparent
|
||||||
|
WHERE p.relname = 'usage_logs'
|
||||||
|
`)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
_ = rows.Close()
|
||||||
|
}()
|
||||||
|
|
||||||
|
cutoffMonth := truncateToMonthUTC(cutoff)
|
||||||
|
for rows.Next() {
|
||||||
|
var name string
|
||||||
|
if err := rows.Scan(&name); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !strings.HasPrefix(name, "usage_logs_") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
suffix := strings.TrimPrefix(name, "usage_logs_")
|
||||||
|
month, err := time.Parse("200601", suffix)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
month = month.UTC()
|
||||||
|
if month.Before(cutoffMonth) {
|
||||||
|
if _, err := r.sql.ExecContext(ctx, fmt.Sprintf("DROP TABLE IF EXISTS %s", pq.QuoteIdentifier(name))); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return rows.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *dashboardAggregationRepository) createUsageLogsPartition(ctx context.Context, month time.Time) error {
|
||||||
|
monthStart := truncateToMonthUTC(month)
|
||||||
|
nextMonth := monthStart.AddDate(0, 1, 0)
|
||||||
|
name := fmt.Sprintf("usage_logs_%s", monthStart.Format("200601"))
|
||||||
|
query := fmt.Sprintf(
|
||||||
|
"CREATE TABLE IF NOT EXISTS %s PARTITION OF usage_logs FOR VALUES FROM (%s) TO (%s)",
|
||||||
|
pq.QuoteIdentifier(name),
|
||||||
|
pq.QuoteLiteral(monthStart.Format("2006-01-02")),
|
||||||
|
pq.QuoteLiteral(nextMonth.Format("2006-01-02")),
|
||||||
|
)
|
||||||
|
_, err := r.sql.ExecContext(ctx, query)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func truncateToDayUTC(t time.Time) time.Time {
|
||||||
|
t = t.UTC()
|
||||||
|
return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, time.UTC)
|
||||||
|
}
|
||||||
|
|
||||||
|
func truncateToMonthUTC(t time.Time) time.Time {
|
||||||
|
t = t.UTC()
|
||||||
|
return time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, time.UTC)
|
||||||
|
}
|
||||||
58
backend/internal/repository/dashboard_cache.go
Normal file
58
backend/internal/repository/dashboard_cache.go
Normal file
@@ -0,0 +1,58 @@
|
|||||||
|
package repository
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/config"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||||
|
"github.com/redis/go-redis/v9"
|
||||||
|
)
|
||||||
|
|
||||||
|
const dashboardStatsCacheKey = "dashboard:stats:v1"
|
||||||
|
|
||||||
|
type dashboardCache struct {
|
||||||
|
rdb *redis.Client
|
||||||
|
keyPrefix string
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewDashboardCache(rdb *redis.Client, cfg *config.Config) service.DashboardStatsCache {
|
||||||
|
prefix := "sub2api:"
|
||||||
|
if cfg != nil {
|
||||||
|
prefix = strings.TrimSpace(cfg.Dashboard.KeyPrefix)
|
||||||
|
}
|
||||||
|
if prefix != "" && !strings.HasSuffix(prefix, ":") {
|
||||||
|
prefix += ":"
|
||||||
|
}
|
||||||
|
return &dashboardCache{
|
||||||
|
rdb: rdb,
|
||||||
|
keyPrefix: prefix,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *dashboardCache) GetDashboardStats(ctx context.Context) (string, error) {
|
||||||
|
val, err := c.rdb.Get(ctx, c.buildKey()).Result()
|
||||||
|
if err != nil {
|
||||||
|
if err == redis.Nil {
|
||||||
|
return "", service.ErrDashboardStatsCacheMiss
|
||||||
|
}
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return val, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *dashboardCache) SetDashboardStats(ctx context.Context, data string, ttl time.Duration) error {
|
||||||
|
return c.rdb.Set(ctx, c.buildKey(), data, ttl).Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *dashboardCache) buildKey() string {
|
||||||
|
if c.keyPrefix == "" {
|
||||||
|
return dashboardStatsCacheKey
|
||||||
|
}
|
||||||
|
return c.keyPrefix + dashboardStatsCacheKey
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *dashboardCache) DeleteDashboardStats(ctx context.Context) error {
|
||||||
|
return c.rdb.Del(ctx, c.buildKey()).Err()
|
||||||
|
}
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user