mirror of
https://gitee.com/wanwujie/sub2api
synced 2026-05-04 21:20:51 +08:00
Compare commits
102 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a0b5e5bfa0 | ||
|
|
41d0657330 | ||
|
|
1a0cabbfd6 | ||
|
|
9b6dcc57bd | ||
|
|
b17704d6ef | ||
|
|
496469ac4e | ||
|
|
c1b52615be | ||
|
|
3af9940b85 | ||
|
|
22b1277572 | ||
|
|
aff98d5ae1 | ||
|
|
4e1bb2b445 | ||
|
|
dac6e52091 | ||
|
|
8987e0ba67 | ||
|
|
9d1751ec57 | ||
|
|
5d1c12e60e | ||
|
|
5b63a9b02d | ||
|
|
641e61073f | ||
|
|
095f457c57 | ||
|
|
1e57e88e43 | ||
|
|
b95ffce244 | ||
|
|
8f28a834f8 | ||
|
|
7424c73b05 | ||
|
|
1afd81b019 | ||
|
|
732d6495ea | ||
|
|
6d20ab8082 | ||
|
|
aa8ee33b0a | ||
|
|
5f630fbb19 | ||
|
|
bdbd2916f5 | ||
|
|
6dc89765fd | ||
|
|
f3233db01f | ||
|
|
6e12578bc5 | ||
|
|
a25faecadd | ||
|
|
5862e2d8d9 | ||
|
|
66d6454535 | ||
|
|
165553cfb0 | ||
|
|
b5467d610a | ||
|
|
57ff97960d | ||
|
|
5b5db88550 | ||
|
|
f03de00cb9 | ||
|
|
76aae5aa74 | ||
|
|
27ee141c1e | ||
|
|
e65574dea9 | ||
|
|
1ce9dc03f9 | ||
|
|
15ce914a62 | ||
|
|
959af1c8f6 | ||
|
|
c4d496da18 | ||
|
|
f3ea878ba2 | ||
|
|
d80469ea35 | ||
|
|
5fc30ea964 | ||
|
|
f68909a68b | ||
|
|
d162604f32 | ||
|
|
a4e329c18b | ||
|
|
ca204ddd2f | ||
|
|
ff08f9d798 | ||
|
|
ac11473833 | ||
|
|
09fd83ab9b | ||
|
|
6699d33760 | ||
|
|
f7c8377abf | ||
|
|
0dcc0e0504 | ||
|
|
5f41899705 | ||
|
|
5e060b2222 | ||
|
|
6f04c25e3d | ||
|
|
375cce29c6 | ||
|
|
67518a59ac | ||
|
|
a3ea8ecac5 | ||
|
|
497872693f | ||
|
|
748a84d871 | ||
|
|
d5dac84e12 | ||
|
|
75e1b40fb4 | ||
|
|
5eedf782f4 | ||
|
|
1949425ab9 | ||
|
|
0a80ec80e3 | ||
|
|
1f81b77911 | ||
|
|
6cd7c60549 | ||
|
|
25a5035503 | ||
|
|
9dae6c7aee | ||
|
|
ff4ef1b574 | ||
|
|
84b03efa0b | ||
|
|
3cdd5754df | ||
|
|
800802b8aa | ||
|
|
9ba42aa556 | ||
|
|
59290e39f9 | ||
|
|
4a3652ec09 | ||
|
|
375aefa209 | ||
|
|
88decb6e0c | ||
|
|
365ef1fdf7 | ||
|
|
654cfb6480 | ||
|
|
c46744f366 | ||
|
|
c2f9ad7a21 | ||
|
|
e1193212b5 | ||
|
|
a7415d4d2e | ||
|
|
6925ac25c4 | ||
|
|
a296425994 | ||
|
|
0c48f08f5c | ||
|
|
b363bff1d8 | ||
|
|
ef6ec8a15a | ||
|
|
8cf83c984e | ||
|
|
ba98243cc2 | ||
|
|
0d01bd908e | ||
|
|
7da5124067 | ||
|
|
a1425b457d | ||
|
|
20a4e41872 |
1
.gitignore
vendored
1
.gitignore
vendored
@@ -1,4 +1,5 @@
|
||||
docs/claude-relay-service/
|
||||
.codex
|
||||
|
||||
# ===================
|
||||
# Go 后端
|
||||
|
||||
@@ -33,7 +33,7 @@ func main() {
|
||||
}()
|
||||
|
||||
userRepo := repository.NewUserRepository(client, sqlDB)
|
||||
authService := service.NewAuthService(client, userRepo, nil, nil, cfg, nil, nil, nil, nil, nil, nil)
|
||||
authService := service.NewAuthService(client, userRepo, nil, nil, cfg, nil, nil, nil, nil, nil, nil, nil)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
@@ -1 +1 @@
|
||||
0.1.115
|
||||
0.1.118
|
||||
|
||||
@@ -97,6 +97,7 @@ func provideCleanup(
|
||||
scheduledTestRunner *service.ScheduledTestRunnerService,
|
||||
backupSvc *service.BackupService,
|
||||
paymentOrderExpiry *service.PaymentOrderExpiryService,
|
||||
channelMonitorRunner *service.ChannelMonitorRunner,
|
||||
) func() {
|
||||
return func() {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
@@ -239,6 +240,12 @@ func provideCleanup(
|
||||
}
|
||||
return nil
|
||||
}},
|
||||
{"ChannelMonitorRunner", func() error {
|
||||
if channelMonitorRunner != nil {
|
||||
channelMonitorRunner.Stop()
|
||||
}
|
||||
return nil
|
||||
}},
|
||||
}
|
||||
|
||||
infraSteps := []cleanupStep{
|
||||
|
||||
@@ -69,7 +69,9 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
||||
apiKeyAuthCacheInvalidator := service.ProvideAPIKeyAuthCacheInvalidator(apiKeyService)
|
||||
promoService := service.NewPromoService(promoCodeRepository, userRepository, billingCacheService, client, apiKeyAuthCacheInvalidator)
|
||||
subscriptionService := service.NewSubscriptionService(groupRepository, userSubscriptionRepository, billingCacheService, client, configConfig)
|
||||
authService := service.NewAuthService(client, userRepository, redeemCodeRepository, refreshTokenCache, configConfig, settingService, emailService, turnstileService, emailQueueService, promoService, subscriptionService)
|
||||
affiliateRepository := repository.NewAffiliateRepository(client, db)
|
||||
affiliateService := service.NewAffiliateService(affiliateRepository, settingService, apiKeyAuthCacheInvalidator, billingCacheService)
|
||||
authService := service.NewAuthService(client, userRepository, redeemCodeRepository, refreshTokenCache, configConfig, settingService, emailService, turnstileService, emailQueueService, promoService, subscriptionService, affiliateService)
|
||||
userService := service.NewUserService(userRepository, settingRepository, apiKeyAuthCacheInvalidator, billingCache)
|
||||
redeemCache := repository.NewRedeemCache(redisClient)
|
||||
redeemService := service.NewRedeemService(redeemCodeRepository, userRepository, subscriptionService, redeemCache, billingCacheService, client, apiKeyAuthCacheInvalidator)
|
||||
@@ -80,7 +82,7 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
||||
totpCache := repository.NewTotpCache(redisClient)
|
||||
totpService := service.NewTotpService(userRepository, secretEncryptor, totpCache, settingService, emailService, emailQueueService)
|
||||
authHandler := handler.NewAuthHandler(configConfig, authService, userService, settingService, promoService, redeemService, totpService)
|
||||
userHandler := handler.NewUserHandler(userService, authService, emailService, emailCache)
|
||||
userHandler := handler.NewUserHandler(userService, authService, emailService, emailCache, affiliateService)
|
||||
apiKeyHandler := handler.NewAPIKeyHandler(apiKeyService)
|
||||
usageLogRepository := repository.NewUsageLogRepository(client, db)
|
||||
usageService := service.NewUsageService(usageLogRepository, userRepository, client, apiKeyAuthCacheInvalidator)
|
||||
@@ -91,6 +93,9 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
||||
announcementReadRepository := repository.NewAnnouncementReadRepository(client)
|
||||
announcementService := service.NewAnnouncementService(announcementRepository, announcementReadRepository, userRepository, userSubscriptionRepository)
|
||||
announcementHandler := handler.NewAnnouncementHandler(announcementService)
|
||||
channelMonitorRepository := repository.NewChannelMonitorRepository(client, db)
|
||||
channelMonitorService := service.ProvideChannelMonitorService(channelMonitorRepository, secretEncryptor)
|
||||
channelMonitorUserHandler := handler.NewChannelMonitorUserHandler(channelMonitorService, settingService)
|
||||
dashboardAggregationRepository := repository.NewDashboardAggregationRepository(db)
|
||||
dashboardStatsCache := repository.NewDashboardCache(redisClient, configConfig)
|
||||
dashboardService := service.NewDashboardService(usageLogRepository, dashboardAggregationRepository, dashboardStatsCache, configConfig)
|
||||
@@ -176,7 +181,7 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
||||
claudeTokenProvider := service.ProvideClaudeTokenProvider(accountRepository, geminiTokenCache, oAuthService, oAuthRefreshAPI)
|
||||
digestSessionStore := service.NewDigestSessionStore()
|
||||
channelRepository := repository.NewChannelRepository(db)
|
||||
channelService := service.NewChannelService(channelRepository, apiKeyAuthCacheInvalidator)
|
||||
channelService := service.NewChannelService(channelRepository, groupRepository, apiKeyAuthCacheInvalidator, pricingService)
|
||||
modelPricingResolver := service.NewModelPricingResolver(channelService, billingService)
|
||||
balanceNotifyService := service.ProvideBalanceNotifyService(emailService, settingRepository, accountRepository)
|
||||
gatewayService := service.NewGatewayService(accountRepository, groupRepository, usageLogRepository, usageBillingRepository, userRepository, userSubscriptionRepository, userGroupRateRepository, gatewayCache, configConfig, schedulerSnapshotService, concurrencyService, billingService, rateLimitService, billingCacheService, identityService, httpUpstream, deferredService, claudeTokenProvider, sessionLimitCache, rpmCache, digestSessionStore, settingService, tlsFingerprintProfileService, channelService, modelPricingResolver, balanceNotifyService)
|
||||
@@ -192,7 +197,7 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
||||
paymentConfigService := service.ProvidePaymentConfigService(client, settingRepository, encryptionKey)
|
||||
registry := payment.ProvideRegistry()
|
||||
defaultLoadBalancer := payment.ProvideDefaultLoadBalancer(client, encryptionKey)
|
||||
paymentService := service.NewPaymentService(client, registry, defaultLoadBalancer, redeemService, subscriptionService, paymentConfigService, userRepository, groupRepository)
|
||||
paymentService := service.NewPaymentService(client, registry, defaultLoadBalancer, redeemService, subscriptionService, paymentConfigService, userRepository, groupRepository, affiliateService)
|
||||
settingHandler := admin.NewSettingHandler(settingService, emailService, turnstileService, opsService, paymentConfigService, paymentService)
|
||||
opsHandler := admin.NewOpsHandler(opsService)
|
||||
updateCache := repository.NewUpdateCache(redisClient)
|
||||
@@ -221,8 +226,13 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
||||
scheduledTestService := service.ProvideScheduledTestService(scheduledTestPlanRepository, scheduledTestResultRepository)
|
||||
scheduledTestHandler := admin.NewScheduledTestHandler(scheduledTestService)
|
||||
channelHandler := admin.NewChannelHandler(channelService, billingService)
|
||||
channelMonitorHandler := admin.NewChannelMonitorHandler(channelMonitorService)
|
||||
channelMonitorRequestTemplateRepository := repository.NewChannelMonitorRequestTemplateRepository(client, db)
|
||||
channelMonitorRequestTemplateService := service.NewChannelMonitorRequestTemplateService(channelMonitorRequestTemplateRepository)
|
||||
channelMonitorRequestTemplateHandler := admin.NewChannelMonitorRequestTemplateHandler(channelMonitorRequestTemplateService)
|
||||
paymentHandler := admin.NewPaymentHandler(paymentService, paymentConfigService)
|
||||
adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, adminAnnouncementHandler, dataManagementHandler, backupHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, antigravityOAuthHandler, proxyHandler, adminRedeemHandler, promoHandler, settingHandler, opsHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler, userAttributeHandler, errorPassthroughHandler, tlsFingerprintProfileHandler, adminAPIKeyHandler, scheduledTestHandler, channelHandler, paymentHandler)
|
||||
affiliateHandler := admin.NewAffiliateHandler(affiliateService, adminService)
|
||||
adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, adminAnnouncementHandler, dataManagementHandler, backupHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, antigravityOAuthHandler, proxyHandler, adminRedeemHandler, promoHandler, settingHandler, opsHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler, userAttributeHandler, errorPassthroughHandler, tlsFingerprintProfileHandler, adminAPIKeyHandler, scheduledTestHandler, channelHandler, channelMonitorHandler, channelMonitorRequestTemplateHandler, paymentHandler, affiliateHandler)
|
||||
usageRecordWorkerPool := service.NewUsageRecordWorkerPool(configConfig)
|
||||
userMsgQueueCache := repository.NewUserMsgQueueCache(redisClient)
|
||||
userMessageQueueService := service.ProvideUserMessageQueueService(userMsgQueueCache, rpmCache, configConfig)
|
||||
@@ -232,9 +242,10 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
||||
totpHandler := handler.NewTotpHandler(totpService)
|
||||
handlerPaymentHandler := handler.NewPaymentHandler(paymentService, paymentConfigService, channelService)
|
||||
paymentWebhookHandler := handler.NewPaymentWebhookHandler(paymentService, registry)
|
||||
availableChannelHandler := handler.NewAvailableChannelHandler(channelService, apiKeyService, settingService)
|
||||
idempotencyCoordinator := service.ProvideIdempotencyCoordinator(idempotencyRepository, configConfig)
|
||||
idempotencyCleanupService := service.ProvideIdempotencyCleanupService(idempotencyRepository, configConfig)
|
||||
handlers := handler.ProvideHandlers(authHandler, userHandler, apiKeyHandler, usageHandler, redeemHandler, subscriptionHandler, announcementHandler, adminHandlers, gatewayHandler, openAIGatewayHandler, handlerSettingHandler, totpHandler, handlerPaymentHandler, paymentWebhookHandler, idempotencyCoordinator, idempotencyCleanupService)
|
||||
handlers := handler.ProvideHandlers(authHandler, userHandler, apiKeyHandler, usageHandler, redeemHandler, subscriptionHandler, announcementHandler, channelMonitorUserHandler, adminHandlers, gatewayHandler, openAIGatewayHandler, handlerSettingHandler, totpHandler, handlerPaymentHandler, paymentWebhookHandler, availableChannelHandler, idempotencyCoordinator, idempotencyCleanupService)
|
||||
jwtAuthMiddleware := middleware.NewJWTAuthMiddleware(authService, userService)
|
||||
adminAuthMiddleware := middleware.NewAdminAuthMiddleware(authService, userService, settingService)
|
||||
apiKeyAuthMiddleware := middleware.NewAPIKeyAuthMiddleware(apiKeyService, subscriptionService, configConfig)
|
||||
@@ -243,14 +254,15 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
||||
opsMetricsCollector := service.ProvideOpsMetricsCollector(opsRepository, settingRepository, accountRepository, concurrencyService, db, redisClient, configConfig)
|
||||
opsAggregationService := service.ProvideOpsAggregationService(opsRepository, settingRepository, db, redisClient, configConfig)
|
||||
opsAlertEvaluatorService := service.ProvideOpsAlertEvaluatorService(opsService, opsRepository, emailService, redisClient, configConfig)
|
||||
opsCleanupService := service.ProvideOpsCleanupService(opsRepository, db, redisClient, configConfig)
|
||||
opsCleanupService := service.ProvideOpsCleanupService(opsRepository, db, redisClient, configConfig, channelMonitorService)
|
||||
opsScheduledReportService := service.ProvideOpsScheduledReportService(opsService, userService, emailService, redisClient, configConfig)
|
||||
tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, compositeTokenCacheInvalidator, schedulerCache, configConfig, tempUnschedCache, privacyClientFactory, proxyRepository, oAuthRefreshAPI)
|
||||
accountExpiryService := service.ProvideAccountExpiryService(accountRepository)
|
||||
subscriptionExpiryService := service.ProvideSubscriptionExpiryService(userSubscriptionRepository)
|
||||
scheduledTestRunnerService := service.ProvideScheduledTestRunnerService(scheduledTestPlanRepository, scheduledTestService, accountTestService, rateLimitService, configConfig)
|
||||
paymentOrderExpiryService := service.ProvidePaymentOrderExpiryService(paymentService)
|
||||
v := provideCleanup(client, redisClient, opsMetricsCollector, opsAggregationService, opsAlertEvaluatorService, opsCleanupService, opsScheduledReportService, opsSystemLogSink, schedulerSnapshotService, tokenRefreshService, accountExpiryService, subscriptionExpiryService, usageCleanupService, idempotencyCleanupService, pricingService, emailQueueService, billingCacheService, usageRecordWorkerPool, subscriptionService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, openAIGatewayService, scheduledTestRunnerService, backupService, paymentOrderExpiryService)
|
||||
channelMonitorRunner := service.ProvideChannelMonitorRunner(channelMonitorService, settingService)
|
||||
v := provideCleanup(client, redisClient, opsMetricsCollector, opsAggregationService, opsAlertEvaluatorService, opsCleanupService, opsScheduledReportService, opsSystemLogSink, schedulerSnapshotService, tokenRefreshService, accountExpiryService, subscriptionExpiryService, usageCleanupService, idempotencyCleanupService, pricingService, emailQueueService, billingCacheService, usageRecordWorkerPool, subscriptionService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, openAIGatewayService, scheduledTestRunnerService, backupService, paymentOrderExpiryService, channelMonitorRunner)
|
||||
application := &Application{
|
||||
Server: httpServer,
|
||||
Cleanup: v,
|
||||
@@ -304,6 +316,7 @@ func provideCleanup(
|
||||
scheduledTestRunner *service.ScheduledTestRunnerService,
|
||||
backupSvc *service.BackupService,
|
||||
paymentOrderExpiry *service.PaymentOrderExpiryService,
|
||||
channelMonitorRunner *service.ChannelMonitorRunner,
|
||||
) func() {
|
||||
return func() {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
@@ -445,6 +458,12 @@ func provideCleanup(
|
||||
}
|
||||
return nil
|
||||
}},
|
||||
{"ChannelMonitorRunner", func() error {
|
||||
if channelMonitorRunner != nil {
|
||||
channelMonitorRunner.Stop()
|
||||
}
|
||||
return nil
|
||||
}},
|
||||
}
|
||||
|
||||
infraSteps := []cleanupStep{
|
||||
|
||||
@@ -76,6 +76,7 @@ func TestProvideCleanup_WithMinimalDependencies_NoPanic(t *testing.T) {
|
||||
nil, // scheduledTestRunner
|
||||
nil, // backupSvc
|
||||
nil, // paymentOrderExpiry
|
||||
nil, // channelMonitorRunner
|
||||
)
|
||||
|
||||
require.NotPanics(t, func() {
|
||||
|
||||
359
backend/ent/channelmonitor.go
Normal file
359
backend/ent/channelmonitor.go
Normal file
@@ -0,0 +1,359 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorrequesttemplate"
|
||||
)
|
||||
|
||||
// ChannelMonitor is the model entity for the ChannelMonitor schema.
|
||||
type ChannelMonitor struct {
|
||||
config `json:"-"`
|
||||
// ID of the ent.
|
||||
ID int64 `json:"id,omitempty"`
|
||||
// CreatedAt holds the value of the "created_at" field.
|
||||
CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
// UpdatedAt holds the value of the "updated_at" field.
|
||||
UpdatedAt time.Time `json:"updated_at,omitempty"`
|
||||
// Name holds the value of the "name" field.
|
||||
Name string `json:"name,omitempty"`
|
||||
// Provider holds the value of the "provider" field.
|
||||
Provider channelmonitor.Provider `json:"provider,omitempty"`
|
||||
// Provider base origin, e.g. https://api.openai.com
|
||||
Endpoint string `json:"endpoint,omitempty"`
|
||||
// AES-256-GCM encrypted API key
|
||||
APIKeyEncrypted string `json:"-"`
|
||||
// PrimaryModel holds the value of the "primary_model" field.
|
||||
PrimaryModel string `json:"primary_model,omitempty"`
|
||||
// Additional model names to test alongside primary_model
|
||||
ExtraModels []string `json:"extra_models,omitempty"`
|
||||
// GroupName holds the value of the "group_name" field.
|
||||
GroupName string `json:"group_name,omitempty"`
|
||||
// Enabled holds the value of the "enabled" field.
|
||||
Enabled bool `json:"enabled,omitempty"`
|
||||
// IntervalSeconds holds the value of the "interval_seconds" field.
|
||||
IntervalSeconds int `json:"interval_seconds,omitempty"`
|
||||
// LastCheckedAt holds the value of the "last_checked_at" field.
|
||||
LastCheckedAt *time.Time `json:"last_checked_at,omitempty"`
|
||||
// CreatedBy holds the value of the "created_by" field.
|
||||
CreatedBy int64 `json:"created_by,omitempty"`
|
||||
// TemplateID holds the value of the "template_id" field.
|
||||
TemplateID *int64 `json:"template_id,omitempty"`
|
||||
// ExtraHeaders holds the value of the "extra_headers" field.
|
||||
ExtraHeaders map[string]string `json:"extra_headers,omitempty"`
|
||||
// BodyOverrideMode holds the value of the "body_override_mode" field.
|
||||
BodyOverrideMode string `json:"body_override_mode,omitempty"`
|
||||
// BodyOverride holds the value of the "body_override" field.
|
||||
BodyOverride map[string]interface{} `json:"body_override,omitempty"`
|
||||
// Edges holds the relations/edges for other nodes in the graph.
|
||||
// The values are being populated by the ChannelMonitorQuery when eager-loading is set.
|
||||
Edges ChannelMonitorEdges `json:"edges"`
|
||||
selectValues sql.SelectValues
|
||||
}
|
||||
|
||||
// ChannelMonitorEdges holds the relations/edges for other nodes in the graph.
|
||||
type ChannelMonitorEdges struct {
|
||||
// History holds the value of the history edge.
|
||||
History []*ChannelMonitorHistory `json:"history,omitempty"`
|
||||
// DailyRollups holds the value of the daily_rollups edge.
|
||||
DailyRollups []*ChannelMonitorDailyRollup `json:"daily_rollups,omitempty"`
|
||||
// RequestTemplate holds the value of the request_template edge.
|
||||
RequestTemplate *ChannelMonitorRequestTemplate `json:"request_template,omitempty"`
|
||||
// loadedTypes holds the information for reporting if a
|
||||
// type was loaded (or requested) in eager-loading or not.
|
||||
loadedTypes [3]bool
|
||||
}
|
||||
|
||||
// HistoryOrErr returns the History value or an error if the edge
|
||||
// was not loaded in eager-loading.
|
||||
func (e ChannelMonitorEdges) HistoryOrErr() ([]*ChannelMonitorHistory, error) {
|
||||
if e.loadedTypes[0] {
|
||||
return e.History, nil
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "history"}
|
||||
}
|
||||
|
||||
// DailyRollupsOrErr returns the DailyRollups value or an error if the edge
|
||||
// was not loaded in eager-loading.
|
||||
func (e ChannelMonitorEdges) DailyRollupsOrErr() ([]*ChannelMonitorDailyRollup, error) {
|
||||
if e.loadedTypes[1] {
|
||||
return e.DailyRollups, nil
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "daily_rollups"}
|
||||
}
|
||||
|
||||
// RequestTemplateOrErr returns the RequestTemplate value or an error if the edge
|
||||
// was not loaded in eager-loading, or loaded but was not found.
|
||||
func (e ChannelMonitorEdges) RequestTemplateOrErr() (*ChannelMonitorRequestTemplate, error) {
|
||||
if e.RequestTemplate != nil {
|
||||
return e.RequestTemplate, nil
|
||||
} else if e.loadedTypes[2] {
|
||||
return nil, &NotFoundError{label: channelmonitorrequesttemplate.Label}
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "request_template"}
|
||||
}
|
||||
|
||||
// scanValues returns the types for scanning values from sql.Rows.
|
||||
func (*ChannelMonitor) scanValues(columns []string) ([]any, error) {
|
||||
values := make([]any, len(columns))
|
||||
for i := range columns {
|
||||
switch columns[i] {
|
||||
case channelmonitor.FieldExtraModels, channelmonitor.FieldExtraHeaders, channelmonitor.FieldBodyOverride:
|
||||
values[i] = new([]byte)
|
||||
case channelmonitor.FieldEnabled:
|
||||
values[i] = new(sql.NullBool)
|
||||
case channelmonitor.FieldID, channelmonitor.FieldIntervalSeconds, channelmonitor.FieldCreatedBy, channelmonitor.FieldTemplateID:
|
||||
values[i] = new(sql.NullInt64)
|
||||
case channelmonitor.FieldName, channelmonitor.FieldProvider, channelmonitor.FieldEndpoint, channelmonitor.FieldAPIKeyEncrypted, channelmonitor.FieldPrimaryModel, channelmonitor.FieldGroupName, channelmonitor.FieldBodyOverrideMode:
|
||||
values[i] = new(sql.NullString)
|
||||
case channelmonitor.FieldCreatedAt, channelmonitor.FieldUpdatedAt, channelmonitor.FieldLastCheckedAt:
|
||||
values[i] = new(sql.NullTime)
|
||||
default:
|
||||
values[i] = new(sql.UnknownType)
|
||||
}
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// assignValues assigns the values that were returned from sql.Rows (after scanning)
|
||||
// to the ChannelMonitor fields.
|
||||
func (_m *ChannelMonitor) assignValues(columns []string, values []any) error {
|
||||
if m, n := len(values), len(columns); m < n {
|
||||
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
|
||||
}
|
||||
for i := range columns {
|
||||
switch columns[i] {
|
||||
case channelmonitor.FieldID:
|
||||
value, ok := values[i].(*sql.NullInt64)
|
||||
if !ok {
|
||||
return fmt.Errorf("unexpected type %T for field id", value)
|
||||
}
|
||||
_m.ID = int64(value.Int64)
|
||||
case channelmonitor.FieldCreatedAt:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field created_at", values[i])
|
||||
} else if value.Valid {
|
||||
_m.CreatedAt = value.Time
|
||||
}
|
||||
case channelmonitor.FieldUpdatedAt:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field updated_at", values[i])
|
||||
} else if value.Valid {
|
||||
_m.UpdatedAt = value.Time
|
||||
}
|
||||
case channelmonitor.FieldName:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field name", values[i])
|
||||
} else if value.Valid {
|
||||
_m.Name = value.String
|
||||
}
|
||||
case channelmonitor.FieldProvider:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field provider", values[i])
|
||||
} else if value.Valid {
|
||||
_m.Provider = channelmonitor.Provider(value.String)
|
||||
}
|
||||
case channelmonitor.FieldEndpoint:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field endpoint", values[i])
|
||||
} else if value.Valid {
|
||||
_m.Endpoint = value.String
|
||||
}
|
||||
case channelmonitor.FieldAPIKeyEncrypted:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field api_key_encrypted", values[i])
|
||||
} else if value.Valid {
|
||||
_m.APIKeyEncrypted = value.String
|
||||
}
|
||||
case channelmonitor.FieldPrimaryModel:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field primary_model", values[i])
|
||||
} else if value.Valid {
|
||||
_m.PrimaryModel = value.String
|
||||
}
|
||||
case channelmonitor.FieldExtraModels:
|
||||
if value, ok := values[i].(*[]byte); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field extra_models", values[i])
|
||||
} else if value != nil && len(*value) > 0 {
|
||||
if err := json.Unmarshal(*value, &_m.ExtraModels); err != nil {
|
||||
return fmt.Errorf("unmarshal field extra_models: %w", err)
|
||||
}
|
||||
}
|
||||
case channelmonitor.FieldGroupName:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field group_name", values[i])
|
||||
} else if value.Valid {
|
||||
_m.GroupName = value.String
|
||||
}
|
||||
case channelmonitor.FieldEnabled:
|
||||
if value, ok := values[i].(*sql.NullBool); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field enabled", values[i])
|
||||
} else if value.Valid {
|
||||
_m.Enabled = value.Bool
|
||||
}
|
||||
case channelmonitor.FieldIntervalSeconds:
|
||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field interval_seconds", values[i])
|
||||
} else if value.Valid {
|
||||
_m.IntervalSeconds = int(value.Int64)
|
||||
}
|
||||
case channelmonitor.FieldLastCheckedAt:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field last_checked_at", values[i])
|
||||
} else if value.Valid {
|
||||
_m.LastCheckedAt = new(time.Time)
|
||||
*_m.LastCheckedAt = value.Time
|
||||
}
|
||||
case channelmonitor.FieldCreatedBy:
|
||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field created_by", values[i])
|
||||
} else if value.Valid {
|
||||
_m.CreatedBy = value.Int64
|
||||
}
|
||||
case channelmonitor.FieldTemplateID:
|
||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field template_id", values[i])
|
||||
} else if value.Valid {
|
||||
_m.TemplateID = new(int64)
|
||||
*_m.TemplateID = value.Int64
|
||||
}
|
||||
case channelmonitor.FieldExtraHeaders:
|
||||
if value, ok := values[i].(*[]byte); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field extra_headers", values[i])
|
||||
} else if value != nil && len(*value) > 0 {
|
||||
if err := json.Unmarshal(*value, &_m.ExtraHeaders); err != nil {
|
||||
return fmt.Errorf("unmarshal field extra_headers: %w", err)
|
||||
}
|
||||
}
|
||||
case channelmonitor.FieldBodyOverrideMode:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field body_override_mode", values[i])
|
||||
} else if value.Valid {
|
||||
_m.BodyOverrideMode = value.String
|
||||
}
|
||||
case channelmonitor.FieldBodyOverride:
|
||||
if value, ok := values[i].(*[]byte); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field body_override", values[i])
|
||||
} else if value != nil && len(*value) > 0 {
|
||||
if err := json.Unmarshal(*value, &_m.BodyOverride); err != nil {
|
||||
return fmt.Errorf("unmarshal field body_override: %w", err)
|
||||
}
|
||||
}
|
||||
default:
|
||||
_m.selectValues.Set(columns[i], values[i])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value returns the ent.Value that was dynamically selected and assigned to the ChannelMonitor.
|
||||
// This includes values selected through modifiers, order, etc.
|
||||
func (_m *ChannelMonitor) Value(name string) (ent.Value, error) {
|
||||
return _m.selectValues.Get(name)
|
||||
}
|
||||
|
||||
// QueryHistory queries the "history" edge of the ChannelMonitor entity.
|
||||
func (_m *ChannelMonitor) QueryHistory() *ChannelMonitorHistoryQuery {
|
||||
return NewChannelMonitorClient(_m.config).QueryHistory(_m)
|
||||
}
|
||||
|
||||
// QueryDailyRollups queries the "daily_rollups" edge of the ChannelMonitor entity.
|
||||
func (_m *ChannelMonitor) QueryDailyRollups() *ChannelMonitorDailyRollupQuery {
|
||||
return NewChannelMonitorClient(_m.config).QueryDailyRollups(_m)
|
||||
}
|
||||
|
||||
// QueryRequestTemplate queries the "request_template" edge of the ChannelMonitor entity.
|
||||
func (_m *ChannelMonitor) QueryRequestTemplate() *ChannelMonitorRequestTemplateQuery {
|
||||
return NewChannelMonitorClient(_m.config).QueryRequestTemplate(_m)
|
||||
}
|
||||
|
||||
// Update returns a builder for updating this ChannelMonitor.
|
||||
// Note that you need to call ChannelMonitor.Unwrap() before calling this method if this ChannelMonitor
|
||||
// was returned from a transaction, and the transaction was committed or rolled back.
|
||||
func (_m *ChannelMonitor) Update() *ChannelMonitorUpdateOne {
|
||||
return NewChannelMonitorClient(_m.config).UpdateOne(_m)
|
||||
}
|
||||
|
||||
// Unwrap unwraps the ChannelMonitor entity that was returned from a transaction after it was closed,
|
||||
// so that all future queries will be executed through the driver which created the transaction.
|
||||
func (_m *ChannelMonitor) Unwrap() *ChannelMonitor {
|
||||
_tx, ok := _m.config.driver.(*txDriver)
|
||||
if !ok {
|
||||
panic("ent: ChannelMonitor is not a transactional entity")
|
||||
}
|
||||
_m.config.driver = _tx.drv
|
||||
return _m
|
||||
}
|
||||
|
||||
// String implements the fmt.Stringer.
// The output lists fields in schema order; the encrypted API key is redacted,
// and nillable fields (last_checked_at, template_id) are printed only when set.
func (_m *ChannelMonitor) String() string {
	var builder strings.Builder
	builder.WriteString("ChannelMonitor(")
	builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
	builder.WriteString("created_at=")
	builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	builder.WriteString("updated_at=")
	builder.WriteString(_m.UpdatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	builder.WriteString("name=")
	builder.WriteString(_m.Name)
	builder.WriteString(", ")
	builder.WriteString("provider=")
	builder.WriteString(fmt.Sprintf("%v", _m.Provider))
	builder.WriteString(", ")
	builder.WriteString("endpoint=")
	builder.WriteString(_m.Endpoint)
	builder.WriteString(", ")
	// Sensitive field: never print the stored ciphertext.
	builder.WriteString("api_key_encrypted=<sensitive>")
	builder.WriteString(", ")
	builder.WriteString("primary_model=")
	builder.WriteString(_m.PrimaryModel)
	builder.WriteString(", ")
	builder.WriteString("extra_models=")
	builder.WriteString(fmt.Sprintf("%v", _m.ExtraModels))
	builder.WriteString(", ")
	builder.WriteString("group_name=")
	builder.WriteString(_m.GroupName)
	builder.WriteString(", ")
	builder.WriteString("enabled=")
	builder.WriteString(fmt.Sprintf("%v", _m.Enabled))
	builder.WriteString(", ")
	builder.WriteString("interval_seconds=")
	builder.WriteString(fmt.Sprintf("%v", _m.IntervalSeconds))
	builder.WriteString(", ")
	// Optional (pointer) field: omitted entirely when nil.
	if v := _m.LastCheckedAt; v != nil {
		builder.WriteString("last_checked_at=")
		builder.WriteString(v.Format(time.ANSIC))
	}
	builder.WriteString(", ")
	builder.WriteString("created_by=")
	builder.WriteString(fmt.Sprintf("%v", _m.CreatedBy))
	builder.WriteString(", ")
	// Optional (pointer) field: omitted entirely when nil.
	if v := _m.TemplateID; v != nil {
		builder.WriteString("template_id=")
		builder.WriteString(fmt.Sprintf("%v", *v))
	}
	builder.WriteString(", ")
	builder.WriteString("extra_headers=")
	builder.WriteString(fmt.Sprintf("%v", _m.ExtraHeaders))
	builder.WriteString(", ")
	builder.WriteString("body_override_mode=")
	builder.WriteString(_m.BodyOverrideMode)
	builder.WriteString(", ")
	builder.WriteString("body_override=")
	builder.WriteString(fmt.Sprintf("%v", _m.BodyOverride))
	builder.WriteByte(')')
	return builder.String()
}
|
||||
|
||||
// ChannelMonitors is a parsable slice of ChannelMonitor.
type ChannelMonitors []*ChannelMonitor
|
||||
304
backend/ent/channelmonitor/channelmonitor.go
Normal file
304
backend/ent/channelmonitor/channelmonitor.go
Normal file
@@ -0,0 +1,304 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package channelmonitor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
)
|
||||
|
||||
const (
	// Label holds the string label denoting the channelmonitor type in the database.
	Label = "channel_monitor"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldCreatedAt holds the string denoting the created_at field in the database.
	FieldCreatedAt = "created_at"
	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
	FieldUpdatedAt = "updated_at"
	// FieldName holds the string denoting the name field in the database.
	FieldName = "name"
	// FieldProvider holds the string denoting the provider field in the database.
	FieldProvider = "provider"
	// FieldEndpoint holds the string denoting the endpoint field in the database.
	FieldEndpoint = "endpoint"
	// FieldAPIKeyEncrypted holds the string denoting the api_key_encrypted field in the database.
	FieldAPIKeyEncrypted = "api_key_encrypted"
	// FieldPrimaryModel holds the string denoting the primary_model field in the database.
	FieldPrimaryModel = "primary_model"
	// FieldExtraModels holds the string denoting the extra_models field in the database.
	FieldExtraModels = "extra_models"
	// FieldGroupName holds the string denoting the group_name field in the database.
	FieldGroupName = "group_name"
	// FieldEnabled holds the string denoting the enabled field in the database.
	FieldEnabled = "enabled"
	// FieldIntervalSeconds holds the string denoting the interval_seconds field in the database.
	FieldIntervalSeconds = "interval_seconds"
	// FieldLastCheckedAt holds the string denoting the last_checked_at field in the database.
	FieldLastCheckedAt = "last_checked_at"
	// FieldCreatedBy holds the string denoting the created_by field in the database.
	FieldCreatedBy = "created_by"
	// FieldTemplateID holds the string denoting the template_id field in the database.
	FieldTemplateID = "template_id"
	// FieldExtraHeaders holds the string denoting the extra_headers field in the database.
	FieldExtraHeaders = "extra_headers"
	// FieldBodyOverrideMode holds the string denoting the body_override_mode field in the database.
	FieldBodyOverrideMode = "body_override_mode"
	// FieldBodyOverride holds the string denoting the body_override field in the database.
	FieldBodyOverride = "body_override"
	// EdgeHistory holds the string denoting the history edge name in mutations.
	EdgeHistory = "history"
	// EdgeDailyRollups holds the string denoting the daily_rollups edge name in mutations.
	EdgeDailyRollups = "daily_rollups"
	// EdgeRequestTemplate holds the string denoting the request_template edge name in mutations.
	EdgeRequestTemplate = "request_template"
	// Table holds the table name of the channelmonitor in the database.
	Table = "channel_monitors"
	// HistoryTable is the table that holds the history relation/edge.
	HistoryTable = "channel_monitor_histories"
	// HistoryInverseTable is the table name for the ChannelMonitorHistory entity.
	// It exists in this package in order to avoid circular dependency with the "channelmonitorhistory" package.
	HistoryInverseTable = "channel_monitor_histories"
	// HistoryColumn is the table column denoting the history relation/edge.
	HistoryColumn = "monitor_id"
	// DailyRollupsTable is the table that holds the daily_rollups relation/edge.
	DailyRollupsTable = "channel_monitor_daily_rollups"
	// DailyRollupsInverseTable is the table name for the ChannelMonitorDailyRollup entity.
	// It exists in this package in order to avoid circular dependency with the "channelmonitordailyrollup" package.
	DailyRollupsInverseTable = "channel_monitor_daily_rollups"
	// DailyRollupsColumn is the table column denoting the daily_rollups relation/edge.
	DailyRollupsColumn = "monitor_id"
	// RequestTemplateTable is the table that holds the request_template relation/edge.
	RequestTemplateTable = "channel_monitors"
	// RequestTemplateInverseTable is the table name for the ChannelMonitorRequestTemplate entity.
	// It exists in this package in order to avoid circular dependency with the "channelmonitorrequesttemplate" package.
	RequestTemplateInverseTable = "channel_monitor_request_templates"
	// RequestTemplateColumn is the table column denoting the request_template relation/edge.
	RequestTemplateColumn = "template_id"
)

// Columns holds all SQL columns for channelmonitor fields.
var Columns = []string{
	FieldID,
	FieldCreatedAt,
	FieldUpdatedAt,
	FieldName,
	FieldProvider,
	FieldEndpoint,
	FieldAPIKeyEncrypted,
	FieldPrimaryModel,
	FieldExtraModels,
	FieldGroupName,
	FieldEnabled,
	FieldIntervalSeconds,
	FieldLastCheckedAt,
	FieldCreatedBy,
	FieldTemplateID,
	FieldExtraHeaders,
	FieldBodyOverrideMode,
	FieldBodyOverride,
}

// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
	for _, c := range Columns {
		if c == column {
			return true
		}
	}
	return false
}
|
||||
|
||||
// Package-level hooks for field defaults and validators. They are declared
// here and assigned elsewhere by the generated runtime initialization
// (presumably ent's runtime.go — confirm against the generated runtime package).
var (
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
	UpdateDefaultUpdatedAt func() time.Time
	// NameValidator is a validator for the "name" field. It is called by the builders before save.
	NameValidator func(string) error
	// EndpointValidator is a validator for the "endpoint" field. It is called by the builders before save.
	EndpointValidator func(string) error
	// APIKeyEncryptedValidator is a validator for the "api_key_encrypted" field. It is called by the builders before save.
	APIKeyEncryptedValidator func(string) error
	// PrimaryModelValidator is a validator for the "primary_model" field. It is called by the builders before save.
	PrimaryModelValidator func(string) error
	// DefaultExtraModels holds the default value on creation for the "extra_models" field.
	DefaultExtraModels []string
	// DefaultGroupName holds the default value on creation for the "group_name" field.
	DefaultGroupName string
	// GroupNameValidator is a validator for the "group_name" field. It is called by the builders before save.
	GroupNameValidator func(string) error
	// DefaultEnabled holds the default value on creation for the "enabled" field.
	DefaultEnabled bool
	// IntervalSecondsValidator is a validator for the "interval_seconds" field. It is called by the builders before save.
	IntervalSecondsValidator func(int) error
	// DefaultExtraHeaders holds the default value on creation for the "extra_headers" field.
	DefaultExtraHeaders map[string]string
	// DefaultBodyOverrideMode holds the default value on creation for the "body_override_mode" field.
	DefaultBodyOverrideMode string
	// BodyOverrideModeValidator is a validator for the "body_override_mode" field. It is called by the builders before save.
	BodyOverrideModeValidator func(string) error
)
|
||||
|
||||
// Provider defines the type for the "provider" enum field.
type Provider string

// Provider values.
const (
	ProviderOpenai    Provider = "openai"
	ProviderAnthropic Provider = "anthropic"
	ProviderGemini    Provider = "gemini"
)

// String returns the enum's underlying string value.
func (pr Provider) String() string {
	return string(pr)
}

// ProviderValidator is a validator for the "provider" field enum values. It is called by the builders before save.
func ProviderValidator(pr Provider) error {
	// Accept only the declared enum members; everything else is rejected.
	switch pr {
	case ProviderOpenai, ProviderAnthropic, ProviderGemini:
		return nil
	}
	return fmt.Errorf("channelmonitor: invalid enum value for provider field: %q", pr)
}
|
||||
|
||||
// OrderOption defines the ordering options for the ChannelMonitor queries.
// Note: JSON-backed fields (extra_models, extra_headers, body_override) have
// no ordering helpers here.
type OrderOption func(*sql.Selector)

// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}

// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}

// ByName orders the results by the name field.
func ByName(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldName, opts...).ToFunc()
}

// ByProvider orders the results by the provider field.
func ByProvider(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldProvider, opts...).ToFunc()
}

// ByEndpoint orders the results by the endpoint field.
func ByEndpoint(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldEndpoint, opts...).ToFunc()
}

// ByAPIKeyEncrypted orders the results by the api_key_encrypted field.
func ByAPIKeyEncrypted(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldAPIKeyEncrypted, opts...).ToFunc()
}

// ByPrimaryModel orders the results by the primary_model field.
func ByPrimaryModel(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldPrimaryModel, opts...).ToFunc()
}

// ByGroupName orders the results by the group_name field.
func ByGroupName(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldGroupName, opts...).ToFunc()
}

// ByEnabled orders the results by the enabled field.
func ByEnabled(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldEnabled, opts...).ToFunc()
}

// ByIntervalSeconds orders the results by the interval_seconds field.
func ByIntervalSeconds(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldIntervalSeconds, opts...).ToFunc()
}

// ByLastCheckedAt orders the results by the last_checked_at field.
func ByLastCheckedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldLastCheckedAt, opts...).ToFunc()
}

// ByCreatedBy orders the results by the created_by field.
func ByCreatedBy(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedBy, opts...).ToFunc()
}

// ByTemplateID orders the results by the template_id field.
func ByTemplateID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldTemplateID, opts...).ToFunc()
}

// ByBodyOverrideMode orders the results by the body_override_mode field.
func ByBodyOverrideMode(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldBodyOverrideMode, opts...).ToFunc()
}

// ByHistoryCount orders the results by history count.
func ByHistoryCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newHistoryStep(), opts...)
	}
}

// ByHistory orders the results by history terms.
func ByHistory(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newHistoryStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}

// ByDailyRollupsCount orders the results by daily_rollups count.
func ByDailyRollupsCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newDailyRollupsStep(), opts...)
	}
}

// ByDailyRollups orders the results by daily_rollups terms.
func ByDailyRollups(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newDailyRollupsStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}

// ByRequestTemplateField orders the results by request_template field.
func ByRequestTemplateField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newRequestTemplateStep(), sql.OrderByField(field, opts...))
	}
}
|
||||
// newHistoryStep builds the graph-traversal step for the "history" edge
// (one-to-many, joined on monitor_id).
func newHistoryStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(HistoryInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.O2M, false, HistoryTable, HistoryColumn),
	)
}

// newDailyRollupsStep builds the graph-traversal step for the "daily_rollups"
// edge (one-to-many, joined on monitor_id).
func newDailyRollupsStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(DailyRollupsInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.O2M, false, DailyRollupsTable, DailyRollupsColumn),
	)
}

// newRequestTemplateStep builds the graph-traversal step for the
// "request_template" edge (many-to-one via the local template_id column).
func newRequestTemplateStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(RequestTemplateInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, false, RequestTemplateTable, RequestTemplateColumn),
	)
}
|
||||
885
backend/ent/channelmonitor/where.go
Normal file
885
backend/ent/channelmonitor/where.go
Normal file
@@ -0,0 +1,885 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package channelmonitor
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// ID predicates and single-value equality shortcuts for ChannelMonitor fields.

// ID filters vertices based on their ID field.
func ID(id int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldID, id))
}

// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldID, id))
}

// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNEQ(FieldID, id))
}

// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldIn(FieldID, ids...))
}

// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNotIn(FieldID, ids...))
}

// IDGT applies the GT predicate on the ID field.
func IDGT(id int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGT(FieldID, id))
}

// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGTE(FieldID, id))
}

// IDLT applies the LT predicate on the ID field.
func IDLT(id int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLT(FieldID, id))
}

// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLTE(FieldID, id))
}

// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldCreatedAt, v))
}

// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldUpdatedAt, v))
}

// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
func Name(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldName, v))
}

// Endpoint applies equality check predicate on the "endpoint" field. It's identical to EndpointEQ.
func Endpoint(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldEndpoint, v))
}

// APIKeyEncrypted applies equality check predicate on the "api_key_encrypted" field. It's identical to APIKeyEncryptedEQ.
func APIKeyEncrypted(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldAPIKeyEncrypted, v))
}

// PrimaryModel applies equality check predicate on the "primary_model" field. It's identical to PrimaryModelEQ.
func PrimaryModel(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldPrimaryModel, v))
}

// GroupName applies equality check predicate on the "group_name" field. It's identical to GroupNameEQ.
func GroupName(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldGroupName, v))
}

// Enabled applies equality check predicate on the "enabled" field. It's identical to EnabledEQ.
func Enabled(v bool) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldEnabled, v))
}

// IntervalSeconds applies equality check predicate on the "interval_seconds" field. It's identical to IntervalSecondsEQ.
func IntervalSeconds(v int) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldIntervalSeconds, v))
}

// LastCheckedAt applies equality check predicate on the "last_checked_at" field. It's identical to LastCheckedAtEQ.
func LastCheckedAt(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldLastCheckedAt, v))
}

// CreatedBy applies equality check predicate on the "created_by" field. It's identical to CreatedByEQ.
func CreatedBy(v int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldCreatedBy, v))
}

// TemplateID applies equality check predicate on the "template_id" field. It's identical to TemplateIDEQ.
func TemplateID(v int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldTemplateID, v))
}

// BodyOverrideMode applies equality check predicate on the "body_override_mode" field. It's identical to BodyOverrideModeEQ.
func BodyOverrideMode(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldBodyOverrideMode, v))
}
|
||||
|
||||
// Range/comparison predicates for the "created_at" and "updated_at" timestamp fields.

// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldCreatedAt, v))
}

// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNEQ(FieldCreatedAt, v))
}

// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldIn(FieldCreatedAt, vs...))
}

// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNotIn(FieldCreatedAt, vs...))
}

// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGT(FieldCreatedAt, v))
}

// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGTE(FieldCreatedAt, v))
}

// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLT(FieldCreatedAt, v))
}

// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLTE(FieldCreatedAt, v))
}

// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldUpdatedAt, v))
}

// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNEQ(FieldUpdatedAt, v))
}

// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldIn(FieldUpdatedAt, vs...))
}

// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNotIn(FieldUpdatedAt, vs...))
}

// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGT(FieldUpdatedAt, v))
}

// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGTE(FieldUpdatedAt, v))
}

// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLT(FieldUpdatedAt, v))
}

// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLTE(FieldUpdatedAt, v))
}
|
||||
|
||||
// String predicates for the "name" field, including substring and
// case-insensitive (fold) variants.

// NameEQ applies the EQ predicate on the "name" field.
func NameEQ(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldName, v))
}

// NameNEQ applies the NEQ predicate on the "name" field.
func NameNEQ(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNEQ(FieldName, v))
}

// NameIn applies the In predicate on the "name" field.
func NameIn(vs ...string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldIn(FieldName, vs...))
}

// NameNotIn applies the NotIn predicate on the "name" field.
func NameNotIn(vs ...string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNotIn(FieldName, vs...))
}

// NameGT applies the GT predicate on the "name" field.
func NameGT(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGT(FieldName, v))
}

// NameGTE applies the GTE predicate on the "name" field.
func NameGTE(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGTE(FieldName, v))
}

// NameLT applies the LT predicate on the "name" field.
func NameLT(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLT(FieldName, v))
}

// NameLTE applies the LTE predicate on the "name" field.
func NameLTE(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLTE(FieldName, v))
}

// NameContains applies the Contains predicate on the "name" field.
func NameContains(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldContains(FieldName, v))
}

// NameHasPrefix applies the HasPrefix predicate on the "name" field.
func NameHasPrefix(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldHasPrefix(FieldName, v))
}

// NameHasSuffix applies the HasSuffix predicate on the "name" field.
func NameHasSuffix(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldHasSuffix(FieldName, v))
}

// NameEqualFold applies the EqualFold predicate on the "name" field.
func NameEqualFold(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEqualFold(FieldName, v))
}

// NameContainsFold applies the ContainsFold predicate on the "name" field.
func NameContainsFold(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldContainsFold(FieldName, v))
}
|
||||
|
||||
// Predicates for the "provider" enum field (membership checks only) and
// string predicates for the "endpoint" field.

// ProviderEQ applies the EQ predicate on the "provider" field.
func ProviderEQ(v Provider) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldProvider, v))
}

// ProviderNEQ applies the NEQ predicate on the "provider" field.
func ProviderNEQ(v Provider) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNEQ(FieldProvider, v))
}

// ProviderIn applies the In predicate on the "provider" field.
func ProviderIn(vs ...Provider) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldIn(FieldProvider, vs...))
}

// ProviderNotIn applies the NotIn predicate on the "provider" field.
func ProviderNotIn(vs ...Provider) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNotIn(FieldProvider, vs...))
}

// EndpointEQ applies the EQ predicate on the "endpoint" field.
func EndpointEQ(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldEndpoint, v))
}

// EndpointNEQ applies the NEQ predicate on the "endpoint" field.
func EndpointNEQ(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNEQ(FieldEndpoint, v))
}

// EndpointIn applies the In predicate on the "endpoint" field.
func EndpointIn(vs ...string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldIn(FieldEndpoint, vs...))
}

// EndpointNotIn applies the NotIn predicate on the "endpoint" field.
func EndpointNotIn(vs ...string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNotIn(FieldEndpoint, vs...))
}

// EndpointGT applies the GT predicate on the "endpoint" field.
func EndpointGT(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGT(FieldEndpoint, v))
}

// EndpointGTE applies the GTE predicate on the "endpoint" field.
func EndpointGTE(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGTE(FieldEndpoint, v))
}

// EndpointLT applies the LT predicate on the "endpoint" field.
func EndpointLT(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLT(FieldEndpoint, v))
}

// EndpointLTE applies the LTE predicate on the "endpoint" field.
func EndpointLTE(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLTE(FieldEndpoint, v))
}

// EndpointContains applies the Contains predicate on the "endpoint" field.
func EndpointContains(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldContains(FieldEndpoint, v))
}

// EndpointHasPrefix applies the HasPrefix predicate on the "endpoint" field.
func EndpointHasPrefix(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldHasPrefix(FieldEndpoint, v))
}

// EndpointHasSuffix applies the HasSuffix predicate on the "endpoint" field.
func EndpointHasSuffix(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldHasSuffix(FieldEndpoint, v))
}

// EndpointEqualFold applies the EqualFold predicate on the "endpoint" field.
func EndpointEqualFold(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEqualFold(FieldEndpoint, v))
}

// EndpointContainsFold applies the ContainsFold predicate on the "endpoint" field.
func EndpointContainsFold(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldContainsFold(FieldEndpoint, v))
}
|
||||
|
||||
// APIKeyEncryptedEQ applies the EQ predicate on the "api_key_encrypted" field.
|
||||
func APIKeyEncryptedEQ(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEQ(FieldAPIKeyEncrypted, v))
|
||||
}
|
||||
|
||||
// APIKeyEncryptedNEQ applies the NEQ predicate on the "api_key_encrypted" field.
|
||||
func APIKeyEncryptedNEQ(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNEQ(FieldAPIKeyEncrypted, v))
|
||||
}
|
||||
|
||||
// APIKeyEncryptedIn applies the In predicate on the "api_key_encrypted" field.
|
||||
func APIKeyEncryptedIn(vs ...string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldIn(FieldAPIKeyEncrypted, vs...))
|
||||
}
|
||||
|
||||
// APIKeyEncryptedNotIn applies the NotIn predicate on the "api_key_encrypted" field.
|
||||
func APIKeyEncryptedNotIn(vs ...string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNotIn(FieldAPIKeyEncrypted, vs...))
|
||||
}
|
||||
|
||||
// APIKeyEncryptedGT applies the GT predicate on the "api_key_encrypted" field.
|
||||
func APIKeyEncryptedGT(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGT(FieldAPIKeyEncrypted, v))
|
||||
}
|
||||
|
||||
// APIKeyEncryptedGTE applies the GTE predicate on the "api_key_encrypted" field.
|
||||
func APIKeyEncryptedGTE(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGTE(FieldAPIKeyEncrypted, v))
|
||||
}
|
||||
|
||||
// APIKeyEncryptedLT applies the LT predicate on the "api_key_encrypted" field.
|
||||
func APIKeyEncryptedLT(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLT(FieldAPIKeyEncrypted, v))
|
||||
}
|
||||
|
||||
// APIKeyEncryptedLTE applies the LTE predicate on the "api_key_encrypted" field.
|
||||
func APIKeyEncryptedLTE(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLTE(FieldAPIKeyEncrypted, v))
|
||||
}
|
||||
|
||||
// APIKeyEncryptedContains applies the Contains predicate on the "api_key_encrypted" field.
|
||||
func APIKeyEncryptedContains(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldContains(FieldAPIKeyEncrypted, v))
|
||||
}
|
||||
|
||||
// APIKeyEncryptedHasPrefix applies the HasPrefix predicate on the "api_key_encrypted" field.
|
||||
func APIKeyEncryptedHasPrefix(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldHasPrefix(FieldAPIKeyEncrypted, v))
|
||||
}
|
||||
|
||||
// APIKeyEncryptedHasSuffix applies the HasSuffix predicate on the "api_key_encrypted" field.
|
||||
func APIKeyEncryptedHasSuffix(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldHasSuffix(FieldAPIKeyEncrypted, v))
|
||||
}
|
||||
|
||||
// APIKeyEncryptedEqualFold applies the EqualFold predicate on the "api_key_encrypted" field.
|
||||
func APIKeyEncryptedEqualFold(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEqualFold(FieldAPIKeyEncrypted, v))
|
||||
}
|
||||
|
||||
// APIKeyEncryptedContainsFold applies the ContainsFold predicate on the "api_key_encrypted" field.
|
||||
func APIKeyEncryptedContainsFold(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldContainsFold(FieldAPIKeyEncrypted, v))
|
||||
}
|
||||
|
||||
// PrimaryModelEQ applies the EQ predicate on the "primary_model" field.
|
||||
func PrimaryModelEQ(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEQ(FieldPrimaryModel, v))
|
||||
}
|
||||
|
||||
// PrimaryModelNEQ applies the NEQ predicate on the "primary_model" field.
|
||||
func PrimaryModelNEQ(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNEQ(FieldPrimaryModel, v))
|
||||
}
|
||||
|
||||
// PrimaryModelIn applies the In predicate on the "primary_model" field.
|
||||
func PrimaryModelIn(vs ...string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldIn(FieldPrimaryModel, vs...))
|
||||
}
|
||||
|
||||
// PrimaryModelNotIn applies the NotIn predicate on the "primary_model" field.
|
||||
func PrimaryModelNotIn(vs ...string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNotIn(FieldPrimaryModel, vs...))
|
||||
}
|
||||
|
||||
// PrimaryModelGT applies the GT predicate on the "primary_model" field.
|
||||
func PrimaryModelGT(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGT(FieldPrimaryModel, v))
|
||||
}
|
||||
|
||||
// PrimaryModelGTE applies the GTE predicate on the "primary_model" field.
|
||||
func PrimaryModelGTE(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGTE(FieldPrimaryModel, v))
|
||||
}
|
||||
|
||||
// PrimaryModelLT applies the LT predicate on the "primary_model" field.
|
||||
func PrimaryModelLT(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLT(FieldPrimaryModel, v))
|
||||
}
|
||||
|
||||
// PrimaryModelLTE applies the LTE predicate on the "primary_model" field.
|
||||
func PrimaryModelLTE(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLTE(FieldPrimaryModel, v))
|
||||
}
|
||||
|
||||
// PrimaryModelContains applies the Contains predicate on the "primary_model" field.
|
||||
func PrimaryModelContains(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldContains(FieldPrimaryModel, v))
|
||||
}
|
||||
|
||||
// PrimaryModelHasPrefix applies the HasPrefix predicate on the "primary_model" field.
|
||||
func PrimaryModelHasPrefix(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldHasPrefix(FieldPrimaryModel, v))
|
||||
}
|
||||
|
||||
// PrimaryModelHasSuffix applies the HasSuffix predicate on the "primary_model" field.
|
||||
func PrimaryModelHasSuffix(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldHasSuffix(FieldPrimaryModel, v))
|
||||
}
|
||||
|
||||
// PrimaryModelEqualFold applies the EqualFold predicate on the "primary_model" field.
|
||||
func PrimaryModelEqualFold(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEqualFold(FieldPrimaryModel, v))
|
||||
}
|
||||
|
||||
// PrimaryModelContainsFold applies the ContainsFold predicate on the "primary_model" field.
|
||||
func PrimaryModelContainsFold(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldContainsFold(FieldPrimaryModel, v))
|
||||
}
|
||||
|
||||
// GroupNameEQ applies the EQ predicate on the "group_name" field.
|
||||
func GroupNameEQ(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEQ(FieldGroupName, v))
|
||||
}
|
||||
|
||||
// GroupNameNEQ applies the NEQ predicate on the "group_name" field.
|
||||
func GroupNameNEQ(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNEQ(FieldGroupName, v))
|
||||
}
|
||||
|
||||
// GroupNameIn applies the In predicate on the "group_name" field.
|
||||
func GroupNameIn(vs ...string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldIn(FieldGroupName, vs...))
|
||||
}
|
||||
|
||||
// GroupNameNotIn applies the NotIn predicate on the "group_name" field.
|
||||
func GroupNameNotIn(vs ...string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNotIn(FieldGroupName, vs...))
|
||||
}
|
||||
|
||||
// GroupNameGT applies the GT predicate on the "group_name" field.
|
||||
func GroupNameGT(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGT(FieldGroupName, v))
|
||||
}
|
||||
|
||||
// GroupNameGTE applies the GTE predicate on the "group_name" field.
|
||||
func GroupNameGTE(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGTE(FieldGroupName, v))
|
||||
}
|
||||
|
||||
// GroupNameLT applies the LT predicate on the "group_name" field.
|
||||
func GroupNameLT(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLT(FieldGroupName, v))
|
||||
}
|
||||
|
||||
// GroupNameLTE applies the LTE predicate on the "group_name" field.
|
||||
func GroupNameLTE(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLTE(FieldGroupName, v))
|
||||
}
|
||||
|
||||
// GroupNameContains applies the Contains predicate on the "group_name" field.
|
||||
func GroupNameContains(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldContains(FieldGroupName, v))
|
||||
}
|
||||
|
||||
// GroupNameHasPrefix applies the HasPrefix predicate on the "group_name" field.
|
||||
func GroupNameHasPrefix(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldHasPrefix(FieldGroupName, v))
|
||||
}
|
||||
|
||||
// GroupNameHasSuffix applies the HasSuffix predicate on the "group_name" field.
|
||||
func GroupNameHasSuffix(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldHasSuffix(FieldGroupName, v))
|
||||
}
|
||||
|
||||
// GroupNameIsNil applies the IsNil predicate on the "group_name" field.
|
||||
func GroupNameIsNil() predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldIsNull(FieldGroupName))
|
||||
}
|
||||
|
||||
// GroupNameNotNil applies the NotNil predicate on the "group_name" field.
|
||||
func GroupNameNotNil() predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNotNull(FieldGroupName))
|
||||
}
|
||||
|
||||
// GroupNameEqualFold applies the EqualFold predicate on the "group_name" field.
|
||||
func GroupNameEqualFold(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEqualFold(FieldGroupName, v))
|
||||
}
|
||||
|
||||
// GroupNameContainsFold applies the ContainsFold predicate on the "group_name" field.
|
||||
func GroupNameContainsFold(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldContainsFold(FieldGroupName, v))
|
||||
}
|
||||
|
||||
// EnabledEQ applies the EQ predicate on the "enabled" field.
|
||||
func EnabledEQ(v bool) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEQ(FieldEnabled, v))
|
||||
}
|
||||
|
||||
// EnabledNEQ applies the NEQ predicate on the "enabled" field.
|
||||
func EnabledNEQ(v bool) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNEQ(FieldEnabled, v))
|
||||
}
|
||||
|
||||
// IntervalSecondsEQ applies the EQ predicate on the "interval_seconds" field.
|
||||
func IntervalSecondsEQ(v int) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEQ(FieldIntervalSeconds, v))
|
||||
}
|
||||
|
||||
// IntervalSecondsNEQ applies the NEQ predicate on the "interval_seconds" field.
|
||||
func IntervalSecondsNEQ(v int) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNEQ(FieldIntervalSeconds, v))
|
||||
}
|
||||
|
||||
// IntervalSecondsIn applies the In predicate on the "interval_seconds" field.
|
||||
func IntervalSecondsIn(vs ...int) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldIn(FieldIntervalSeconds, vs...))
|
||||
}
|
||||
|
||||
// IntervalSecondsNotIn applies the NotIn predicate on the "interval_seconds" field.
|
||||
func IntervalSecondsNotIn(vs ...int) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNotIn(FieldIntervalSeconds, vs...))
|
||||
}
|
||||
|
||||
// IntervalSecondsGT applies the GT predicate on the "interval_seconds" field.
|
||||
func IntervalSecondsGT(v int) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGT(FieldIntervalSeconds, v))
|
||||
}
|
||||
|
||||
// IntervalSecondsGTE applies the GTE predicate on the "interval_seconds" field.
|
||||
func IntervalSecondsGTE(v int) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGTE(FieldIntervalSeconds, v))
|
||||
}
|
||||
|
||||
// IntervalSecondsLT applies the LT predicate on the "interval_seconds" field.
|
||||
func IntervalSecondsLT(v int) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLT(FieldIntervalSeconds, v))
|
||||
}
|
||||
|
||||
// IntervalSecondsLTE applies the LTE predicate on the "interval_seconds" field.
|
||||
func IntervalSecondsLTE(v int) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLTE(FieldIntervalSeconds, v))
|
||||
}
|
||||
|
||||
// LastCheckedAtEQ applies the EQ predicate on the "last_checked_at" field.
|
||||
func LastCheckedAtEQ(v time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEQ(FieldLastCheckedAt, v))
|
||||
}
|
||||
|
||||
// LastCheckedAtNEQ applies the NEQ predicate on the "last_checked_at" field.
|
||||
func LastCheckedAtNEQ(v time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNEQ(FieldLastCheckedAt, v))
|
||||
}
|
||||
|
||||
// LastCheckedAtIn applies the In predicate on the "last_checked_at" field.
|
||||
func LastCheckedAtIn(vs ...time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldIn(FieldLastCheckedAt, vs...))
|
||||
}
|
||||
|
||||
// LastCheckedAtNotIn applies the NotIn predicate on the "last_checked_at" field.
|
||||
func LastCheckedAtNotIn(vs ...time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNotIn(FieldLastCheckedAt, vs...))
|
||||
}
|
||||
|
||||
// LastCheckedAtGT applies the GT predicate on the "last_checked_at" field.
|
||||
func LastCheckedAtGT(v time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGT(FieldLastCheckedAt, v))
|
||||
}
|
||||
|
||||
// LastCheckedAtGTE applies the GTE predicate on the "last_checked_at" field.
|
||||
func LastCheckedAtGTE(v time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGTE(FieldLastCheckedAt, v))
|
||||
}
|
||||
|
||||
// LastCheckedAtLT applies the LT predicate on the "last_checked_at" field.
|
||||
func LastCheckedAtLT(v time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLT(FieldLastCheckedAt, v))
|
||||
}
|
||||
|
||||
// LastCheckedAtLTE applies the LTE predicate on the "last_checked_at" field.
|
||||
func LastCheckedAtLTE(v time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLTE(FieldLastCheckedAt, v))
|
||||
}
|
||||
|
||||
// LastCheckedAtIsNil applies the IsNil predicate on the "last_checked_at" field.
|
||||
func LastCheckedAtIsNil() predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldIsNull(FieldLastCheckedAt))
|
||||
}
|
||||
|
||||
// LastCheckedAtNotNil applies the NotNil predicate on the "last_checked_at" field.
|
||||
func LastCheckedAtNotNil() predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNotNull(FieldLastCheckedAt))
|
||||
}
|
||||
|
||||
// CreatedByEQ applies the EQ predicate on the "created_by" field.
|
||||
func CreatedByEQ(v int64) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEQ(FieldCreatedBy, v))
|
||||
}
|
||||
|
||||
// CreatedByNEQ applies the NEQ predicate on the "created_by" field.
|
||||
func CreatedByNEQ(v int64) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNEQ(FieldCreatedBy, v))
|
||||
}
|
||||
|
||||
// CreatedByIn applies the In predicate on the "created_by" field.
|
||||
func CreatedByIn(vs ...int64) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldIn(FieldCreatedBy, vs...))
|
||||
}
|
||||
|
||||
// CreatedByNotIn applies the NotIn predicate on the "created_by" field.
|
||||
func CreatedByNotIn(vs ...int64) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNotIn(FieldCreatedBy, vs...))
|
||||
}
|
||||
|
||||
// CreatedByGT applies the GT predicate on the "created_by" field.
|
||||
func CreatedByGT(v int64) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGT(FieldCreatedBy, v))
|
||||
}
|
||||
|
||||
// CreatedByGTE applies the GTE predicate on the "created_by" field.
|
||||
func CreatedByGTE(v int64) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGTE(FieldCreatedBy, v))
|
||||
}
|
||||
|
||||
// CreatedByLT applies the LT predicate on the "created_by" field.
|
||||
func CreatedByLT(v int64) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLT(FieldCreatedBy, v))
|
||||
}
|
||||
|
||||
// CreatedByLTE applies the LTE predicate on the "created_by" field.
|
||||
func CreatedByLTE(v int64) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLTE(FieldCreatedBy, v))
|
||||
}
|
||||
|
||||
// TemplateIDEQ applies the EQ predicate on the "template_id" field.
|
||||
func TemplateIDEQ(v int64) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEQ(FieldTemplateID, v))
|
||||
}
|
||||
|
||||
// TemplateIDNEQ applies the NEQ predicate on the "template_id" field.
|
||||
func TemplateIDNEQ(v int64) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNEQ(FieldTemplateID, v))
|
||||
}
|
||||
|
||||
// TemplateIDIn applies the In predicate on the "template_id" field.
|
||||
func TemplateIDIn(vs ...int64) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldIn(FieldTemplateID, vs...))
|
||||
}
|
||||
|
||||
// TemplateIDNotIn applies the NotIn predicate on the "template_id" field.
|
||||
func TemplateIDNotIn(vs ...int64) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNotIn(FieldTemplateID, vs...))
|
||||
}
|
||||
|
||||
// TemplateIDIsNil applies the IsNil predicate on the "template_id" field.
|
||||
func TemplateIDIsNil() predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldIsNull(FieldTemplateID))
|
||||
}
|
||||
|
||||
// TemplateIDNotNil applies the NotNil predicate on the "template_id" field.
|
||||
func TemplateIDNotNil() predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNotNull(FieldTemplateID))
|
||||
}
|
||||
|
||||
// BodyOverrideModeEQ applies the EQ predicate on the "body_override_mode" field.
|
||||
func BodyOverrideModeEQ(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEQ(FieldBodyOverrideMode, v))
|
||||
}
|
||||
|
||||
// BodyOverrideModeNEQ applies the NEQ predicate on the "body_override_mode" field.
|
||||
func BodyOverrideModeNEQ(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNEQ(FieldBodyOverrideMode, v))
|
||||
}
|
||||
|
||||
// BodyOverrideModeIn applies the In predicate on the "body_override_mode" field.
|
||||
func BodyOverrideModeIn(vs ...string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldIn(FieldBodyOverrideMode, vs...))
|
||||
}
|
||||
|
||||
// BodyOverrideModeNotIn applies the NotIn predicate on the "body_override_mode" field.
|
||||
func BodyOverrideModeNotIn(vs ...string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNotIn(FieldBodyOverrideMode, vs...))
|
||||
}
|
||||
|
||||
// BodyOverrideModeGT applies the GT predicate on the "body_override_mode" field.
|
||||
func BodyOverrideModeGT(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGT(FieldBodyOverrideMode, v))
|
||||
}
|
||||
|
||||
// BodyOverrideModeGTE applies the GTE predicate on the "body_override_mode" field.
|
||||
func BodyOverrideModeGTE(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGTE(FieldBodyOverrideMode, v))
|
||||
}
|
||||
|
||||
// BodyOverrideModeLT applies the LT predicate on the "body_override_mode" field.
|
||||
func BodyOverrideModeLT(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLT(FieldBodyOverrideMode, v))
|
||||
}
|
||||
|
||||
// BodyOverrideModeLTE applies the LTE predicate on the "body_override_mode" field.
|
||||
func BodyOverrideModeLTE(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLTE(FieldBodyOverrideMode, v))
|
||||
}
|
||||
|
||||
// BodyOverrideModeContains applies the Contains predicate on the "body_override_mode" field.
|
||||
func BodyOverrideModeContains(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldContains(FieldBodyOverrideMode, v))
|
||||
}
|
||||
|
||||
// BodyOverrideModeHasPrefix applies the HasPrefix predicate on the "body_override_mode" field.
|
||||
func BodyOverrideModeHasPrefix(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldHasPrefix(FieldBodyOverrideMode, v))
|
||||
}
|
||||
|
||||
// BodyOverrideModeHasSuffix applies the HasSuffix predicate on the "body_override_mode" field.
|
||||
func BodyOverrideModeHasSuffix(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldHasSuffix(FieldBodyOverrideMode, v))
|
||||
}
|
||||
|
||||
// BodyOverrideModeEqualFold applies the EqualFold predicate on the "body_override_mode" field.
|
||||
func BodyOverrideModeEqualFold(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEqualFold(FieldBodyOverrideMode, v))
|
||||
}
|
||||
|
||||
// BodyOverrideModeContainsFold applies the ContainsFold predicate on the "body_override_mode" field.
|
||||
func BodyOverrideModeContainsFold(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldContainsFold(FieldBodyOverrideMode, v))
|
||||
}
|
||||
|
||||
// BodyOverrideIsNil applies the IsNil predicate on the "body_override" field.
|
||||
func BodyOverrideIsNil() predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldIsNull(FieldBodyOverride))
|
||||
}
|
||||
|
||||
// BodyOverrideNotNil applies the NotNil predicate on the "body_override" field.
|
||||
func BodyOverrideNotNil() predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNotNull(FieldBodyOverride))
|
||||
}
|
||||
|
||||
// HasHistory applies the HasEdge predicate on the "history" edge.
|
||||
func HasHistory() predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, HistoryTable, HistoryColumn),
|
||||
)
|
||||
sqlgraph.HasNeighbors(s, step)
|
||||
})
|
||||
}
|
||||
|
||||
// HasHistoryWith applies the HasEdge predicate on the "history" edge with a given conditions (other predicates).
|
||||
func HasHistoryWith(preds ...predicate.ChannelMonitorHistory) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(func(s *sql.Selector) {
|
||||
step := newHistoryStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// HasDailyRollups applies the HasEdge predicate on the "daily_rollups" edge.
|
||||
func HasDailyRollups() predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, DailyRollupsTable, DailyRollupsColumn),
|
||||
)
|
||||
sqlgraph.HasNeighbors(s, step)
|
||||
})
|
||||
}
|
||||
|
||||
// HasDailyRollupsWith applies the HasEdge predicate on the "daily_rollups" edge with a given conditions (other predicates).
|
||||
func HasDailyRollupsWith(preds ...predicate.ChannelMonitorDailyRollup) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(func(s *sql.Selector) {
|
||||
step := newDailyRollupsStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// HasRequestTemplate applies the HasEdge predicate on the "request_template" edge.
|
||||
func HasRequestTemplate() predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, false, RequestTemplateTable, RequestTemplateColumn),
|
||||
)
|
||||
sqlgraph.HasNeighbors(s, step)
|
||||
})
|
||||
}
|
||||
|
||||
// HasRequestTemplateWith applies the HasEdge predicate on the "request_template" edge with a given conditions (other predicates).
|
||||
func HasRequestTemplateWith(preds ...predicate.ChannelMonitorRequestTemplate) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(func(s *sql.Selector) {
|
||||
step := newRequestTemplateStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// And groups predicates with the AND operator between them.
|
||||
func And(predicates ...predicate.ChannelMonitor) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.AndPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Or groups predicates with the OR operator between them.
|
||||
func Or(predicates ...predicate.ChannelMonitor) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.OrPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Not applies the not operator on the given predicate.
|
||||
func Not(p predicate.ChannelMonitor) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.NotPredicates(p))
|
||||
}
|
||||
1610
backend/ent/channelmonitor_create.go
Normal file
1610
backend/ent/channelmonitor_create.go
Normal file
File diff suppressed because it is too large
Load Diff
88
backend/ent/channelmonitor_delete.go
Normal file
88
backend/ent/channelmonitor_delete.go
Normal file
@@ -0,0 +1,88 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// ChannelMonitorDelete is the builder for deleting a ChannelMonitor entity.
|
||||
type ChannelMonitorDelete struct {
|
||||
config
|
||||
hooks []Hook
|
||||
mutation *ChannelMonitorMutation
|
||||
}
|
||||
|
||||
// Where appends a list predicates to the ChannelMonitorDelete builder.
|
||||
func (_d *ChannelMonitorDelete) Where(ps ...predicate.ChannelMonitor) *ChannelMonitorDelete {
|
||||
_d.mutation.Where(ps...)
|
||||
return _d
|
||||
}
|
||||
|
||||
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||
func (_d *ChannelMonitorDelete) Exec(ctx context.Context) (int, error) {
|
||||
return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_d *ChannelMonitorDelete) ExecX(ctx context.Context) int {
|
||||
n, err := _d.Exec(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (_d *ChannelMonitorDelete) sqlExec(ctx context.Context) (int, error) {
|
||||
_spec := sqlgraph.NewDeleteSpec(channelmonitor.Table, sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64))
|
||||
if ps := _d.mutation.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
|
||||
if err != nil && sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
_d.mutation.done = true
|
||||
return affected, err
|
||||
}
|
||||
|
||||
// ChannelMonitorDeleteOne is the builder for deleting a single ChannelMonitor entity.
|
||||
type ChannelMonitorDeleteOne struct {
|
||||
_d *ChannelMonitorDelete
|
||||
}
|
||||
|
||||
// Where appends a list predicates to the ChannelMonitorDelete builder.
|
||||
func (_d *ChannelMonitorDeleteOne) Where(ps ...predicate.ChannelMonitor) *ChannelMonitorDeleteOne {
|
||||
_d._d.mutation.Where(ps...)
|
||||
return _d
|
||||
}
|
||||
|
||||
// Exec executes the deletion query.
|
||||
func (_d *ChannelMonitorDeleteOne) Exec(ctx context.Context) error {
|
||||
n, err := _d._d.Exec(ctx)
|
||||
switch {
|
||||
case err != nil:
|
||||
return err
|
||||
case n == 0:
|
||||
return &NotFoundError{channelmonitor.Label}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_d *ChannelMonitorDeleteOne) ExecX(ctx context.Context) {
|
||||
if err := _d.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
797
backend/ent/channelmonitor_query.go
Normal file
797
backend/ent/channelmonitor_query.go
Normal file
@@ -0,0 +1,797 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorrequesttemplate"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// ChannelMonitorQuery is the builder for querying ChannelMonitor entities.
|
||||
type ChannelMonitorQuery struct {
|
||||
config
|
||||
ctx *QueryContext
|
||||
order []channelmonitor.OrderOption
|
||||
inters []Interceptor
|
||||
predicates []predicate.ChannelMonitor
|
||||
withHistory *ChannelMonitorHistoryQuery
|
||||
withDailyRollups *ChannelMonitorDailyRollupQuery
|
||||
withRequestTemplate *ChannelMonitorRequestTemplateQuery
|
||||
modifiers []func(*sql.Selector)
|
||||
// intermediate query (i.e. traversal path).
|
||||
sql *sql.Selector
|
||||
path func(context.Context) (*sql.Selector, error)
|
||||
}
|
||||
|
||||
// Where adds a new predicate for the ChannelMonitorQuery builder.
|
||||
func (_q *ChannelMonitorQuery) Where(ps ...predicate.ChannelMonitor) *ChannelMonitorQuery {
|
||||
_q.predicates = append(_q.predicates, ps...)
|
||||
return _q
|
||||
}
|
||||
|
||||
// Limit the number of records to be returned by this query.
|
||||
func (_q *ChannelMonitorQuery) Limit(limit int) *ChannelMonitorQuery {
|
||||
_q.ctx.Limit = &limit
|
||||
return _q
|
||||
}
|
||||
|
||||
// Offset to start from.
|
||||
func (_q *ChannelMonitorQuery) Offset(offset int) *ChannelMonitorQuery {
|
||||
_q.ctx.Offset = &offset
|
||||
return _q
|
||||
}
|
||||
|
||||
// Unique configures the query builder to filter duplicate records on query.
|
||||
// By default, unique is set to true, and can be disabled using this method.
|
||||
func (_q *ChannelMonitorQuery) Unique(unique bool) *ChannelMonitorQuery {
|
||||
_q.ctx.Unique = &unique
|
||||
return _q
|
||||
}
|
||||
|
||||
// Order specifies how the records should be ordered.
|
||||
func (_q *ChannelMonitorQuery) Order(o ...channelmonitor.OrderOption) *ChannelMonitorQuery {
|
||||
_q.order = append(_q.order, o...)
|
||||
return _q
|
||||
}
|
||||
|
||||
// QueryHistory chains the current query on the "history" edge.
|
||||
func (_q *ChannelMonitorQuery) QueryHistory() *ChannelMonitorHistoryQuery {
|
||||
query := (&ChannelMonitorHistoryClient{config: _q.config}).Query()
|
||||
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||
if err := _q.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
selector := _q.sqlQuery(ctx)
|
||||
if err := selector.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(channelmonitor.Table, channelmonitor.FieldID, selector),
|
||||
sqlgraph.To(channelmonitorhistory.Table, channelmonitorhistory.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, channelmonitor.HistoryTable, channelmonitor.HistoryColumn),
|
||||
)
|
||||
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||
return fromU, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// QueryDailyRollups chains the current query on the "daily_rollups" edge.
|
||||
func (_q *ChannelMonitorQuery) QueryDailyRollups() *ChannelMonitorDailyRollupQuery {
|
||||
query := (&ChannelMonitorDailyRollupClient{config: _q.config}).Query()
|
||||
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||
if err := _q.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
selector := _q.sqlQuery(ctx)
|
||||
if err := selector.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(channelmonitor.Table, channelmonitor.FieldID, selector),
|
||||
sqlgraph.To(channelmonitordailyrollup.Table, channelmonitordailyrollup.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, channelmonitor.DailyRollupsTable, channelmonitor.DailyRollupsColumn),
|
||||
)
|
||||
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||
return fromU, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// QueryRequestTemplate chains the current query on the "request_template" edge.
|
||||
func (_q *ChannelMonitorQuery) QueryRequestTemplate() *ChannelMonitorRequestTemplateQuery {
|
||||
query := (&ChannelMonitorRequestTemplateClient{config: _q.config}).Query()
|
||||
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||
if err := _q.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
selector := _q.sqlQuery(ctx)
|
||||
if err := selector.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(channelmonitor.Table, channelmonitor.FieldID, selector),
|
||||
sqlgraph.To(channelmonitorrequesttemplate.Table, channelmonitorrequesttemplate.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, false, channelmonitor.RequestTemplateTable, channelmonitor.RequestTemplateColumn),
|
||||
)
|
||||
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||
return fromU, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// First returns the first ChannelMonitor entity from the query.
|
||||
// Returns a *NotFoundError when no ChannelMonitor was found.
|
||||
func (_q *ChannelMonitorQuery) First(ctx context.Context) (*ChannelMonitor, error) {
|
||||
nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(nodes) == 0 {
|
||||
return nil, &NotFoundError{channelmonitor.Label}
|
||||
}
|
||||
return nodes[0], nil
|
||||
}
|
||||
|
||||
// FirstX is like First, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorQuery) FirstX(ctx context.Context) *ChannelMonitor {
|
||||
node, err := _q.First(ctx)
|
||||
if err != nil && !IsNotFound(err) {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// FirstID returns the first ChannelMonitor ID from the query.
|
||||
// Returns a *NotFoundError when no ChannelMonitor ID was found.
|
||||
func (_q *ChannelMonitorQuery) FirstID(ctx context.Context) (id int64, err error) {
|
||||
var ids []int64
|
||||
if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
|
||||
return
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
err = &NotFoundError{channelmonitor.Label}
|
||||
return
|
||||
}
|
||||
return ids[0], nil
|
||||
}
|
||||
|
||||
// FirstIDX is like FirstID, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorQuery) FirstIDX(ctx context.Context) int64 {
|
||||
id, err := _q.FirstID(ctx)
|
||||
if err != nil && !IsNotFound(err) {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// Only returns a single ChannelMonitor entity found by the query, ensuring it only returns one.
|
||||
// Returns a *NotSingularError when more than one ChannelMonitor entity is found.
|
||||
// Returns a *NotFoundError when no ChannelMonitor entities are found.
|
||||
func (_q *ChannelMonitorQuery) Only(ctx context.Context) (*ChannelMonitor, error) {
|
||||
nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch len(nodes) {
|
||||
case 1:
|
||||
return nodes[0], nil
|
||||
case 0:
|
||||
return nil, &NotFoundError{channelmonitor.Label}
|
||||
default:
|
||||
return nil, &NotSingularError{channelmonitor.Label}
|
||||
}
|
||||
}
|
||||
|
||||
// OnlyX is like Only, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorQuery) OnlyX(ctx context.Context) *ChannelMonitor {
|
||||
node, err := _q.Only(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// OnlyID is like Only, but returns the only ChannelMonitor ID in the query.
|
||||
// Returns a *NotSingularError when more than one ChannelMonitor ID is found.
|
||||
// Returns a *NotFoundError when no entities are found.
|
||||
func (_q *ChannelMonitorQuery) OnlyID(ctx context.Context) (id int64, err error) {
|
||||
var ids []int64
|
||||
if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(ids) {
|
||||
case 1:
|
||||
id = ids[0]
|
||||
case 0:
|
||||
err = &NotFoundError{channelmonitor.Label}
|
||||
default:
|
||||
err = &NotSingularError{channelmonitor.Label}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// OnlyIDX is like OnlyID, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorQuery) OnlyIDX(ctx context.Context) int64 {
|
||||
id, err := _q.OnlyID(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// All executes the query and returns a list of ChannelMonitors.
|
||||
func (_q *ChannelMonitorQuery) All(ctx context.Context) ([]*ChannelMonitor, error) {
|
||||
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
|
||||
if err := _q.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
qr := querierAll[[]*ChannelMonitor, *ChannelMonitorQuery]()
|
||||
return withInterceptors[[]*ChannelMonitor](ctx, _q, qr, _q.inters)
|
||||
}
|
||||
|
||||
// AllX is like All, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorQuery) AllX(ctx context.Context) []*ChannelMonitor {
|
||||
nodes, err := _q.All(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return nodes
|
||||
}
|
||||
|
||||
// IDs executes the query and returns a list of ChannelMonitor IDs.
|
||||
func (_q *ChannelMonitorQuery) IDs(ctx context.Context) (ids []int64, err error) {
|
||||
if _q.ctx.Unique == nil && _q.path != nil {
|
||||
_q.Unique(true)
|
||||
}
|
||||
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
|
||||
if err = _q.Select(channelmonitor.FieldID).Scan(ctx, &ids); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
// IDsX is like IDs, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorQuery) IDsX(ctx context.Context) []int64 {
|
||||
ids, err := _q.IDs(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return ids
|
||||
}
|
||||
|
||||
// Count returns the count of the given query.
|
||||
func (_q *ChannelMonitorQuery) Count(ctx context.Context) (int, error) {
|
||||
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
|
||||
if err := _q.prepareQuery(ctx); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return withInterceptors[int](ctx, _q, querierCount[*ChannelMonitorQuery](), _q.inters)
|
||||
}
|
||||
|
||||
// CountX is like Count, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorQuery) CountX(ctx context.Context) int {
|
||||
count, err := _q.Count(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// Exist returns true if the query has elements in the graph.
|
||||
func (_q *ChannelMonitorQuery) Exist(ctx context.Context) (bool, error) {
|
||||
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
|
||||
switch _, err := _q.FirstID(ctx); {
|
||||
case IsNotFound(err):
|
||||
return false, nil
|
||||
case err != nil:
|
||||
return false, fmt.Errorf("ent: check existence: %w", err)
|
||||
default:
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// ExistX is like Exist, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorQuery) ExistX(ctx context.Context) bool {
|
||||
exist, err := _q.Exist(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return exist
|
||||
}
|
||||
|
||||
// Clone returns a duplicate of the ChannelMonitorQuery builder, including all associated steps. It can be
|
||||
// used to prepare common query builders and use them differently after the clone is made.
|
||||
func (_q *ChannelMonitorQuery) Clone() *ChannelMonitorQuery {
|
||||
if _q == nil {
|
||||
return nil
|
||||
}
|
||||
return &ChannelMonitorQuery{
|
||||
config: _q.config,
|
||||
ctx: _q.ctx.Clone(),
|
||||
order: append([]channelmonitor.OrderOption{}, _q.order...),
|
||||
inters: append([]Interceptor{}, _q.inters...),
|
||||
predicates: append([]predicate.ChannelMonitor{}, _q.predicates...),
|
||||
withHistory: _q.withHistory.Clone(),
|
||||
withDailyRollups: _q.withDailyRollups.Clone(),
|
||||
withRequestTemplate: _q.withRequestTemplate.Clone(),
|
||||
// clone intermediate query.
|
||||
sql: _q.sql.Clone(),
|
||||
path: _q.path,
|
||||
}
|
||||
}
|
||||
|
||||
// WithHistory tells the query-builder to eager-load the nodes that are connected to
|
||||
// the "history" edge. The optional arguments are used to configure the query builder of the edge.
|
||||
func (_q *ChannelMonitorQuery) WithHistory(opts ...func(*ChannelMonitorHistoryQuery)) *ChannelMonitorQuery {
|
||||
query := (&ChannelMonitorHistoryClient{config: _q.config}).Query()
|
||||
for _, opt := range opts {
|
||||
opt(query)
|
||||
}
|
||||
_q.withHistory = query
|
||||
return _q
|
||||
}
|
||||
|
||||
// WithDailyRollups tells the query-builder to eager-load the nodes that are connected to
|
||||
// the "daily_rollups" edge. The optional arguments are used to configure the query builder of the edge.
|
||||
func (_q *ChannelMonitorQuery) WithDailyRollups(opts ...func(*ChannelMonitorDailyRollupQuery)) *ChannelMonitorQuery {
|
||||
query := (&ChannelMonitorDailyRollupClient{config: _q.config}).Query()
|
||||
for _, opt := range opts {
|
||||
opt(query)
|
||||
}
|
||||
_q.withDailyRollups = query
|
||||
return _q
|
||||
}
|
||||
|
||||
// WithRequestTemplate tells the query-builder to eager-load the nodes that are connected to
|
||||
// the "request_template" edge. The optional arguments are used to configure the query builder of the edge.
|
||||
func (_q *ChannelMonitorQuery) WithRequestTemplate(opts ...func(*ChannelMonitorRequestTemplateQuery)) *ChannelMonitorQuery {
|
||||
query := (&ChannelMonitorRequestTemplateClient{config: _q.config}).Query()
|
||||
for _, opt := range opts {
|
||||
opt(query)
|
||||
}
|
||||
_q.withRequestTemplate = query
|
||||
return _q
|
||||
}
|
||||
|
||||
// GroupBy is used to group vertices by one or more fields/columns.
|
||||
// It is often used with aggregate functions, like: count, max, mean, min, sum.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var v []struct {
|
||||
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
// Count int `json:"count,omitempty"`
|
||||
// }
|
||||
//
|
||||
// client.ChannelMonitor.Query().
|
||||
// GroupBy(channelmonitor.FieldCreatedAt).
|
||||
// Aggregate(ent.Count()).
|
||||
// Scan(ctx, &v)
|
||||
func (_q *ChannelMonitorQuery) GroupBy(field string, fields ...string) *ChannelMonitorGroupBy {
|
||||
_q.ctx.Fields = append([]string{field}, fields...)
|
||||
grbuild := &ChannelMonitorGroupBy{build: _q}
|
||||
grbuild.flds = &_q.ctx.Fields
|
||||
grbuild.label = channelmonitor.Label
|
||||
grbuild.scan = grbuild.Scan
|
||||
return grbuild
|
||||
}
|
||||
|
||||
// Select allows the selection one or more fields/columns for the given query,
|
||||
// instead of selecting all fields in the entity.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var v []struct {
|
||||
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
// }
|
||||
//
|
||||
// client.ChannelMonitor.Query().
|
||||
// Select(channelmonitor.FieldCreatedAt).
|
||||
// Scan(ctx, &v)
|
||||
func (_q *ChannelMonitorQuery) Select(fields ...string) *ChannelMonitorSelect {
|
||||
_q.ctx.Fields = append(_q.ctx.Fields, fields...)
|
||||
sbuild := &ChannelMonitorSelect{ChannelMonitorQuery: _q}
|
||||
sbuild.label = channelmonitor.Label
|
||||
sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
|
||||
return sbuild
|
||||
}
|
||||
|
||||
// Aggregate returns a ChannelMonitorSelect configured with the given aggregations.
|
||||
func (_q *ChannelMonitorQuery) Aggregate(fns ...AggregateFunc) *ChannelMonitorSelect {
|
||||
return _q.Select().Aggregate(fns...)
|
||||
}
|
||||
|
||||
func (_q *ChannelMonitorQuery) prepareQuery(ctx context.Context) error {
|
||||
for _, inter := range _q.inters {
|
||||
if inter == nil {
|
||||
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
|
||||
}
|
||||
if trv, ok := inter.(Traverser); ok {
|
||||
if err := trv.Traverse(ctx, _q); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, f := range _q.ctx.Fields {
|
||||
if !channelmonitor.ValidColumn(f) {
|
||||
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||
}
|
||||
}
|
||||
if _q.path != nil {
|
||||
prev, err := _q.path(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_q.sql = prev
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (_q *ChannelMonitorQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*ChannelMonitor, error) {
|
||||
var (
|
||||
nodes = []*ChannelMonitor{}
|
||||
_spec = _q.querySpec()
|
||||
loadedTypes = [3]bool{
|
||||
_q.withHistory != nil,
|
||||
_q.withDailyRollups != nil,
|
||||
_q.withRequestTemplate != nil,
|
||||
}
|
||||
)
|
||||
_spec.ScanValues = func(columns []string) ([]any, error) {
|
||||
return (*ChannelMonitor).scanValues(nil, columns)
|
||||
}
|
||||
_spec.Assign = func(columns []string, values []any) error {
|
||||
node := &ChannelMonitor{config: _q.config}
|
||||
nodes = append(nodes, node)
|
||||
node.Edges.loadedTypes = loadedTypes
|
||||
return node.assignValues(columns, values)
|
||||
}
|
||||
if len(_q.modifiers) > 0 {
|
||||
_spec.Modifiers = _q.modifiers
|
||||
}
|
||||
for i := range hooks {
|
||||
hooks[i](ctx, _spec)
|
||||
}
|
||||
if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(nodes) == 0 {
|
||||
return nodes, nil
|
||||
}
|
||||
if query := _q.withHistory; query != nil {
|
||||
if err := _q.loadHistory(ctx, query, nodes,
|
||||
func(n *ChannelMonitor) { n.Edges.History = []*ChannelMonitorHistory{} },
|
||||
func(n *ChannelMonitor, e *ChannelMonitorHistory) { n.Edges.History = append(n.Edges.History, e) }); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if query := _q.withDailyRollups; query != nil {
|
||||
if err := _q.loadDailyRollups(ctx, query, nodes,
|
||||
func(n *ChannelMonitor) { n.Edges.DailyRollups = []*ChannelMonitorDailyRollup{} },
|
||||
func(n *ChannelMonitor, e *ChannelMonitorDailyRollup) {
|
||||
n.Edges.DailyRollups = append(n.Edges.DailyRollups, e)
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if query := _q.withRequestTemplate; query != nil {
|
||||
if err := _q.loadRequestTemplate(ctx, query, nodes, nil,
|
||||
func(n *ChannelMonitor, e *ChannelMonitorRequestTemplate) { n.Edges.RequestTemplate = e }); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
func (_q *ChannelMonitorQuery) loadHistory(ctx context.Context, query *ChannelMonitorHistoryQuery, nodes []*ChannelMonitor, init func(*ChannelMonitor), assign func(*ChannelMonitor, *ChannelMonitorHistory)) error {
|
||||
fks := make([]driver.Value, 0, len(nodes))
|
||||
nodeids := make(map[int64]*ChannelMonitor)
|
||||
for i := range nodes {
|
||||
fks = append(fks, nodes[i].ID)
|
||||
nodeids[nodes[i].ID] = nodes[i]
|
||||
if init != nil {
|
||||
init(nodes[i])
|
||||
}
|
||||
}
|
||||
if len(query.ctx.Fields) > 0 {
|
||||
query.ctx.AppendFieldOnce(channelmonitorhistory.FieldMonitorID)
|
||||
}
|
||||
query.Where(predicate.ChannelMonitorHistory(func(s *sql.Selector) {
|
||||
s.Where(sql.InValues(s.C(channelmonitor.HistoryColumn), fks...))
|
||||
}))
|
||||
neighbors, err := query.All(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, n := range neighbors {
|
||||
fk := n.MonitorID
|
||||
node, ok := nodeids[fk]
|
||||
if !ok {
|
||||
return fmt.Errorf(`unexpected referenced foreign-key "monitor_id" returned %v for node %v`, fk, n.ID)
|
||||
}
|
||||
assign(node, n)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (_q *ChannelMonitorQuery) loadDailyRollups(ctx context.Context, query *ChannelMonitorDailyRollupQuery, nodes []*ChannelMonitor, init func(*ChannelMonitor), assign func(*ChannelMonitor, *ChannelMonitorDailyRollup)) error {
|
||||
fks := make([]driver.Value, 0, len(nodes))
|
||||
nodeids := make(map[int64]*ChannelMonitor)
|
||||
for i := range nodes {
|
||||
fks = append(fks, nodes[i].ID)
|
||||
nodeids[nodes[i].ID] = nodes[i]
|
||||
if init != nil {
|
||||
init(nodes[i])
|
||||
}
|
||||
}
|
||||
if len(query.ctx.Fields) > 0 {
|
||||
query.ctx.AppendFieldOnce(channelmonitordailyrollup.FieldMonitorID)
|
||||
}
|
||||
query.Where(predicate.ChannelMonitorDailyRollup(func(s *sql.Selector) {
|
||||
s.Where(sql.InValues(s.C(channelmonitor.DailyRollupsColumn), fks...))
|
||||
}))
|
||||
neighbors, err := query.All(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, n := range neighbors {
|
||||
fk := n.MonitorID
|
||||
node, ok := nodeids[fk]
|
||||
if !ok {
|
||||
return fmt.Errorf(`unexpected referenced foreign-key "monitor_id" returned %v for node %v`, fk, n.ID)
|
||||
}
|
||||
assign(node, n)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (_q *ChannelMonitorQuery) loadRequestTemplate(ctx context.Context, query *ChannelMonitorRequestTemplateQuery, nodes []*ChannelMonitor, init func(*ChannelMonitor), assign func(*ChannelMonitor, *ChannelMonitorRequestTemplate)) error {
|
||||
ids := make([]int64, 0, len(nodes))
|
||||
nodeids := make(map[int64][]*ChannelMonitor)
|
||||
for i := range nodes {
|
||||
if nodes[i].TemplateID == nil {
|
||||
continue
|
||||
}
|
||||
fk := *nodes[i].TemplateID
|
||||
if _, ok := nodeids[fk]; !ok {
|
||||
ids = append(ids, fk)
|
||||
}
|
||||
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
return nil
|
||||
}
|
||||
query.Where(channelmonitorrequesttemplate.IDIn(ids...))
|
||||
neighbors, err := query.All(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, n := range neighbors {
|
||||
nodes, ok := nodeids[n.ID]
|
||||
if !ok {
|
||||
return fmt.Errorf(`unexpected foreign-key "template_id" returned %v`, n.ID)
|
||||
}
|
||||
for i := range nodes {
|
||||
assign(nodes[i], n)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (_q *ChannelMonitorQuery) sqlCount(ctx context.Context) (int, error) {
|
||||
_spec := _q.querySpec()
|
||||
if len(_q.modifiers) > 0 {
|
||||
_spec.Modifiers = _q.modifiers
|
||||
}
|
||||
_spec.Node.Columns = _q.ctx.Fields
|
||||
if len(_q.ctx.Fields) > 0 {
|
||||
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
||||
}
|
||||
return sqlgraph.CountNodes(ctx, _q.driver, _spec)
|
||||
}
|
||||
|
||||
func (_q *ChannelMonitorQuery) querySpec() *sqlgraph.QuerySpec {
|
||||
_spec := sqlgraph.NewQuerySpec(channelmonitor.Table, channelmonitor.Columns, sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64))
|
||||
_spec.From = _q.sql
|
||||
if unique := _q.ctx.Unique; unique != nil {
|
||||
_spec.Unique = *unique
|
||||
} else if _q.path != nil {
|
||||
_spec.Unique = true
|
||||
}
|
||||
if fields := _q.ctx.Fields; len(fields) > 0 {
|
||||
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, channelmonitor.FieldID)
|
||||
for i := range fields {
|
||||
if fields[i] != channelmonitor.FieldID {
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
|
||||
}
|
||||
}
|
||||
if _q.withRequestTemplate != nil {
|
||||
_spec.Node.AddColumnOnce(channelmonitor.FieldTemplateID)
|
||||
}
|
||||
}
|
||||
if ps := _q.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
if limit := _q.ctx.Limit; limit != nil {
|
||||
_spec.Limit = *limit
|
||||
}
|
||||
if offset := _q.ctx.Offset; offset != nil {
|
||||
_spec.Offset = *offset
|
||||
}
|
||||
if ps := _q.order; len(ps) > 0 {
|
||||
_spec.Order = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
return _spec
|
||||
}
|
||||
|
||||
func (_q *ChannelMonitorQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
||||
builder := sql.Dialect(_q.driver.Dialect())
|
||||
t1 := builder.Table(channelmonitor.Table)
|
||||
columns := _q.ctx.Fields
|
||||
if len(columns) == 0 {
|
||||
columns = channelmonitor.Columns
|
||||
}
|
||||
selector := builder.Select(t1.Columns(columns...)...).From(t1)
|
||||
if _q.sql != nil {
|
||||
selector = _q.sql
|
||||
selector.Select(selector.Columns(columns...)...)
|
||||
}
|
||||
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
||||
selector.Distinct()
|
||||
}
|
||||
for _, m := range _q.modifiers {
|
||||
m(selector)
|
||||
}
|
||||
for _, p := range _q.predicates {
|
||||
p(selector)
|
||||
}
|
||||
for _, p := range _q.order {
|
||||
p(selector)
|
||||
}
|
||||
if offset := _q.ctx.Offset; offset != nil {
|
||||
// limit is mandatory for offset clause. We start
|
||||
// with default value, and override it below if needed.
|
||||
selector.Offset(*offset).Limit(math.MaxInt32)
|
||||
}
|
||||
if limit := _q.ctx.Limit; limit != nil {
|
||||
selector.Limit(*limit)
|
||||
}
|
||||
return selector
|
||||
}
|
||||
|
||||
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||
// either committed or rolled-back.
|
||||
func (_q *ChannelMonitorQuery) ForUpdate(opts ...sql.LockOption) *ChannelMonitorQuery {
|
||||
if _q.driver.Dialect() == dialect.Postgres {
|
||||
_q.Unique(false)
|
||||
}
|
||||
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||
s.ForUpdate(opts...)
|
||||
})
|
||||
return _q
|
||||
}
|
||||
|
||||
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||
// until your transaction commits.
|
||||
func (_q *ChannelMonitorQuery) ForShare(opts ...sql.LockOption) *ChannelMonitorQuery {
|
||||
if _q.driver.Dialect() == dialect.Postgres {
|
||||
_q.Unique(false)
|
||||
}
|
||||
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||
s.ForShare(opts...)
|
||||
})
|
||||
return _q
|
||||
}
|
||||
|
||||
// ChannelMonitorGroupBy is the group-by builder for ChannelMonitor entities.
|
||||
type ChannelMonitorGroupBy struct {
|
||||
selector
|
||||
build *ChannelMonitorQuery
|
||||
}
|
||||
|
||||
// Aggregate adds the given aggregation functions to the group-by query.
|
||||
func (_g *ChannelMonitorGroupBy) Aggregate(fns ...AggregateFunc) *ChannelMonitorGroupBy {
|
||||
_g.fns = append(_g.fns, fns...)
|
||||
return _g
|
||||
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (_g *ChannelMonitorGroupBy) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
|
||||
if err := _g.build.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
return scanWithInterceptors[*ChannelMonitorQuery, *ChannelMonitorGroupBy](ctx, _g.build, _g, _g.build.inters, v)
|
||||
}
|
||||
|
||||
func (_g *ChannelMonitorGroupBy) sqlScan(ctx context.Context, root *ChannelMonitorQuery, v any) error {
|
||||
selector := root.sqlQuery(ctx).Select()
|
||||
aggregation := make([]string, 0, len(_g.fns))
|
||||
for _, fn := range _g.fns {
|
||||
aggregation = append(aggregation, fn(selector))
|
||||
}
|
||||
if len(selector.SelectedColumns()) == 0 {
|
||||
columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
|
||||
for _, f := range *_g.flds {
|
||||
columns = append(columns, selector.C(f))
|
||||
}
|
||||
columns = append(columns, aggregation...)
|
||||
selector.Select(columns...)
|
||||
}
|
||||
selector.GroupBy(selector.Columns(*_g.flds...)...)
|
||||
if err := selector.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
rows := &sql.Rows{}
|
||||
query, args := selector.Query()
|
||||
if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
return sql.ScanSlice(rows, v)
|
||||
}
|
||||
|
||||
// ChannelMonitorSelect is the builder for selecting fields of ChannelMonitor entities.
|
||||
type ChannelMonitorSelect struct {
|
||||
*ChannelMonitorQuery
|
||||
selector
|
||||
}
|
||||
|
||||
// Aggregate adds the given aggregation functions to the selector query.
|
||||
func (_s *ChannelMonitorSelect) Aggregate(fns ...AggregateFunc) *ChannelMonitorSelect {
|
||||
_s.fns = append(_s.fns, fns...)
|
||||
return _s
|
||||
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (_s *ChannelMonitorSelect) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
|
||||
if err := _s.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
return scanWithInterceptors[*ChannelMonitorQuery, *ChannelMonitorSelect](ctx, _s.ChannelMonitorQuery, _s, _s.inters, v)
|
||||
}
|
||||
|
||||
func (_s *ChannelMonitorSelect) sqlScan(ctx context.Context, root *ChannelMonitorQuery, v any) error {
|
||||
selector := root.sqlQuery(ctx)
|
||||
aggregation := make([]string, 0, len(_s.fns))
|
||||
for _, fn := range _s.fns {
|
||||
aggregation = append(aggregation, fn(selector))
|
||||
}
|
||||
switch n := len(*_s.selector.flds); {
|
||||
case n == 0 && len(aggregation) > 0:
|
||||
selector.Select(aggregation...)
|
||||
case n != 0 && len(aggregation) > 0:
|
||||
selector.AppendSelect(aggregation...)
|
||||
}
|
||||
rows := &sql.Rows{}
|
||||
query, args := selector.Query()
|
||||
if err := _s.driver.Query(ctx, query, args, rows); err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
return sql.ScanSlice(rows, v)
|
||||
}
|
||||
1328
backend/ent/channelmonitor_update.go
Normal file
1328
backend/ent/channelmonitor_update.go
Normal file
File diff suppressed because it is too large
Load Diff
278
backend/ent/channelmonitordailyrollup.go
Normal file
278
backend/ent/channelmonitordailyrollup.go
Normal file
@@ -0,0 +1,278 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup"
|
||||
)
|
||||
|
||||
// ChannelMonitorDailyRollup is the model entity for the ChannelMonitorDailyRollup schema.
|
||||
type ChannelMonitorDailyRollup struct {
|
||||
config `json:"-"`
|
||||
// ID of the ent.
|
||||
ID int64 `json:"id,omitempty"`
|
||||
// MonitorID holds the value of the "monitor_id" field.
|
||||
MonitorID int64 `json:"monitor_id,omitempty"`
|
||||
// Model holds the value of the "model" field.
|
||||
Model string `json:"model,omitempty"`
|
||||
// BucketDate holds the value of the "bucket_date" field.
|
||||
BucketDate time.Time `json:"bucket_date,omitempty"`
|
||||
// TotalChecks holds the value of the "total_checks" field.
|
||||
TotalChecks int `json:"total_checks,omitempty"`
|
||||
// OkCount holds the value of the "ok_count" field.
|
||||
OkCount int `json:"ok_count,omitempty"`
|
||||
// OperationalCount holds the value of the "operational_count" field.
|
||||
OperationalCount int `json:"operational_count,omitempty"`
|
||||
// DegradedCount holds the value of the "degraded_count" field.
|
||||
DegradedCount int `json:"degraded_count,omitempty"`
|
||||
// FailedCount holds the value of the "failed_count" field.
|
||||
FailedCount int `json:"failed_count,omitempty"`
|
||||
// ErrorCount holds the value of the "error_count" field.
|
||||
ErrorCount int `json:"error_count,omitempty"`
|
||||
// SumLatencyMs holds the value of the "sum_latency_ms" field.
|
||||
SumLatencyMs int64 `json:"sum_latency_ms,omitempty"`
|
||||
// CountLatency holds the value of the "count_latency" field.
|
||||
CountLatency int `json:"count_latency,omitempty"`
|
||||
// SumPingLatencyMs holds the value of the "sum_ping_latency_ms" field.
|
||||
SumPingLatencyMs int64 `json:"sum_ping_latency_ms,omitempty"`
|
||||
// CountPingLatency holds the value of the "count_ping_latency" field.
|
||||
CountPingLatency int `json:"count_ping_latency,omitempty"`
|
||||
// ComputedAt holds the value of the "computed_at" field.
|
||||
ComputedAt time.Time `json:"computed_at,omitempty"`
|
||||
// Edges holds the relations/edges for other nodes in the graph.
|
||||
// The values are being populated by the ChannelMonitorDailyRollupQuery when eager-loading is set.
|
||||
Edges ChannelMonitorDailyRollupEdges `json:"edges"`
|
||||
selectValues sql.SelectValues
|
||||
}
|
||||
|
||||
// ChannelMonitorDailyRollupEdges holds the relations/edges for other nodes in the graph.
|
||||
type ChannelMonitorDailyRollupEdges struct {
|
||||
// Monitor holds the value of the monitor edge.
|
||||
Monitor *ChannelMonitor `json:"monitor,omitempty"`
|
||||
// loadedTypes holds the information for reporting if a
|
||||
// type was loaded (or requested) in eager-loading or not.
|
||||
loadedTypes [1]bool
|
||||
}
|
||||
|
||||
// MonitorOrErr returns the Monitor value or an error if the edge
|
||||
// was not loaded in eager-loading, or loaded but was not found.
|
||||
func (e ChannelMonitorDailyRollupEdges) MonitorOrErr() (*ChannelMonitor, error) {
|
||||
if e.Monitor != nil {
|
||||
return e.Monitor, nil
|
||||
} else if e.loadedTypes[0] {
|
||||
return nil, &NotFoundError{label: channelmonitor.Label}
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "monitor"}
|
||||
}
|
||||
|
||||
// scanValues returns the types for scanning values from sql.Rows.
|
||||
func (*ChannelMonitorDailyRollup) scanValues(columns []string) ([]any, error) {
|
||||
values := make([]any, len(columns))
|
||||
for i := range columns {
|
||||
switch columns[i] {
|
||||
case channelmonitordailyrollup.FieldID, channelmonitordailyrollup.FieldMonitorID, channelmonitordailyrollup.FieldTotalChecks, channelmonitordailyrollup.FieldOkCount, channelmonitordailyrollup.FieldOperationalCount, channelmonitordailyrollup.FieldDegradedCount, channelmonitordailyrollup.FieldFailedCount, channelmonitordailyrollup.FieldErrorCount, channelmonitordailyrollup.FieldSumLatencyMs, channelmonitordailyrollup.FieldCountLatency, channelmonitordailyrollup.FieldSumPingLatencyMs, channelmonitordailyrollup.FieldCountPingLatency:
|
||||
values[i] = new(sql.NullInt64)
|
||||
case channelmonitordailyrollup.FieldModel:
|
||||
values[i] = new(sql.NullString)
|
||||
case channelmonitordailyrollup.FieldBucketDate, channelmonitordailyrollup.FieldComputedAt:
|
||||
values[i] = new(sql.NullTime)
|
||||
default:
|
||||
values[i] = new(sql.UnknownType)
|
||||
}
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the ChannelMonitorDailyRollup fields.
func (_m *ChannelMonitorDailyRollup) assignValues(columns []string, values []any) error {
	// There must be at least one scan value per column; extra values are ignored.
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case channelmonitordailyrollup.FieldID:
			// ID is required, so no Valid check; a NULL id scans as 0.
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			_m.ID = int64(value.Int64)
		case channelmonitordailyrollup.FieldMonitorID:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field monitor_id", values[i])
			} else if value.Valid {
				_m.MonitorID = value.Int64
			}
		case channelmonitordailyrollup.FieldModel:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field model", values[i])
			} else if value.Valid {
				_m.Model = value.String
			}
		case channelmonitordailyrollup.FieldBucketDate:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field bucket_date", values[i])
			} else if value.Valid {
				_m.BucketDate = value.Time
			}
		case channelmonitordailyrollup.FieldTotalChecks:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field total_checks", values[i])
			} else if value.Valid {
				_m.TotalChecks = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldOkCount:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field ok_count", values[i])
			} else if value.Valid {
				_m.OkCount = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldOperationalCount:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field operational_count", values[i])
			} else if value.Valid {
				_m.OperationalCount = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldDegradedCount:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field degraded_count", values[i])
			} else if value.Valid {
				_m.DegradedCount = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldFailedCount:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field failed_count", values[i])
			} else if value.Valid {
				_m.FailedCount = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldErrorCount:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field error_count", values[i])
			} else if value.Valid {
				_m.ErrorCount = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldSumLatencyMs:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field sum_latency_ms", values[i])
			} else if value.Valid {
				_m.SumLatencyMs = value.Int64
			}
		case channelmonitordailyrollup.FieldCountLatency:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field count_latency", values[i])
			} else if value.Valid {
				_m.CountLatency = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldSumPingLatencyMs:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field sum_ping_latency_ms", values[i])
			} else if value.Valid {
				_m.SumPingLatencyMs = value.Int64
			}
		case channelmonitordailyrollup.FieldCountPingLatency:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field count_ping_latency", values[i])
			} else if value.Valid {
				_m.CountPingLatency = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldComputedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field computed_at", values[i])
			} else if value.Valid {
				_m.ComputedAt = value.Time
			}
		default:
			// Unknown columns come from dynamic selections/modifiers; stash
			// them so they can be retrieved later via Value(name).
			_m.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}
|
||||
|
||||
// Value returns the ent.Value that was dynamically selected and assigned to the ChannelMonitorDailyRollup.
// This includes values selected through modifiers, order, etc.
// Values land here via the default branch of assignValues.
func (_m *ChannelMonitorDailyRollup) Value(name string) (ent.Value, error) {
	return _m.selectValues.Get(name)
}
|
||||
|
||||
// QueryMonitor queries the "monitor" edge of the ChannelMonitorDailyRollup entity.
// The returned query builder can be further filtered or executed by the caller.
func (_m *ChannelMonitorDailyRollup) QueryMonitor() *ChannelMonitorQuery {
	return NewChannelMonitorDailyRollupClient(_m.config).QueryMonitor(_m)
}
|
||||
|
||||
// Update returns a builder for updating this ChannelMonitorDailyRollup.
// Note that you need to call ChannelMonitorDailyRollup.Unwrap() before calling this method if this ChannelMonitorDailyRollup
// was returned from a transaction, and the transaction was committed or rolled back.
func (_m *ChannelMonitorDailyRollup) Update() *ChannelMonitorDailyRollupUpdateOne {
	return NewChannelMonitorDailyRollupClient(_m.config).UpdateOne(_m)
}
|
||||
|
||||
// Unwrap unwraps the ChannelMonitorDailyRollup entity that was returned from a transaction after it was closed,
|
||||
// so that all future queries will be executed through the driver which created the transaction.
|
||||
func (_m *ChannelMonitorDailyRollup) Unwrap() *ChannelMonitorDailyRollup {
|
||||
_tx, ok := _m.config.driver.(*txDriver)
|
||||
if !ok {
|
||||
panic("ent: ChannelMonitorDailyRollup is not a transactional entity")
|
||||
}
|
||||
_m.config.driver = _tx.drv
|
||||
return _m
|
||||
}
|
||||
|
||||
// String implements the fmt.Stringer.
|
||||
func (_m *ChannelMonitorDailyRollup) String() string {
|
||||
var builder strings.Builder
|
||||
builder.WriteString("ChannelMonitorDailyRollup(")
|
||||
builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
|
||||
builder.WriteString("monitor_id=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.MonitorID))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("model=")
|
||||
builder.WriteString(_m.Model)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("bucket_date=")
|
||||
builder.WriteString(_m.BucketDate.Format(time.ANSIC))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("total_checks=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.TotalChecks))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("ok_count=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.OkCount))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("operational_count=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.OperationalCount))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("degraded_count=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.DegradedCount))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("failed_count=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.FailedCount))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("error_count=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.ErrorCount))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("sum_latency_ms=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.SumLatencyMs))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("count_latency=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.CountLatency))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("sum_ping_latency_ms=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.SumPingLatencyMs))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("count_ping_latency=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.CountPingLatency))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("computed_at=")
|
||||
builder.WriteString(_m.ComputedAt.Format(time.ANSIC))
|
||||
builder.WriteByte(')')
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
// ChannelMonitorDailyRollups is a parsable slice of ChannelMonitorDailyRollup.
// NOTE(review): presumably the element type returned by generated list queries — confirm against the query builder.
type ChannelMonitorDailyRollups []*ChannelMonitorDailyRollup
|
||||
@@ -0,0 +1,206 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package channelmonitordailyrollup
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
)
|
||||
|
||||
const (
	// Label holds the string label denoting the channelmonitordailyrollup type in the database.
	Label = "channel_monitor_daily_rollup"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldMonitorID holds the string denoting the monitor_id field in the database.
	FieldMonitorID = "monitor_id"
	// FieldModel holds the string denoting the model field in the database.
	FieldModel = "model"
	// FieldBucketDate holds the string denoting the bucket_date field in the database.
	FieldBucketDate = "bucket_date"
	// FieldTotalChecks holds the string denoting the total_checks field in the database.
	FieldTotalChecks = "total_checks"
	// FieldOkCount holds the string denoting the ok_count field in the database.
	FieldOkCount = "ok_count"
	// FieldOperationalCount holds the string denoting the operational_count field in the database.
	FieldOperationalCount = "operational_count"
	// FieldDegradedCount holds the string denoting the degraded_count field in the database.
	FieldDegradedCount = "degraded_count"
	// FieldFailedCount holds the string denoting the failed_count field in the database.
	FieldFailedCount = "failed_count"
	// FieldErrorCount holds the string denoting the error_count field in the database.
	FieldErrorCount = "error_count"
	// FieldSumLatencyMs holds the string denoting the sum_latency_ms field in the database.
	FieldSumLatencyMs = "sum_latency_ms"
	// FieldCountLatency holds the string denoting the count_latency field in the database.
	FieldCountLatency = "count_latency"
	// FieldSumPingLatencyMs holds the string denoting the sum_ping_latency_ms field in the database.
	FieldSumPingLatencyMs = "sum_ping_latency_ms"
	// FieldCountPingLatency holds the string denoting the count_ping_latency field in the database.
	FieldCountPingLatency = "count_ping_latency"
	// FieldComputedAt holds the string denoting the computed_at field in the database.
	FieldComputedAt = "computed_at"
	// EdgeMonitor holds the string denoting the monitor edge name in mutations.
	EdgeMonitor = "monitor"
	// Table holds the table name of the channelmonitordailyrollup in the database.
	Table = "channel_monitor_daily_rollups"
	// MonitorTable is the table that holds the monitor relation/edge.
	// It equals Table because the edge's foreign key (monitor_id) lives on the
	// rollup table itself (M2O relation).
	MonitorTable = "channel_monitor_daily_rollups"
	// MonitorInverseTable is the table name for the ChannelMonitor entity.
	// It exists in this package in order to avoid circular dependency with the "channelmonitor" package.
	MonitorInverseTable = "channel_monitors"
	// MonitorColumn is the table column denoting the monitor relation/edge.
	MonitorColumn = "monitor_id"
)
|
||||
|
||||
// Columns holds all SQL columns for channelmonitordailyrollup fields.
// Consulted by ValidColumn below when validating dynamic column names.
var Columns = []string{
	FieldID,
	FieldMonitorID,
	FieldModel,
	FieldBucketDate,
	FieldTotalChecks,
	FieldOkCount,
	FieldOperationalCount,
	FieldDegradedCount,
	FieldFailedCount,
	FieldErrorCount,
	FieldSumLatencyMs,
	FieldCountLatency,
	FieldSumPingLatencyMs,
	FieldCountPingLatency,
	FieldComputedAt,
}
|
||||
|
||||
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||
func ValidColumn(column string) bool {
|
||||
for i := range Columns {
|
||||
if column == Columns[i] {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Default values and validators below are populated by the generated runtime
// package from the schema definition; they are nil/zero until initialized.
var (
	// ModelValidator is a validator for the "model" field. It is called by the builders before save.
	ModelValidator func(string) error
	// DefaultTotalChecks holds the default value on creation for the "total_checks" field.
	DefaultTotalChecks int
	// DefaultOkCount holds the default value on creation for the "ok_count" field.
	DefaultOkCount int
	// DefaultOperationalCount holds the default value on creation for the "operational_count" field.
	DefaultOperationalCount int
	// DefaultDegradedCount holds the default value on creation for the "degraded_count" field.
	DefaultDegradedCount int
	// DefaultFailedCount holds the default value on creation for the "failed_count" field.
	DefaultFailedCount int
	// DefaultErrorCount holds the default value on creation for the "error_count" field.
	DefaultErrorCount int
	// DefaultSumLatencyMs holds the default value on creation for the "sum_latency_ms" field.
	DefaultSumLatencyMs int64
	// DefaultCountLatency holds the default value on creation for the "count_latency" field.
	DefaultCountLatency int
	// DefaultSumPingLatencyMs holds the default value on creation for the "sum_ping_latency_ms" field.
	DefaultSumPingLatencyMs int64
	// DefaultCountPingLatency holds the default value on creation for the "count_ping_latency" field.
	DefaultCountPingLatency int
	// DefaultComputedAt holds the default value on creation for the "computed_at" field.
	DefaultComputedAt func() time.Time
	// UpdateDefaultComputedAt holds the default value on update for the "computed_at" field.
	UpdateDefaultComputedAt func() time.Time
)
|
||||
|
||||
// OrderOption defines the ordering options for the ChannelMonitorDailyRollup queries.
// Each option mutates the query's SQL selector to append an ORDER BY term.
type OrderOption func(*sql.Selector)
|
||||
|
||||
// Ordering helpers: each By* function returns an OrderOption that orders the
// query results by a single column (or, for ByMonitorField, by a field of the
// related monitor entity).

// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByMonitorID orders the results by the monitor_id field.
func ByMonitorID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldMonitorID, opts...).ToFunc()
}

// ByModel orders the results by the model field.
func ByModel(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldModel, opts...).ToFunc()
}

// ByBucketDate orders the results by the bucket_date field.
func ByBucketDate(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldBucketDate, opts...).ToFunc()
}

// ByTotalChecks orders the results by the total_checks field.
func ByTotalChecks(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldTotalChecks, opts...).ToFunc()
}

// ByOkCount orders the results by the ok_count field.
func ByOkCount(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldOkCount, opts...).ToFunc()
}

// ByOperationalCount orders the results by the operational_count field.
func ByOperationalCount(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldOperationalCount, opts...).ToFunc()
}

// ByDegradedCount orders the results by the degraded_count field.
func ByDegradedCount(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldDegradedCount, opts...).ToFunc()
}

// ByFailedCount orders the results by the failed_count field.
func ByFailedCount(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldFailedCount, opts...).ToFunc()
}

// ByErrorCount orders the results by the error_count field.
func ByErrorCount(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldErrorCount, opts...).ToFunc()
}

// BySumLatencyMs orders the results by the sum_latency_ms field.
func BySumLatencyMs(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldSumLatencyMs, opts...).ToFunc()
}

// ByCountLatency orders the results by the count_latency field.
func ByCountLatency(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCountLatency, opts...).ToFunc()
}

// BySumPingLatencyMs orders the results by the sum_ping_latency_ms field.
func BySumPingLatencyMs(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldSumPingLatencyMs, opts...).ToFunc()
}

// ByCountPingLatency orders the results by the count_ping_latency field.
func ByCountPingLatency(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCountPingLatency, opts...).ToFunc()
}

// ByComputedAt orders the results by the computed_at field.
func ByComputedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldComputedAt, opts...).ToFunc()
}

// ByMonitorField orders the results by monitor field.
func ByMonitorField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newMonitorStep(), sql.OrderByField(field, opts...))
	}
}
|
||||
// newMonitorStep builds the sqlgraph traversal step for the "monitor" edge:
// an M2O (inverse) relation joining this table's monitor_id column to the
// channel_monitors table.
func newMonitorStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(MonitorInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, true, MonitorTable, MonitorColumn),
	)
}
|
||||
729
backend/ent/channelmonitordailyrollup/where.go
Normal file
729
backend/ent/channelmonitordailyrollup/where.go
Normal file
@@ -0,0 +1,729 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package channelmonitordailyrollup
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// --- ID predicates ---

// ID filters vertices based on their ID field.
func ID(id int64) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldID, id))
}

// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int64) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldID, id))
}

// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int64) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldID, id))
}

// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int64) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldID, ids...))
}

// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int64) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldID, ids...))
}

// IDGT applies the GT predicate on the ID field.
func IDGT(id int64) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldID, id))
}

// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int64) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldID, id))
}

// IDLT applies the LT predicate on the ID field.
func IDLT(id int64) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldID, id))
}

// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int64) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldID, id))
}

// --- Equality shortcuts (one per field, aliasing the *EQ predicates) ---

// MonitorID applies equality check predicate on the "monitor_id" field. It's identical to MonitorIDEQ.
func MonitorID(v int64) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldMonitorID, v))
}

// Model applies equality check predicate on the "model" field. It's identical to ModelEQ.
func Model(v string) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldModel, v))
}

// BucketDate applies equality check predicate on the "bucket_date" field. It's identical to BucketDateEQ.
func BucketDate(v time.Time) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldBucketDate, v))
}

// TotalChecks applies equality check predicate on the "total_checks" field. It's identical to TotalChecksEQ.
func TotalChecks(v int) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldTotalChecks, v))
}

// OkCount applies equality check predicate on the "ok_count" field. It's identical to OkCountEQ.
func OkCount(v int) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldOkCount, v))
}

// OperationalCount applies equality check predicate on the "operational_count" field. It's identical to OperationalCountEQ.
func OperationalCount(v int) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldOperationalCount, v))
}

// DegradedCount applies equality check predicate on the "degraded_count" field. It's identical to DegradedCountEQ.
func DegradedCount(v int) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldDegradedCount, v))
}

// FailedCount applies equality check predicate on the "failed_count" field. It's identical to FailedCountEQ.
func FailedCount(v int) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldFailedCount, v))
}

// ErrorCount applies equality check predicate on the "error_count" field. It's identical to ErrorCountEQ.
func ErrorCount(v int) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldErrorCount, v))
}

// SumLatencyMs applies equality check predicate on the "sum_latency_ms" field. It's identical to SumLatencyMsEQ.
func SumLatencyMs(v int64) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldSumLatencyMs, v))
}

// CountLatency applies equality check predicate on the "count_latency" field. It's identical to CountLatencyEQ.
func CountLatency(v int) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldCountLatency, v))
}

// SumPingLatencyMs applies equality check predicate on the "sum_ping_latency_ms" field. It's identical to SumPingLatencyMsEQ.
func SumPingLatencyMs(v int64) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldSumPingLatencyMs, v))
}

// CountPingLatency applies equality check predicate on the "count_ping_latency" field. It's identical to CountPingLatencyEQ.
func CountPingLatency(v int) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldCountPingLatency, v))
}

// ComputedAt applies equality check predicate on the "computed_at" field. It's identical to ComputedAtEQ.
func ComputedAt(v time.Time) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldComputedAt, v))
}

// --- monitor_id predicates ---

// MonitorIDEQ applies the EQ predicate on the "monitor_id" field.
func MonitorIDEQ(v int64) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldMonitorID, v))
}

// MonitorIDNEQ applies the NEQ predicate on the "monitor_id" field.
func MonitorIDNEQ(v int64) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldMonitorID, v))
}

// MonitorIDIn applies the In predicate on the "monitor_id" field.
func MonitorIDIn(vs ...int64) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldMonitorID, vs...))
}

// MonitorIDNotIn applies the NotIn predicate on the "monitor_id" field.
func MonitorIDNotIn(vs ...int64) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldMonitorID, vs...))
}

// --- model predicates ---

// ModelEQ applies the EQ predicate on the "model" field.
func ModelEQ(v string) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldModel, v))
}

// ModelNEQ applies the NEQ predicate on the "model" field.
func ModelNEQ(v string) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldModel, v))
}

// ModelIn applies the In predicate on the "model" field.
func ModelIn(vs ...string) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldModel, vs...))
}

// ModelNotIn applies the NotIn predicate on the "model" field.
func ModelNotIn(vs ...string) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldModel, vs...))
}

// ModelGT applies the GT predicate on the "model" field.
func ModelGT(v string) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldModel, v))
}

// ModelGTE applies the GTE predicate on the "model" field.
func ModelGTE(v string) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldModel, v))
}

// ModelLT applies the LT predicate on the "model" field.
func ModelLT(v string) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldModel, v))
}

// ModelLTE applies the LTE predicate on the "model" field.
func ModelLTE(v string) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldModel, v))
}

// ModelContains applies the Contains predicate on the "model" field.
func ModelContains(v string) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldContains(FieldModel, v))
}

// ModelHasPrefix applies the HasPrefix predicate on the "model" field.
func ModelHasPrefix(v string) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldHasPrefix(FieldModel, v))
}

// ModelHasSuffix applies the HasSuffix predicate on the "model" field.
func ModelHasSuffix(v string) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldHasSuffix(FieldModel, v))
}

// ModelEqualFold applies the EqualFold predicate on the "model" field.
func ModelEqualFold(v string) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldEqualFold(FieldModel, v))
}

// ModelContainsFold applies the ContainsFold predicate on the "model" field.
func ModelContainsFold(v string) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldContainsFold(FieldModel, v))
}

// --- bucket_date predicates ---

// BucketDateEQ applies the EQ predicate on the "bucket_date" field.
func BucketDateEQ(v time.Time) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldBucketDate, v))
}

// BucketDateNEQ applies the NEQ predicate on the "bucket_date" field.
func BucketDateNEQ(v time.Time) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldBucketDate, v))
}

// BucketDateIn applies the In predicate on the "bucket_date" field.
func BucketDateIn(vs ...time.Time) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldBucketDate, vs...))
}

// BucketDateNotIn applies the NotIn predicate on the "bucket_date" field.
func BucketDateNotIn(vs ...time.Time) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldBucketDate, vs...))
}

// BucketDateGT applies the GT predicate on the "bucket_date" field.
func BucketDateGT(v time.Time) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldBucketDate, v))
}

// BucketDateGTE applies the GTE predicate on the "bucket_date" field.
func BucketDateGTE(v time.Time) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldBucketDate, v))
}

// BucketDateLT applies the LT predicate on the "bucket_date" field.
func BucketDateLT(v time.Time) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldBucketDate, v))
}

// BucketDateLTE applies the LTE predicate on the "bucket_date" field.
func BucketDateLTE(v time.Time) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldBucketDate, v))
}

// --- total_checks predicates ---

// TotalChecksEQ applies the EQ predicate on the "total_checks" field.
func TotalChecksEQ(v int) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldTotalChecks, v))
}

// TotalChecksNEQ applies the NEQ predicate on the "total_checks" field.
func TotalChecksNEQ(v int) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldTotalChecks, v))
}

// TotalChecksIn applies the In predicate on the "total_checks" field.
func TotalChecksIn(vs ...int) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldTotalChecks, vs...))
}

// TotalChecksNotIn applies the NotIn predicate on the "total_checks" field.
func TotalChecksNotIn(vs ...int) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldTotalChecks, vs...))
}

// TotalChecksGT applies the GT predicate on the "total_checks" field.
func TotalChecksGT(v int) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldTotalChecks, v))
}

// TotalChecksGTE applies the GTE predicate on the "total_checks" field.
func TotalChecksGTE(v int) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldTotalChecks, v))
}

// TotalChecksLT applies the LT predicate on the "total_checks" field.
func TotalChecksLT(v int) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldTotalChecks, v))
}
|
||||
|
||||
// TotalChecksLTE applies the LTE predicate on the "total_checks" field.
|
||||
func TotalChecksLTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldTotalChecks, v))
|
||||
}
|
||||
|
||||
// OkCountEQ applies the EQ predicate on the "ok_count" field.
|
||||
func OkCountEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldOkCount, v))
|
||||
}
|
||||
|
||||
// OkCountNEQ applies the NEQ predicate on the "ok_count" field.
|
||||
func OkCountNEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldOkCount, v))
|
||||
}
|
||||
|
||||
// OkCountIn applies the In predicate on the "ok_count" field.
|
||||
func OkCountIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldOkCount, vs...))
|
||||
}
|
||||
|
||||
// OkCountNotIn applies the NotIn predicate on the "ok_count" field.
|
||||
func OkCountNotIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldOkCount, vs...))
|
||||
}
|
||||
|
||||
// OkCountGT applies the GT predicate on the "ok_count" field.
|
||||
func OkCountGT(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldOkCount, v))
|
||||
}
|
||||
|
||||
// OkCountGTE applies the GTE predicate on the "ok_count" field.
|
||||
func OkCountGTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldOkCount, v))
|
||||
}
|
||||
|
||||
// OkCountLT applies the LT predicate on the "ok_count" field.
|
||||
func OkCountLT(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldOkCount, v))
|
||||
}
|
||||
|
||||
// OkCountLTE applies the LTE predicate on the "ok_count" field.
|
||||
func OkCountLTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldOkCount, v))
|
||||
}
|
||||
|
||||
// OperationalCountEQ applies the EQ predicate on the "operational_count" field.
|
||||
func OperationalCountEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldOperationalCount, v))
|
||||
}
|
||||
|
||||
// OperationalCountNEQ applies the NEQ predicate on the "operational_count" field.
|
||||
func OperationalCountNEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldOperationalCount, v))
|
||||
}
|
||||
|
||||
// OperationalCountIn applies the In predicate on the "operational_count" field.
|
||||
func OperationalCountIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldOperationalCount, vs...))
|
||||
}
|
||||
|
||||
// OperationalCountNotIn applies the NotIn predicate on the "operational_count" field.
|
||||
func OperationalCountNotIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldOperationalCount, vs...))
|
||||
}
|
||||
|
||||
// OperationalCountGT applies the GT predicate on the "operational_count" field.
|
||||
func OperationalCountGT(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldOperationalCount, v))
|
||||
}
|
||||
|
||||
// OperationalCountGTE applies the GTE predicate on the "operational_count" field.
|
||||
func OperationalCountGTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldOperationalCount, v))
|
||||
}
|
||||
|
||||
// OperationalCountLT applies the LT predicate on the "operational_count" field.
|
||||
func OperationalCountLT(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldOperationalCount, v))
|
||||
}
|
||||
|
||||
// OperationalCountLTE applies the LTE predicate on the "operational_count" field.
|
||||
func OperationalCountLTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldOperationalCount, v))
|
||||
}
|
||||
|
||||
// DegradedCountEQ applies the EQ predicate on the "degraded_count" field.
|
||||
func DegradedCountEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldDegradedCount, v))
|
||||
}
|
||||
|
||||
// DegradedCountNEQ applies the NEQ predicate on the "degraded_count" field.
|
||||
func DegradedCountNEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldDegradedCount, v))
|
||||
}
|
||||
|
||||
// DegradedCountIn applies the In predicate on the "degraded_count" field.
|
||||
func DegradedCountIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldDegradedCount, vs...))
|
||||
}
|
||||
|
||||
// DegradedCountNotIn applies the NotIn predicate on the "degraded_count" field.
|
||||
func DegradedCountNotIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldDegradedCount, vs...))
|
||||
}
|
||||
|
||||
// DegradedCountGT applies the GT predicate on the "degraded_count" field.
|
||||
func DegradedCountGT(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldDegradedCount, v))
|
||||
}
|
||||
|
||||
// DegradedCountGTE applies the GTE predicate on the "degraded_count" field.
|
||||
func DegradedCountGTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldDegradedCount, v))
|
||||
}
|
||||
|
||||
// DegradedCountLT applies the LT predicate on the "degraded_count" field.
|
||||
func DegradedCountLT(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldDegradedCount, v))
|
||||
}
|
||||
|
||||
// DegradedCountLTE applies the LTE predicate on the "degraded_count" field.
|
||||
func DegradedCountLTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldDegradedCount, v))
|
||||
}
|
||||
|
||||
// FailedCountEQ applies the EQ predicate on the "failed_count" field.
|
||||
func FailedCountEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldFailedCount, v))
|
||||
}
|
||||
|
||||
// FailedCountNEQ applies the NEQ predicate on the "failed_count" field.
|
||||
func FailedCountNEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldFailedCount, v))
|
||||
}
|
||||
|
||||
// FailedCountIn applies the In predicate on the "failed_count" field.
|
||||
func FailedCountIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldFailedCount, vs...))
|
||||
}
|
||||
|
||||
// FailedCountNotIn applies the NotIn predicate on the "failed_count" field.
|
||||
func FailedCountNotIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldFailedCount, vs...))
|
||||
}
|
||||
|
||||
// FailedCountGT applies the GT predicate on the "failed_count" field.
|
||||
func FailedCountGT(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldFailedCount, v))
|
||||
}
|
||||
|
||||
// FailedCountGTE applies the GTE predicate on the "failed_count" field.
|
||||
func FailedCountGTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldFailedCount, v))
|
||||
}
|
||||
|
||||
// FailedCountLT applies the LT predicate on the "failed_count" field.
|
||||
func FailedCountLT(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldFailedCount, v))
|
||||
}
|
||||
|
||||
// FailedCountLTE applies the LTE predicate on the "failed_count" field.
|
||||
func FailedCountLTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldFailedCount, v))
|
||||
}
|
||||
|
||||
// ErrorCountEQ applies the EQ predicate on the "error_count" field.
|
||||
func ErrorCountEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldErrorCount, v))
|
||||
}
|
||||
|
||||
// ErrorCountNEQ applies the NEQ predicate on the "error_count" field.
|
||||
func ErrorCountNEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldErrorCount, v))
|
||||
}
|
||||
|
||||
// ErrorCountIn applies the In predicate on the "error_count" field.
|
||||
func ErrorCountIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldErrorCount, vs...))
|
||||
}
|
||||
|
||||
// ErrorCountNotIn applies the NotIn predicate on the "error_count" field.
|
||||
func ErrorCountNotIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldErrorCount, vs...))
|
||||
}
|
||||
|
||||
// ErrorCountGT applies the GT predicate on the "error_count" field.
|
||||
func ErrorCountGT(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldErrorCount, v))
|
||||
}
|
||||
|
||||
// ErrorCountGTE applies the GTE predicate on the "error_count" field.
|
||||
func ErrorCountGTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldErrorCount, v))
|
||||
}
|
||||
|
||||
// ErrorCountLT applies the LT predicate on the "error_count" field.
|
||||
func ErrorCountLT(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldErrorCount, v))
|
||||
}
|
||||
|
||||
// ErrorCountLTE applies the LTE predicate on the "error_count" field.
|
||||
func ErrorCountLTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldErrorCount, v))
|
||||
}
|
||||
|
||||
// SumLatencyMsEQ applies the EQ predicate on the "sum_latency_ms" field.
|
||||
func SumLatencyMsEQ(v int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldSumLatencyMs, v))
|
||||
}
|
||||
|
||||
// SumLatencyMsNEQ applies the NEQ predicate on the "sum_latency_ms" field.
|
||||
func SumLatencyMsNEQ(v int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldSumLatencyMs, v))
|
||||
}
|
||||
|
||||
// SumLatencyMsIn applies the In predicate on the "sum_latency_ms" field.
|
||||
func SumLatencyMsIn(vs ...int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldSumLatencyMs, vs...))
|
||||
}
|
||||
|
||||
// SumLatencyMsNotIn applies the NotIn predicate on the "sum_latency_ms" field.
|
||||
func SumLatencyMsNotIn(vs ...int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldSumLatencyMs, vs...))
|
||||
}
|
||||
|
||||
// SumLatencyMsGT applies the GT predicate on the "sum_latency_ms" field.
|
||||
func SumLatencyMsGT(v int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldSumLatencyMs, v))
|
||||
}
|
||||
|
||||
// SumLatencyMsGTE applies the GTE predicate on the "sum_latency_ms" field.
|
||||
func SumLatencyMsGTE(v int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldSumLatencyMs, v))
|
||||
}
|
||||
|
||||
// SumLatencyMsLT applies the LT predicate on the "sum_latency_ms" field.
|
||||
func SumLatencyMsLT(v int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldSumLatencyMs, v))
|
||||
}
|
||||
|
||||
// SumLatencyMsLTE applies the LTE predicate on the "sum_latency_ms" field.
|
||||
func SumLatencyMsLTE(v int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldSumLatencyMs, v))
|
||||
}
|
||||
|
||||
// CountLatencyEQ applies the EQ predicate on the "count_latency" field.
|
||||
func CountLatencyEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldCountLatency, v))
|
||||
}
|
||||
|
||||
// CountLatencyNEQ applies the NEQ predicate on the "count_latency" field.
|
||||
func CountLatencyNEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldCountLatency, v))
|
||||
}
|
||||
|
||||
// CountLatencyIn applies the In predicate on the "count_latency" field.
|
||||
func CountLatencyIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldCountLatency, vs...))
|
||||
}
|
||||
|
||||
// CountLatencyNotIn applies the NotIn predicate on the "count_latency" field.
|
||||
func CountLatencyNotIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldCountLatency, vs...))
|
||||
}
|
||||
|
||||
// CountLatencyGT applies the GT predicate on the "count_latency" field.
|
||||
func CountLatencyGT(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldCountLatency, v))
|
||||
}
|
||||
|
||||
// CountLatencyGTE applies the GTE predicate on the "count_latency" field.
|
||||
func CountLatencyGTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldCountLatency, v))
|
||||
}
|
||||
|
||||
// CountLatencyLT applies the LT predicate on the "count_latency" field.
|
||||
func CountLatencyLT(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldCountLatency, v))
|
||||
}
|
||||
|
||||
// CountLatencyLTE applies the LTE predicate on the "count_latency" field.
|
||||
func CountLatencyLTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldCountLatency, v))
|
||||
}
|
||||
|
||||
// SumPingLatencyMsEQ applies the EQ predicate on the "sum_ping_latency_ms" field.
|
||||
func SumPingLatencyMsEQ(v int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldSumPingLatencyMs, v))
|
||||
}
|
||||
|
||||
// SumPingLatencyMsNEQ applies the NEQ predicate on the "sum_ping_latency_ms" field.
|
||||
func SumPingLatencyMsNEQ(v int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldSumPingLatencyMs, v))
|
||||
}
|
||||
|
||||
// SumPingLatencyMsIn applies the In predicate on the "sum_ping_latency_ms" field.
|
||||
func SumPingLatencyMsIn(vs ...int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldSumPingLatencyMs, vs...))
|
||||
}
|
||||
|
||||
// SumPingLatencyMsNotIn applies the NotIn predicate on the "sum_ping_latency_ms" field.
|
||||
func SumPingLatencyMsNotIn(vs ...int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldSumPingLatencyMs, vs...))
|
||||
}
|
||||
|
||||
// SumPingLatencyMsGT applies the GT predicate on the "sum_ping_latency_ms" field.
|
||||
func SumPingLatencyMsGT(v int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldSumPingLatencyMs, v))
|
||||
}
|
||||
|
||||
// SumPingLatencyMsGTE applies the GTE predicate on the "sum_ping_latency_ms" field.
|
||||
func SumPingLatencyMsGTE(v int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldSumPingLatencyMs, v))
|
||||
}
|
||||
|
||||
// SumPingLatencyMsLT applies the LT predicate on the "sum_ping_latency_ms" field.
|
||||
func SumPingLatencyMsLT(v int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldSumPingLatencyMs, v))
|
||||
}
|
||||
|
||||
// SumPingLatencyMsLTE applies the LTE predicate on the "sum_ping_latency_ms" field.
|
||||
func SumPingLatencyMsLTE(v int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldSumPingLatencyMs, v))
|
||||
}
|
||||
|
||||
// CountPingLatencyEQ applies the EQ predicate on the "count_ping_latency" field.
|
||||
func CountPingLatencyEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldCountPingLatency, v))
|
||||
}
|
||||
|
||||
// CountPingLatencyNEQ applies the NEQ predicate on the "count_ping_latency" field.
|
||||
func CountPingLatencyNEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldCountPingLatency, v))
|
||||
}
|
||||
|
||||
// CountPingLatencyIn applies the In predicate on the "count_ping_latency" field.
|
||||
func CountPingLatencyIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldCountPingLatency, vs...))
|
||||
}
|
||||
|
||||
// CountPingLatencyNotIn applies the NotIn predicate on the "count_ping_latency" field.
|
||||
func CountPingLatencyNotIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldCountPingLatency, vs...))
|
||||
}
|
||||
|
||||
// CountPingLatencyGT applies the GT predicate on the "count_ping_latency" field.
|
||||
func CountPingLatencyGT(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldCountPingLatency, v))
|
||||
}
|
||||
|
||||
// CountPingLatencyGTE applies the GTE predicate on the "count_ping_latency" field.
|
||||
func CountPingLatencyGTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldCountPingLatency, v))
|
||||
}
|
||||
|
||||
// CountPingLatencyLT applies the LT predicate on the "count_ping_latency" field.
|
||||
func CountPingLatencyLT(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldCountPingLatency, v))
|
||||
}
|
||||
|
||||
// CountPingLatencyLTE applies the LTE predicate on the "count_ping_latency" field.
|
||||
func CountPingLatencyLTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldCountPingLatency, v))
|
||||
}
|
||||
|
||||
// ComputedAtEQ applies the EQ predicate on the "computed_at" field.
|
||||
func ComputedAtEQ(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldComputedAt, v))
|
||||
}
|
||||
|
||||
// ComputedAtNEQ applies the NEQ predicate on the "computed_at" field.
|
||||
func ComputedAtNEQ(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldComputedAt, v))
|
||||
}
|
||||
|
||||
// ComputedAtIn applies the In predicate on the "computed_at" field.
|
||||
func ComputedAtIn(vs ...time.Time) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldComputedAt, vs...))
|
||||
}
|
||||
|
||||
// ComputedAtNotIn applies the NotIn predicate on the "computed_at" field.
|
||||
func ComputedAtNotIn(vs ...time.Time) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldComputedAt, vs...))
|
||||
}
|
||||
|
||||
// ComputedAtGT applies the GT predicate on the "computed_at" field.
|
||||
func ComputedAtGT(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldComputedAt, v))
|
||||
}
|
||||
|
||||
// ComputedAtGTE applies the GTE predicate on the "computed_at" field.
|
||||
func ComputedAtGTE(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldComputedAt, v))
|
||||
}
|
||||
|
||||
// ComputedAtLT applies the LT predicate on the "computed_at" field.
|
||||
func ComputedAtLT(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldComputedAt, v))
|
||||
}
|
||||
|
||||
// ComputedAtLTE applies the LTE predicate on the "computed_at" field.
|
||||
func ComputedAtLTE(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldComputedAt, v))
|
||||
}
|
||||
|
||||
// HasMonitor applies the HasEdge predicate on the "monitor" edge.
|
||||
func HasMonitor() predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, MonitorTable, MonitorColumn),
|
||||
)
|
||||
sqlgraph.HasNeighbors(s, step)
|
||||
})
|
||||
}
|
||||
|
||||
// HasMonitorWith applies the HasEdge predicate on the "monitor" edge with a given conditions (other predicates).
|
||||
func HasMonitorWith(preds ...predicate.ChannelMonitor) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(func(s *sql.Selector) {
|
||||
step := newMonitorStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// And groups predicates with the AND operator between them.
|
||||
func And(predicates ...predicate.ChannelMonitorDailyRollup) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.AndPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Or groups predicates with the OR operator between them.
|
||||
func Or(predicates ...predicate.ChannelMonitorDailyRollup) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.OrPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Not applies the not operator on the given predicate.
|
||||
func Not(p predicate.ChannelMonitorDailyRollup) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.NotPredicates(p))
|
||||
}
|
||||
1509
backend/ent/channelmonitordailyrollup_create.go
Normal file
1509
backend/ent/channelmonitordailyrollup_create.go
Normal file
File diff suppressed because it is too large
Load Diff
88
backend/ent/channelmonitordailyrollup_delete.go
Normal file
88
backend/ent/channelmonitordailyrollup_delete.go
Normal file
@@ -0,0 +1,88 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// ChannelMonitorDailyRollupDelete is the builder for deleting a ChannelMonitorDailyRollup entity.
|
||||
type ChannelMonitorDailyRollupDelete struct {
|
||||
config
|
||||
hooks []Hook
|
||||
mutation *ChannelMonitorDailyRollupMutation
|
||||
}
|
||||
|
||||
// Where appends a list predicates to the ChannelMonitorDailyRollupDelete builder.
|
||||
func (_d *ChannelMonitorDailyRollupDelete) Where(ps ...predicate.ChannelMonitorDailyRollup) *ChannelMonitorDailyRollupDelete {
|
||||
_d.mutation.Where(ps...)
|
||||
return _d
|
||||
}
|
||||
|
||||
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||
func (_d *ChannelMonitorDailyRollupDelete) Exec(ctx context.Context) (int, error) {
|
||||
return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_d *ChannelMonitorDailyRollupDelete) ExecX(ctx context.Context) int {
|
||||
n, err := _d.Exec(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (_d *ChannelMonitorDailyRollupDelete) sqlExec(ctx context.Context) (int, error) {
|
||||
_spec := sqlgraph.NewDeleteSpec(channelmonitordailyrollup.Table, sqlgraph.NewFieldSpec(channelmonitordailyrollup.FieldID, field.TypeInt64))
|
||||
if ps := _d.mutation.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
|
||||
if err != nil && sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
_d.mutation.done = true
|
||||
return affected, err
|
||||
}
|
||||
|
||||
// ChannelMonitorDailyRollupDeleteOne is the builder for deleting a single ChannelMonitorDailyRollup entity.
|
||||
type ChannelMonitorDailyRollupDeleteOne struct {
|
||||
_d *ChannelMonitorDailyRollupDelete
|
||||
}
|
||||
|
||||
// Where appends a list predicates to the ChannelMonitorDailyRollupDelete builder.
|
||||
func (_d *ChannelMonitorDailyRollupDeleteOne) Where(ps ...predicate.ChannelMonitorDailyRollup) *ChannelMonitorDailyRollupDeleteOne {
|
||||
_d._d.mutation.Where(ps...)
|
||||
return _d
|
||||
}
|
||||
|
||||
// Exec executes the deletion query.
|
||||
func (_d *ChannelMonitorDailyRollupDeleteOne) Exec(ctx context.Context) error {
|
||||
n, err := _d._d.Exec(ctx)
|
||||
switch {
|
||||
case err != nil:
|
||||
return err
|
||||
case n == 0:
|
||||
return &NotFoundError{channelmonitordailyrollup.Label}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_d *ChannelMonitorDailyRollupDeleteOne) ExecX(ctx context.Context) {
|
||||
if err := _d.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
643
backend/ent/channelmonitordailyrollup_query.go
Normal file
643
backend/ent/channelmonitordailyrollup_query.go
Normal file
@@ -0,0 +1,643 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// ChannelMonitorDailyRollupQuery is the builder for querying ChannelMonitorDailyRollup entities.
|
||||
type ChannelMonitorDailyRollupQuery struct {
|
||||
config
|
||||
ctx *QueryContext
|
||||
order []channelmonitordailyrollup.OrderOption
|
||||
inters []Interceptor
|
||||
predicates []predicate.ChannelMonitorDailyRollup
|
||||
withMonitor *ChannelMonitorQuery
|
||||
modifiers []func(*sql.Selector)
|
||||
// intermediate query (i.e. traversal path).
|
||||
sql *sql.Selector
|
||||
path func(context.Context) (*sql.Selector, error)
|
||||
}
|
||||
|
||||
// Where adds a new predicate for the ChannelMonitorDailyRollupQuery builder.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) Where(ps ...predicate.ChannelMonitorDailyRollup) *ChannelMonitorDailyRollupQuery {
|
||||
_q.predicates = append(_q.predicates, ps...)
|
||||
return _q
|
||||
}
|
||||
|
||||
// Limit the number of records to be returned by this query.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) Limit(limit int) *ChannelMonitorDailyRollupQuery {
|
||||
_q.ctx.Limit = &limit
|
||||
return _q
|
||||
}
|
||||
|
||||
// Offset to start from.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) Offset(offset int) *ChannelMonitorDailyRollupQuery {
|
||||
_q.ctx.Offset = &offset
|
||||
return _q
|
||||
}
|
||||
|
||||
// Unique configures the query builder to filter duplicate records on query.
|
||||
// By default, unique is set to true, and can be disabled using this method.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) Unique(unique bool) *ChannelMonitorDailyRollupQuery {
|
||||
_q.ctx.Unique = &unique
|
||||
return _q
|
||||
}
|
||||
|
||||
// Order specifies how the records should be ordered.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) Order(o ...channelmonitordailyrollup.OrderOption) *ChannelMonitorDailyRollupQuery {
|
||||
_q.order = append(_q.order, o...)
|
||||
return _q
|
||||
}
|
||||
|
||||
// QueryMonitor chains the current query on the "monitor" edge.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) QueryMonitor() *ChannelMonitorQuery {
|
||||
query := (&ChannelMonitorClient{config: _q.config}).Query()
|
||||
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||
if err := _q.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
selector := _q.sqlQuery(ctx)
|
||||
if err := selector.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(channelmonitordailyrollup.Table, channelmonitordailyrollup.FieldID, selector),
|
||||
sqlgraph.To(channelmonitor.Table, channelmonitor.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, channelmonitordailyrollup.MonitorTable, channelmonitordailyrollup.MonitorColumn),
|
||||
)
|
||||
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||
return fromU, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// First returns the first ChannelMonitorDailyRollup entity from the query.
|
||||
// Returns a *NotFoundError when no ChannelMonitorDailyRollup was found.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) First(ctx context.Context) (*ChannelMonitorDailyRollup, error) {
|
||||
nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(nodes) == 0 {
|
||||
return nil, &NotFoundError{channelmonitordailyrollup.Label}
|
||||
}
|
||||
return nodes[0], nil
|
||||
}
|
||||
|
||||
// FirstX is like First, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) FirstX(ctx context.Context) *ChannelMonitorDailyRollup {
|
||||
node, err := _q.First(ctx)
|
||||
if err != nil && !IsNotFound(err) {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// FirstID returns the first ChannelMonitorDailyRollup ID from the query.
// Returns a *NotFoundError when no ChannelMonitorDailyRollup ID was found.
func (_q *ChannelMonitorDailyRollupQuery) FirstID(ctx context.Context) (id int64, err error) {
	var ids []int64
	if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
		return
	}
	if len(ids) == 0 {
		err = &NotFoundError{channelmonitordailyrollup.Label}
		return
	}
	return ids[0], nil
}

// FirstIDX is like FirstID, but panics if an error occurs.
// When no row matches, it returns the zero ID (0) instead of panicking.
func (_q *ChannelMonitorDailyRollupQuery) FirstIDX(ctx context.Context) int64 {
	id, err := _q.FirstID(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return id
}
// Only returns a single ChannelMonitorDailyRollup entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one ChannelMonitorDailyRollup entity is found.
// Returns a *NotFoundError when no ChannelMonitorDailyRollup entities are found.
func (_q *ChannelMonitorDailyRollupQuery) Only(ctx context.Context) (*ChannelMonitorDailyRollup, error) {
	// Fetch at most two rows: enough to detect the non-singular case cheaply.
	nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
	if err != nil {
		return nil, err
	}
	switch len(nodes) {
	case 1:
		return nodes[0], nil
	case 0:
		return nil, &NotFoundError{channelmonitordailyrollup.Label}
	default:
		return nil, &NotSingularError{channelmonitordailyrollup.Label}
	}
}

// OnlyX is like Only, but panics if an error occurs.
// Unlike FirstX, a not-found result also panics.
func (_q *ChannelMonitorDailyRollupQuery) OnlyX(ctx context.Context) *ChannelMonitorDailyRollup {
	node, err := _q.Only(ctx)
	if err != nil {
		panic(err)
	}
	return node
}
// OnlyID is like Only, but returns the only ChannelMonitorDailyRollup ID in the query.
// Returns a *NotSingularError when more than one ChannelMonitorDailyRollup ID is found.
// Returns a *NotFoundError when no entities are found.
func (_q *ChannelMonitorDailyRollupQuery) OnlyID(ctx context.Context) (id int64, err error) {
	var ids []int64
	// Limit(2) is sufficient to distinguish zero, one, and many matches.
	if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
		return
	}
	switch len(ids) {
	case 1:
		id = ids[0]
	case 0:
		err = &NotFoundError{channelmonitordailyrollup.Label}
	default:
		err = &NotSingularError{channelmonitordailyrollup.Label}
	}
	return
}

// OnlyIDX is like OnlyID, but panics if an error occurs.
func (_q *ChannelMonitorDailyRollupQuery) OnlyIDX(ctx context.Context) int64 {
	id, err := _q.OnlyID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}
// All executes the query and returns a list of ChannelMonitorDailyRollups.
// The query runs through any registered interceptors before hitting SQL.
func (_q *ChannelMonitorDailyRollupQuery) All(ctx context.Context) ([]*ChannelMonitorDailyRollup, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
	if err := _q.prepareQuery(ctx); err != nil {
		return nil, err
	}
	qr := querierAll[[]*ChannelMonitorDailyRollup, *ChannelMonitorDailyRollupQuery]()
	return withInterceptors[[]*ChannelMonitorDailyRollup](ctx, _q, qr, _q.inters)
}

// AllX is like All, but panics if an error occurs.
func (_q *ChannelMonitorDailyRollupQuery) AllX(ctx context.Context) []*ChannelMonitorDailyRollup {
	nodes, err := _q.All(ctx)
	if err != nil {
		panic(err)
	}
	return nodes
}
// IDs executes the query and returns a list of ChannelMonitorDailyRollup IDs.
func (_q *ChannelMonitorDailyRollupQuery) IDs(ctx context.Context) (ids []int64, err error) {
	// Edge traversals (path != nil) may produce duplicate rows; default to
	// DISTINCT unless the caller explicitly set uniqueness.
	if _q.ctx.Unique == nil && _q.path != nil {
		_q.Unique(true)
	}
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
	if err = _q.Select(channelmonitordailyrollup.FieldID).Scan(ctx, &ids); err != nil {
		return nil, err
	}
	return ids, nil
}

// IDsX is like IDs, but panics if an error occurs.
func (_q *ChannelMonitorDailyRollupQuery) IDsX(ctx context.Context) []int64 {
	ids, err := _q.IDs(ctx)
	if err != nil {
		panic(err)
	}
	return ids
}
// Count returns the count of the given query.
func (_q *ChannelMonitorDailyRollupQuery) Count(ctx context.Context) (int, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
	if err := _q.prepareQuery(ctx); err != nil {
		return 0, err
	}
	return withInterceptors[int](ctx, _q, querierCount[*ChannelMonitorDailyRollupQuery](), _q.inters)
}

// CountX is like Count, but panics if an error occurs.
func (_q *ChannelMonitorDailyRollupQuery) CountX(ctx context.Context) int {
	count, err := _q.Count(ctx)
	if err != nil {
		panic(err)
	}
	return count
}
// Exist returns true if the query has elements in the graph.
// Implemented via FirstID, so only a single ID (not full rows) is fetched.
func (_q *ChannelMonitorDailyRollupQuery) Exist(ctx context.Context) (bool, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
	switch _, err := _q.FirstID(ctx); {
	case IsNotFound(err):
		return false, nil
	case err != nil:
		return false, fmt.Errorf("ent: check existence: %w", err)
	default:
		return true, nil
	}
}

// ExistX is like Exist, but panics if an error occurs.
func (_q *ChannelMonitorDailyRollupQuery) ExistX(ctx context.Context) bool {
	exist, err := _q.Exist(ctx)
	if err != nil {
		panic(err)
	}
	return exist
}
// Clone returns a duplicate of the ChannelMonitorDailyRollupQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
// Clone is nil-safe: cloning a nil query returns nil. Slices are copied so
// later appends on either builder do not affect the other.
func (_q *ChannelMonitorDailyRollupQuery) Clone() *ChannelMonitorDailyRollupQuery {
	if _q == nil {
		return nil
	}
	return &ChannelMonitorDailyRollupQuery{
		config:      _q.config,
		ctx:         _q.ctx.Clone(),
		order:       append([]channelmonitordailyrollup.OrderOption{}, _q.order...),
		inters:      append([]Interceptor{}, _q.inters...),
		predicates:  append([]predicate.ChannelMonitorDailyRollup{}, _q.predicates...),
		withMonitor: _q.withMonitor.Clone(),
		// clone intermediate query.
		sql:  _q.sql.Clone(),
		path: _q.path,
	}
}
// WithMonitor tells the query-builder to eager-load the nodes that are connected to
// the "monitor" edge. The optional arguments are used to configure the query builder of the edge.
func (_q *ChannelMonitorDailyRollupQuery) WithMonitor(opts ...func(*ChannelMonitorQuery)) *ChannelMonitorDailyRollupQuery {
	query := (&ChannelMonitorClient{config: _q.config}).Query()
	for _, opt := range opts {
		opt(query)
	}
	_q.withMonitor = query
	return _q
}
// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
//	var v []struct {
//		MonitorID int64 `json:"monitor_id,omitempty"`
//		Count int `json:"count,omitempty"`
//	}
//
//	client.ChannelMonitorDailyRollup.Query().
//		GroupBy(channelmonitordailyrollup.FieldMonitorID).
//		Aggregate(ent.Count()).
//		Scan(ctx, &v)
func (_q *ChannelMonitorDailyRollupQuery) GroupBy(field string, fields ...string) *ChannelMonitorDailyRollupGroupBy {
	// Replace any previously selected fields with the group-by columns.
	_q.ctx.Fields = append([]string{field}, fields...)
	grbuild := &ChannelMonitorDailyRollupGroupBy{build: _q}
	grbuild.flds = &_q.ctx.Fields
	grbuild.label = channelmonitordailyrollup.Label
	grbuild.scan = grbuild.Scan
	return grbuild
}

// Select allows the selection one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
//	var v []struct {
//		MonitorID int64 `json:"monitor_id,omitempty"`
//	}
//
//	client.ChannelMonitorDailyRollup.Query().
//		Select(channelmonitordailyrollup.FieldMonitorID).
//		Scan(ctx, &v)
func (_q *ChannelMonitorDailyRollupQuery) Select(fields ...string) *ChannelMonitorDailyRollupSelect {
	_q.ctx.Fields = append(_q.ctx.Fields, fields...)
	sbuild := &ChannelMonitorDailyRollupSelect{ChannelMonitorDailyRollupQuery: _q}
	sbuild.label = channelmonitordailyrollup.Label
	sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
	return sbuild
}

// Aggregate returns a ChannelMonitorDailyRollupSelect configured with the given aggregations.
func (_q *ChannelMonitorDailyRollupQuery) Aggregate(fns ...AggregateFunc) *ChannelMonitorDailyRollupSelect {
	return _q.Select().Aggregate(fns...)
}
// prepareQuery runs traversal interceptors, validates the selected columns,
// and resolves the lazy path (edge-traversal) selector into _q.sql.
// It is called once before every query execution.
func (_q *ChannelMonitorDailyRollupQuery) prepareQuery(ctx context.Context) error {
	for _, inter := range _q.inters {
		if inter == nil {
			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
		}
		if trv, ok := inter.(Traverser); ok {
			if err := trv.Traverse(ctx, _q); err != nil {
				return err
			}
		}
	}
	// Reject unknown column names early, before building SQL.
	for _, f := range _q.ctx.Fields {
		if !channelmonitordailyrollup.ValidColumn(f) {
			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
		}
	}
	if _q.path != nil {
		prev, err := _q.path(ctx)
		if err != nil {
			return err
		}
		_q.sql = prev
	}
	return nil
}
// sqlAll executes the query against the database, scans rows into entities,
// and eager-loads the "monitor" edge when requested via WithMonitor.
func (_q *ChannelMonitorDailyRollupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*ChannelMonitorDailyRollup, error) {
	var (
		nodes = []*ChannelMonitorDailyRollup{}
		_spec = _q.querySpec()
		// One flag per edge; tells consumers which edges were loaded.
		loadedTypes = [1]bool{
			_q.withMonitor != nil,
		}
	)
	_spec.ScanValues = func(columns []string) ([]any, error) {
		return (*ChannelMonitorDailyRollup).scanValues(nil, columns)
	}
	_spec.Assign = func(columns []string, values []any) error {
		node := &ChannelMonitorDailyRollup{config: _q.config}
		nodes = append(nodes, node)
		node.Edges.loadedTypes = loadedTypes
		return node.assignValues(columns, values)
	}
	if len(_q.modifiers) > 0 {
		_spec.Modifiers = _q.modifiers
	}
	for i := range hooks {
		hooks[i](ctx, _spec)
	}
	if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nodes, nil
	}
	// Eager-load the monitor edge in a single follow-up query.
	if query := _q.withMonitor; query != nil {
		if err := _q.loadMonitor(ctx, query, nodes, nil,
			func(n *ChannelMonitorDailyRollup, e *ChannelMonitor) { n.Edges.Monitor = e }); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
// loadMonitor batch-loads the ChannelMonitor parents for the given rollup
// nodes (M2O edge on monitor_id) and wires each parent to its children via
// assign. init is unused for this edge shape and may be nil.
func (_q *ChannelMonitorDailyRollupQuery) loadMonitor(ctx context.Context, query *ChannelMonitorQuery, nodes []*ChannelMonitorDailyRollup, init func(*ChannelMonitorDailyRollup), assign func(*ChannelMonitorDailyRollup, *ChannelMonitor)) error {
	// Collect distinct FKs while remembering which nodes share each FK.
	ids := make([]int64, 0, len(nodes))
	nodeids := make(map[int64][]*ChannelMonitorDailyRollup)
	for i := range nodes {
		fk := nodes[i].MonitorID
		if _, ok := nodeids[fk]; !ok {
			ids = append(ids, fk)
		}
		nodeids[fk] = append(nodeids[fk], nodes[i])
	}
	if len(ids) == 0 {
		return nil
	}
	query.Where(channelmonitor.IDIn(ids...))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		nodes, ok := nodeids[n.ID]
		if !ok {
			// Should be impossible: we only queried IDs taken from the nodes.
			return fmt.Errorf(`unexpected foreign-key "monitor_id" returned %v`, n.ID)
		}
		for i := range nodes {
			assign(nodes[i], n)
		}
	}
	return nil
}
// sqlCount executes a COUNT over the current query spec.
func (_q *ChannelMonitorDailyRollupQuery) sqlCount(ctx context.Context) (int, error) {
	_spec := _q.querySpec()
	if len(_q.modifiers) > 0 {
		_spec.Modifiers = _q.modifiers
	}
	_spec.Node.Columns = _q.ctx.Fields
	if len(_q.ctx.Fields) > 0 {
		// With an explicit column list, honor the caller's Unique choice.
		_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
	}
	return sqlgraph.CountNodes(ctx, _q.driver, _spec)
}

// querySpec translates the builder state (fields, predicates, order,
// limit/offset, uniqueness) into a sqlgraph.QuerySpec.
func (_q *ChannelMonitorDailyRollupQuery) querySpec() *sqlgraph.QuerySpec {
	_spec := sqlgraph.NewQuerySpec(channelmonitordailyrollup.Table, channelmonitordailyrollup.Columns, sqlgraph.NewFieldSpec(channelmonitordailyrollup.FieldID, field.TypeInt64))
	_spec.From = _q.sql
	if unique := _q.ctx.Unique; unique != nil {
		_spec.Unique = *unique
	} else if _q.path != nil {
		// Edge traversals can yield duplicates; default to unique rows.
		_spec.Unique = true
	}
	if fields := _q.ctx.Fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		// The ID column is always selected so entities can be identified.
		_spec.Node.Columns = append(_spec.Node.Columns, channelmonitordailyrollup.FieldID)
		for i := range fields {
			if fields[i] != channelmonitordailyrollup.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
			}
		}
		// The FK is required to resolve the eager-loaded monitor edge.
		if _q.withMonitor != nil {
			_spec.Node.AddColumnOnce(channelmonitordailyrollup.FieldMonitorID)
		}
	}
	if ps := _q.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if limit := _q.ctx.Limit; limit != nil {
		_spec.Limit = *limit
	}
	if offset := _q.ctx.Offset; offset != nil {
		_spec.Offset = *offset
	}
	if ps := _q.order; len(ps) > 0 {
		_spec.Order = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	return _spec
}
// sqlQuery builds the raw SQL selector for this query, applying modifiers,
// predicates, ordering, and limit/offset. If a traversal selector (_q.sql)
// exists, it is extended instead of starting from the base table.
func (_q *ChannelMonitorDailyRollupQuery) sqlQuery(ctx context.Context) *sql.Selector {
	builder := sql.Dialect(_q.driver.Dialect())
	t1 := builder.Table(channelmonitordailyrollup.Table)
	columns := _q.ctx.Fields
	if len(columns) == 0 {
		columns = channelmonitordailyrollup.Columns
	}
	selector := builder.Select(t1.Columns(columns...)...).From(t1)
	if _q.sql != nil {
		selector = _q.sql
		selector.Select(selector.Columns(columns...)...)
	}
	if _q.ctx.Unique != nil && *_q.ctx.Unique {
		selector.Distinct()
	}
	for _, m := range _q.modifiers {
		m(selector)
	}
	for _, p := range _q.predicates {
		p(selector)
	}
	for _, p := range _q.order {
		p(selector)
	}
	if offset := _q.ctx.Offset; offset != nil {
		// limit is mandatory for offset clause. We start
		// with default value, and override it below if needed.
		selector.Offset(*offset).Limit(math.MaxInt32)
	}
	if limit := _q.ctx.Limit; limit != nil {
		selector.Limit(*limit)
	}
	return selector
}
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
// either committed or rolled-back.
func (_q *ChannelMonitorDailyRollupQuery) ForUpdate(opts ...sql.LockOption) *ChannelMonitorDailyRollupQuery {
	// PostgreSQL rejects FOR UPDATE combined with SELECT DISTINCT,
	// so uniqueness is disabled for that dialect.
	if _q.driver.Dialect() == dialect.Postgres {
		_q.Unique(false)
	}
	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
		s.ForUpdate(opts...)
	})
	return _q
}

// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
// on any rows that are read. Other sessions can read the rows, but cannot modify them
// until your transaction commits.
func (_q *ChannelMonitorDailyRollupQuery) ForShare(opts ...sql.LockOption) *ChannelMonitorDailyRollupQuery {
	// Same Postgres DISTINCT restriction applies to FOR SHARE.
	if _q.driver.Dialect() == dialect.Postgres {
		_q.Unique(false)
	}
	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
		s.ForShare(opts...)
	})
	return _q
}
// ChannelMonitorDailyRollupGroupBy is the group-by builder for ChannelMonitorDailyRollup entities.
type ChannelMonitorDailyRollupGroupBy struct {
	selector
	// build is the underlying query whose results are grouped.
	build *ChannelMonitorDailyRollupQuery
}

// Aggregate adds the given aggregation functions to the group-by query.
func (_g *ChannelMonitorDailyRollupGroupBy) Aggregate(fns ...AggregateFunc) *ChannelMonitorDailyRollupGroupBy {
	_g.fns = append(_g.fns, fns...)
	return _g
}

// Scan applies the selector query and scans the result into the given value.
func (_g *ChannelMonitorDailyRollupGroupBy) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
	if err := _g.build.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*ChannelMonitorDailyRollupQuery, *ChannelMonitorDailyRollupGroupBy](ctx, _g.build, _g, _g.build.inters, v)
}
// sqlScan builds the GROUP BY statement (grouped columns + aggregations),
// executes it, and scans the rows into v.
func (_g *ChannelMonitorDailyRollupGroupBy) sqlScan(ctx context.Context, root *ChannelMonitorDailyRollupQuery, v any) error {
	selector := root.sqlQuery(ctx).Select()
	aggregation := make([]string, 0, len(_g.fns))
	for _, fn := range _g.fns {
		aggregation = append(aggregation, fn(selector))
	}
	// If no columns were selected yet, select the grouped fields followed
	// by the aggregation expressions.
	if len(selector.SelectedColumns()) == 0 {
		columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
		for _, f := range *_g.flds {
			columns = append(columns, selector.C(f))
		}
		columns = append(columns, aggregation...)
		selector.Select(columns...)
	}
	selector.GroupBy(selector.Columns(*_g.flds...)...)
	if err := selector.Err(); err != nil {
		return err
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
// ChannelMonitorDailyRollupSelect is the builder for selecting fields of ChannelMonitorDailyRollup entities.
type ChannelMonitorDailyRollupSelect struct {
	*ChannelMonitorDailyRollupQuery
	selector
}

// Aggregate adds the given aggregation functions to the selector query.
func (_s *ChannelMonitorDailyRollupSelect) Aggregate(fns ...AggregateFunc) *ChannelMonitorDailyRollupSelect {
	_s.fns = append(_s.fns, fns...)
	return _s
}

// Scan applies the selector query and scans the result into the given value.
func (_s *ChannelMonitorDailyRollupSelect) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
	if err := _s.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*ChannelMonitorDailyRollupQuery, *ChannelMonitorDailyRollupSelect](ctx, _s.ChannelMonitorDailyRollupQuery, _s, _s.inters, v)
}
// sqlScan executes the SELECT with any aggregations appended and scans the
// resulting rows into v.
func (_s *ChannelMonitorDailyRollupSelect) sqlScan(ctx context.Context, root *ChannelMonitorDailyRollupQuery, v any) error {
	selector := root.sqlQuery(ctx)
	aggregation := make([]string, 0, len(_s.fns))
	for _, fn := range _s.fns {
		aggregation = append(aggregation, fn(selector))
	}
	switch n := len(*_s.selector.flds); {
	case n == 0 && len(aggregation) > 0:
		// Aggregations only: they become the whole select list.
		selector.Select(aggregation...)
	case n != 0 && len(aggregation) > 0:
		// Fields plus aggregations: append the latter after the former.
		selector.AppendSelect(aggregation...)
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _s.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
961
backend/ent/channelmonitordailyrollup_update.go
Normal file
961
backend/ent/channelmonitordailyrollup_update.go
Normal file
@@ -0,0 +1,961 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// ChannelMonitorDailyRollupUpdate is the builder for updating ChannelMonitorDailyRollup entities.
type ChannelMonitorDailyRollupUpdate struct {
	config
	hooks    []Hook
	mutation *ChannelMonitorDailyRollupMutation
}

// Where appends a list predicates to the ChannelMonitorDailyRollupUpdate builder.
func (_u *ChannelMonitorDailyRollupUpdate) Where(ps ...predicate.ChannelMonitorDailyRollup) *ChannelMonitorDailyRollupUpdate {
	_u.mutation.Where(ps...)
	return _u
}
// SetMonitorID sets the "monitor_id" field.
func (_u *ChannelMonitorDailyRollupUpdate) SetMonitorID(v int64) *ChannelMonitorDailyRollupUpdate {
	_u.mutation.SetMonitorID(v)
	return _u
}

// SetNillableMonitorID sets the "monitor_id" field if the given value is not nil.
func (_u *ChannelMonitorDailyRollupUpdate) SetNillableMonitorID(v *int64) *ChannelMonitorDailyRollupUpdate {
	if v != nil {
		_u.SetMonitorID(*v)
	}
	return _u
}

// SetModel sets the "model" field.
func (_u *ChannelMonitorDailyRollupUpdate) SetModel(v string) *ChannelMonitorDailyRollupUpdate {
	_u.mutation.SetModel(v)
	return _u
}

// SetNillableModel sets the "model" field if the given value is not nil.
func (_u *ChannelMonitorDailyRollupUpdate) SetNillableModel(v *string) *ChannelMonitorDailyRollupUpdate {
	if v != nil {
		_u.SetModel(*v)
	}
	return _u
}

// SetBucketDate sets the "bucket_date" field.
func (_u *ChannelMonitorDailyRollupUpdate) SetBucketDate(v time.Time) *ChannelMonitorDailyRollupUpdate {
	_u.mutation.SetBucketDate(v)
	return _u
}

// SetNillableBucketDate sets the "bucket_date" field if the given value is not nil.
func (_u *ChannelMonitorDailyRollupUpdate) SetNillableBucketDate(v *time.Time) *ChannelMonitorDailyRollupUpdate {
	if v != nil {
		_u.SetBucketDate(*v)
	}
	return _u
}
// SetTotalChecks sets the "total_checks" field.
// It discards any pending AddTotalChecks delta on this builder first.
func (_u *ChannelMonitorDailyRollupUpdate) SetTotalChecks(v int) *ChannelMonitorDailyRollupUpdate {
	_u.mutation.ResetTotalChecks()
	_u.mutation.SetTotalChecks(v)
	return _u
}

// SetNillableTotalChecks sets the "total_checks" field if the given value is not nil.
func (_u *ChannelMonitorDailyRollupUpdate) SetNillableTotalChecks(v *int) *ChannelMonitorDailyRollupUpdate {
	if v != nil {
		_u.SetTotalChecks(*v)
	}
	return _u
}

// AddTotalChecks adds value to the "total_checks" field.
func (_u *ChannelMonitorDailyRollupUpdate) AddTotalChecks(v int) *ChannelMonitorDailyRollupUpdate {
	_u.mutation.AddTotalChecks(v)
	return _u
}

// SetOkCount sets the "ok_count" field.
// It discards any pending AddOkCount delta on this builder first.
func (_u *ChannelMonitorDailyRollupUpdate) SetOkCount(v int) *ChannelMonitorDailyRollupUpdate {
	_u.mutation.ResetOkCount()
	_u.mutation.SetOkCount(v)
	return _u
}

// SetNillableOkCount sets the "ok_count" field if the given value is not nil.
func (_u *ChannelMonitorDailyRollupUpdate) SetNillableOkCount(v *int) *ChannelMonitorDailyRollupUpdate {
	if v != nil {
		_u.SetOkCount(*v)
	}
	return _u
}

// AddOkCount adds value to the "ok_count" field.
func (_u *ChannelMonitorDailyRollupUpdate) AddOkCount(v int) *ChannelMonitorDailyRollupUpdate {
	_u.mutation.AddOkCount(v)
	return _u
}

// SetOperationalCount sets the "operational_count" field.
// It discards any pending AddOperationalCount delta on this builder first.
func (_u *ChannelMonitorDailyRollupUpdate) SetOperationalCount(v int) *ChannelMonitorDailyRollupUpdate {
	_u.mutation.ResetOperationalCount()
	_u.mutation.SetOperationalCount(v)
	return _u
}

// SetNillableOperationalCount sets the "operational_count" field if the given value is not nil.
func (_u *ChannelMonitorDailyRollupUpdate) SetNillableOperationalCount(v *int) *ChannelMonitorDailyRollupUpdate {
	if v != nil {
		_u.SetOperationalCount(*v)
	}
	return _u
}

// AddOperationalCount adds value to the "operational_count" field.
func (_u *ChannelMonitorDailyRollupUpdate) AddOperationalCount(v int) *ChannelMonitorDailyRollupUpdate {
	_u.mutation.AddOperationalCount(v)
	return _u
}

// SetDegradedCount sets the "degraded_count" field.
// It discards any pending AddDegradedCount delta on this builder first.
func (_u *ChannelMonitorDailyRollupUpdate) SetDegradedCount(v int) *ChannelMonitorDailyRollupUpdate {
	_u.mutation.ResetDegradedCount()
	_u.mutation.SetDegradedCount(v)
	return _u
}

// SetNillableDegradedCount sets the "degraded_count" field if the given value is not nil.
func (_u *ChannelMonitorDailyRollupUpdate) SetNillableDegradedCount(v *int) *ChannelMonitorDailyRollupUpdate {
	if v != nil {
		_u.SetDegradedCount(*v)
	}
	return _u
}

// AddDegradedCount adds value to the "degraded_count" field.
func (_u *ChannelMonitorDailyRollupUpdate) AddDegradedCount(v int) *ChannelMonitorDailyRollupUpdate {
	_u.mutation.AddDegradedCount(v)
	return _u
}

// SetFailedCount sets the "failed_count" field.
// It discards any pending AddFailedCount delta on this builder first.
func (_u *ChannelMonitorDailyRollupUpdate) SetFailedCount(v int) *ChannelMonitorDailyRollupUpdate {
	_u.mutation.ResetFailedCount()
	_u.mutation.SetFailedCount(v)
	return _u
}

// SetNillableFailedCount sets the "failed_count" field if the given value is not nil.
func (_u *ChannelMonitorDailyRollupUpdate) SetNillableFailedCount(v *int) *ChannelMonitorDailyRollupUpdate {
	if v != nil {
		_u.SetFailedCount(*v)
	}
	return _u
}

// AddFailedCount adds value to the "failed_count" field.
func (_u *ChannelMonitorDailyRollupUpdate) AddFailedCount(v int) *ChannelMonitorDailyRollupUpdate {
	_u.mutation.AddFailedCount(v)
	return _u
}
// SetErrorCount sets the "error_count" field.
// It discards any pending AddErrorCount delta on this builder first.
func (_u *ChannelMonitorDailyRollupUpdate) SetErrorCount(v int) *ChannelMonitorDailyRollupUpdate {
	_u.mutation.ResetErrorCount()
	_u.mutation.SetErrorCount(v)
	return _u
}

// SetNillableErrorCount sets the "error_count" field if the given value is not nil.
func (_u *ChannelMonitorDailyRollupUpdate) SetNillableErrorCount(v *int) *ChannelMonitorDailyRollupUpdate {
	if v != nil {
		_u.SetErrorCount(*v)
	}
	return _u
}

// AddErrorCount adds value to the "error_count" field.
func (_u *ChannelMonitorDailyRollupUpdate) AddErrorCount(v int) *ChannelMonitorDailyRollupUpdate {
	_u.mutation.AddErrorCount(v)
	return _u
}

// SetSumLatencyMs sets the "sum_latency_ms" field.
// It discards any pending AddSumLatencyMs delta on this builder first.
func (_u *ChannelMonitorDailyRollupUpdate) SetSumLatencyMs(v int64) *ChannelMonitorDailyRollupUpdate {
	_u.mutation.ResetSumLatencyMs()
	_u.mutation.SetSumLatencyMs(v)
	return _u
}

// SetNillableSumLatencyMs sets the "sum_latency_ms" field if the given value is not nil.
func (_u *ChannelMonitorDailyRollupUpdate) SetNillableSumLatencyMs(v *int64) *ChannelMonitorDailyRollupUpdate {
	if v != nil {
		_u.SetSumLatencyMs(*v)
	}
	return _u
}

// AddSumLatencyMs adds value to the "sum_latency_ms" field.
func (_u *ChannelMonitorDailyRollupUpdate) AddSumLatencyMs(v int64) *ChannelMonitorDailyRollupUpdate {
	_u.mutation.AddSumLatencyMs(v)
	return _u
}

// SetCountLatency sets the "count_latency" field.
// It discards any pending AddCountLatency delta on this builder first.
func (_u *ChannelMonitorDailyRollupUpdate) SetCountLatency(v int) *ChannelMonitorDailyRollupUpdate {
	_u.mutation.ResetCountLatency()
	_u.mutation.SetCountLatency(v)
	return _u
}

// SetNillableCountLatency sets the "count_latency" field if the given value is not nil.
func (_u *ChannelMonitorDailyRollupUpdate) SetNillableCountLatency(v *int) *ChannelMonitorDailyRollupUpdate {
	if v != nil {
		_u.SetCountLatency(*v)
	}
	return _u
}

// AddCountLatency adds value to the "count_latency" field.
func (_u *ChannelMonitorDailyRollupUpdate) AddCountLatency(v int) *ChannelMonitorDailyRollupUpdate {
	_u.mutation.AddCountLatency(v)
	return _u
}

// SetSumPingLatencyMs sets the "sum_ping_latency_ms" field.
// It discards any pending AddSumPingLatencyMs delta on this builder first.
func (_u *ChannelMonitorDailyRollupUpdate) SetSumPingLatencyMs(v int64) *ChannelMonitorDailyRollupUpdate {
	_u.mutation.ResetSumPingLatencyMs()
	_u.mutation.SetSumPingLatencyMs(v)
	return _u
}

// SetNillableSumPingLatencyMs sets the "sum_ping_latency_ms" field if the given value is not nil.
func (_u *ChannelMonitorDailyRollupUpdate) SetNillableSumPingLatencyMs(v *int64) *ChannelMonitorDailyRollupUpdate {
	if v != nil {
		_u.SetSumPingLatencyMs(*v)
	}
	return _u
}

// AddSumPingLatencyMs adds value to the "sum_ping_latency_ms" field.
func (_u *ChannelMonitorDailyRollupUpdate) AddSumPingLatencyMs(v int64) *ChannelMonitorDailyRollupUpdate {
	_u.mutation.AddSumPingLatencyMs(v)
	return _u
}

// SetCountPingLatency sets the "count_ping_latency" field.
// It discards any pending AddCountPingLatency delta on this builder first.
func (_u *ChannelMonitorDailyRollupUpdate) SetCountPingLatency(v int) *ChannelMonitorDailyRollupUpdate {
	_u.mutation.ResetCountPingLatency()
	_u.mutation.SetCountPingLatency(v)
	return _u
}

// SetNillableCountPingLatency sets the "count_ping_latency" field if the given value is not nil.
func (_u *ChannelMonitorDailyRollupUpdate) SetNillableCountPingLatency(v *int) *ChannelMonitorDailyRollupUpdate {
	if v != nil {
		_u.SetCountPingLatency(*v)
	}
	return _u
}

// AddCountPingLatency adds value to the "count_ping_latency" field.
func (_u *ChannelMonitorDailyRollupUpdate) AddCountPingLatency(v int) *ChannelMonitorDailyRollupUpdate {
	_u.mutation.AddCountPingLatency(v)
	return _u
}
// SetComputedAt sets the "computed_at" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SetComputedAt(v time.Time) *ChannelMonitorDailyRollupUpdate {
|
||||
_u.mutation.SetComputedAt(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetMonitor sets the "monitor" edge to the ChannelMonitor entity.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SetMonitor(v *ChannelMonitor) *ChannelMonitorDailyRollupUpdate {
|
||||
return _u.SetMonitorID(v.ID)
|
||||
}
|
||||
|
||||
// Mutation returns the ChannelMonitorDailyRollupMutation object of the builder.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) Mutation() *ChannelMonitorDailyRollupMutation {
|
||||
return _u.mutation
|
||||
}
|
||||
|
||||
// ClearMonitor clears the "monitor" edge to the ChannelMonitor entity.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) ClearMonitor() *ChannelMonitorDailyRollupUpdate {
|
||||
_u.mutation.ClearMonitor()
|
||||
return _u
|
||||
}
|
||||
|
||||
// Save executes the query and returns the number of nodes affected by the update operation.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) Save(ctx context.Context) (int, error) {
|
||||
_u.defaults()
|
||||
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SaveX(ctx context.Context) int {
|
||||
affected, err := _u.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return affected
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) Exec(ctx context.Context) error {
|
||||
_, err := _u.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) ExecX(ctx context.Context) {
|
||||
if err := _u.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) defaults() {
|
||||
if _, ok := _u.mutation.ComputedAt(); !ok {
|
||||
v := channelmonitordailyrollup.UpdateDefaultComputedAt()
|
||||
_u.mutation.SetComputedAt(v)
|
||||
}
|
||||
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) check() error {
|
||||
if v, ok := _u.mutation.Model(); ok {
|
||||
if err := channelmonitordailyrollup.ModelValidator(v); err != nil {
|
||||
return &ValidationError{Name: "model", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorDailyRollup.model": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _u.mutation.MonitorCleared() && len(_u.mutation.MonitorIDs()) > 0 {
|
||||
return errors.New(`ent: clearing a required unique edge "ChannelMonitorDailyRollup.monitor"`)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// sqlSave translates the staged mutation into a sqlgraph.UpdateSpec and
// executes it, returning the number of rows affected.
func (_u *ChannelMonitorDailyRollupUpdate) sqlSave(ctx context.Context) (_node int, err error) {
	// Validate fields and edge invariants before touching the database.
	if err := _u.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(channelmonitordailyrollup.Table, channelmonitordailyrollup.Columns, sqlgraph.NewFieldSpec(channelmonitordailyrollup.FieldID, field.TypeInt64))
	// Predicates accumulated on the builder become the WHERE clause.
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Copy every staged SetX value into the spec; AddedX values are applied
	// via AddField (additive column update).
	if value, ok := _u.mutation.Model(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldModel, field.TypeString, value)
	}
	if value, ok := _u.mutation.BucketDate(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldBucketDate, field.TypeTime, value)
	}
	if value, ok := _u.mutation.TotalChecks(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldTotalChecks, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedTotalChecks(); ok {
		_spec.AddField(channelmonitordailyrollup.FieldTotalChecks, field.TypeInt, value)
	}
	if value, ok := _u.mutation.OkCount(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldOkCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedOkCount(); ok {
		_spec.AddField(channelmonitordailyrollup.FieldOkCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.OperationalCount(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldOperationalCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedOperationalCount(); ok {
		_spec.AddField(channelmonitordailyrollup.FieldOperationalCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.DegradedCount(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldDegradedCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedDegradedCount(); ok {
		_spec.AddField(channelmonitordailyrollup.FieldDegradedCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.FailedCount(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldFailedCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedFailedCount(); ok {
		_spec.AddField(channelmonitordailyrollup.FieldFailedCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.ErrorCount(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldErrorCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedErrorCount(); ok {
		_spec.AddField(channelmonitordailyrollup.FieldErrorCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.SumLatencyMs(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldSumLatencyMs, field.TypeInt64, value)
	}
	if value, ok := _u.mutation.AddedSumLatencyMs(); ok {
		_spec.AddField(channelmonitordailyrollup.FieldSumLatencyMs, field.TypeInt64, value)
	}
	if value, ok := _u.mutation.CountLatency(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldCountLatency, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedCountLatency(); ok {
		_spec.AddField(channelmonitordailyrollup.FieldCountLatency, field.TypeInt, value)
	}
	if value, ok := _u.mutation.SumPingLatencyMs(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldSumPingLatencyMs, field.TypeInt64, value)
	}
	if value, ok := _u.mutation.AddedSumPingLatencyMs(); ok {
		_spec.AddField(channelmonitordailyrollup.FieldSumPingLatencyMs, field.TypeInt64, value)
	}
	if value, ok := _u.mutation.CountPingLatency(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldCountPingLatency, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedCountPingLatency(); ok {
		_spec.AddField(channelmonitordailyrollup.FieldCountPingLatency, field.TypeInt, value)
	}
	if value, ok := _u.mutation.ComputedAt(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldComputedAt, field.TypeTime, value)
	}
	// Stage a clear of the inverse M2O "monitor" edge; check() forbids
	// combining this with new monitor IDs.
	if _u.mutation.MonitorCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   channelmonitordailyrollup.MonitorTable,
			Columns: []string{channelmonitordailyrollup.MonitorColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	// Stage newly assigned monitor IDs for the same edge.
	if nodes := _u.mutation.MonitorIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   channelmonitordailyrollup.MonitorTable,
			Columns: []string{channelmonitordailyrollup.MonitorColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// Execute the UPDATE, mapping driver errors onto ent error types.
	if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{channelmonitordailyrollup.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return 0, err
	}
	_u.mutation.done = true
	return _node, nil
}
|
||||
|
||||
// ChannelMonitorDailyRollupUpdateOne is the builder for updating a single ChannelMonitorDailyRollup entity.
type ChannelMonitorDailyRollupUpdateOne struct {
	config
	// fields optionally limits the columns loaded back into the returned
	// entity (populated via Select).
	fields []string
	// hooks run around sqlSave (applied through withHooks in Save).
	hooks []Hook
	// mutation stages all field and edge changes until Save executes them.
	mutation *ChannelMonitorDailyRollupMutation
}
|
||||
|
||||
// SetMonitorID sets the "monitor_id" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetMonitorID(v int64) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.SetMonitorID(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableMonitorID sets the "monitor_id" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableMonitorID(v *int64) *ChannelMonitorDailyRollupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetMonitorID(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetModel sets the "model" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetModel(v string) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.SetModel(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableModel sets the "model" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableModel(v *string) *ChannelMonitorDailyRollupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetModel(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetBucketDate sets the "bucket_date" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetBucketDate(v time.Time) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.SetBucketDate(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableBucketDate sets the "bucket_date" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableBucketDate(v *time.Time) *ChannelMonitorDailyRollupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetBucketDate(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetTotalChecks sets the "total_checks" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetTotalChecks(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.ResetTotalChecks()
|
||||
_u.mutation.SetTotalChecks(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableTotalChecks sets the "total_checks" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableTotalChecks(v *int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetTotalChecks(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddTotalChecks adds value to the "total_checks" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) AddTotalChecks(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.AddTotalChecks(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetOkCount sets the "ok_count" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetOkCount(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.ResetOkCount()
|
||||
_u.mutation.SetOkCount(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableOkCount sets the "ok_count" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableOkCount(v *int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetOkCount(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddOkCount adds value to the "ok_count" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) AddOkCount(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.AddOkCount(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetOperationalCount sets the "operational_count" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetOperationalCount(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.ResetOperationalCount()
|
||||
_u.mutation.SetOperationalCount(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableOperationalCount sets the "operational_count" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableOperationalCount(v *int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetOperationalCount(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddOperationalCount adds value to the "operational_count" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) AddOperationalCount(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.AddOperationalCount(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetDegradedCount sets the "degraded_count" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetDegradedCount(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.ResetDegradedCount()
|
||||
_u.mutation.SetDegradedCount(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableDegradedCount sets the "degraded_count" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableDegradedCount(v *int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetDegradedCount(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddDegradedCount adds value to the "degraded_count" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) AddDegradedCount(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.AddDegradedCount(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetFailedCount sets the "failed_count" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetFailedCount(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.ResetFailedCount()
|
||||
_u.mutation.SetFailedCount(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableFailedCount sets the "failed_count" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableFailedCount(v *int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetFailedCount(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddFailedCount adds value to the "failed_count" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) AddFailedCount(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.AddFailedCount(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetErrorCount sets the "error_count" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetErrorCount(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.ResetErrorCount()
|
||||
_u.mutation.SetErrorCount(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableErrorCount sets the "error_count" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableErrorCount(v *int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetErrorCount(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddErrorCount adds value to the "error_count" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) AddErrorCount(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.AddErrorCount(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetSumLatencyMs sets the "sum_latency_ms" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetSumLatencyMs(v int64) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.ResetSumLatencyMs()
|
||||
_u.mutation.SetSumLatencyMs(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableSumLatencyMs sets the "sum_latency_ms" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableSumLatencyMs(v *int64) *ChannelMonitorDailyRollupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetSumLatencyMs(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddSumLatencyMs adds value to the "sum_latency_ms" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) AddSumLatencyMs(v int64) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.AddSumLatencyMs(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetCountLatency sets the "count_latency" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetCountLatency(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.ResetCountLatency()
|
||||
_u.mutation.SetCountLatency(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableCountLatency sets the "count_latency" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableCountLatency(v *int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetCountLatency(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddCountLatency adds value to the "count_latency" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) AddCountLatency(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.AddCountLatency(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetSumPingLatencyMs sets the "sum_ping_latency_ms" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetSumPingLatencyMs(v int64) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.ResetSumPingLatencyMs()
|
||||
_u.mutation.SetSumPingLatencyMs(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableSumPingLatencyMs sets the "sum_ping_latency_ms" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableSumPingLatencyMs(v *int64) *ChannelMonitorDailyRollupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetSumPingLatencyMs(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddSumPingLatencyMs adds value to the "sum_ping_latency_ms" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) AddSumPingLatencyMs(v int64) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.AddSumPingLatencyMs(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetCountPingLatency sets the "count_ping_latency" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetCountPingLatency(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.ResetCountPingLatency()
|
||||
_u.mutation.SetCountPingLatency(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableCountPingLatency sets the "count_ping_latency" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableCountPingLatency(v *int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetCountPingLatency(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddCountPingLatency adds value to the "count_ping_latency" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) AddCountPingLatency(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.AddCountPingLatency(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetComputedAt sets the "computed_at" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetComputedAt(v time.Time) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.SetComputedAt(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetMonitor sets the "monitor" edge to the ChannelMonitor entity.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetMonitor(v *ChannelMonitor) *ChannelMonitorDailyRollupUpdateOne {
|
||||
return _u.SetMonitorID(v.ID)
|
||||
}
|
||||
|
||||
// Mutation returns the ChannelMonitorDailyRollupMutation object of the builder.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) Mutation() *ChannelMonitorDailyRollupMutation {
|
||||
return _u.mutation
|
||||
}
|
||||
|
||||
// ClearMonitor clears the "monitor" edge to the ChannelMonitor entity.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) ClearMonitor() *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.ClearMonitor()
|
||||
return _u
|
||||
}
|
||||
|
||||
// Where appends a list predicates to the ChannelMonitorDailyRollupUpdate builder.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) Where(ps ...predicate.ChannelMonitorDailyRollup) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.Where(ps...)
|
||||
return _u
|
||||
}
|
||||
|
||||
// Select allows selecting one or more fields (columns) of the returned entity.
|
||||
// The default is selecting all fields defined in the entity schema.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) Select(field string, fields ...string) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.fields = append([]string{field}, fields...)
|
||||
return _u
|
||||
}
|
||||
|
||||
// Save executes the query and returns the updated ChannelMonitorDailyRollup entity.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) Save(ctx context.Context) (*ChannelMonitorDailyRollup, error) {
|
||||
_u.defaults()
|
||||
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SaveX(ctx context.Context) *ChannelMonitorDailyRollup {
|
||||
node, err := _u.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// Exec executes the query on the entity.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) Exec(ctx context.Context) error {
|
||||
_, err := _u.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) ExecX(ctx context.Context) {
|
||||
if err := _u.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) defaults() {
|
||||
if _, ok := _u.mutation.ComputedAt(); !ok {
|
||||
v := channelmonitordailyrollup.UpdateDefaultComputedAt()
|
||||
_u.mutation.SetComputedAt(v)
|
||||
}
|
||||
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) check() error {
|
||||
if v, ok := _u.mutation.Model(); ok {
|
||||
if err := channelmonitordailyrollup.ModelValidator(v); err != nil {
|
||||
return &ValidationError{Name: "model", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorDailyRollup.model": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _u.mutation.MonitorCleared() && len(_u.mutation.MonitorIDs()) > 0 {
|
||||
return errors.New(`ent: clearing a required unique edge "ChannelMonitorDailyRollup.monitor"`)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// sqlSave translates the staged mutation into a sqlgraph.UpdateSpec targeted
// at a single row (by ID), executes it, and returns the refreshed entity.
func (_u *ChannelMonitorDailyRollupUpdateOne) sqlSave(ctx context.Context) (_node *ChannelMonitorDailyRollup, err error) {
	// Validate fields and edge invariants before touching the database.
	if err := _u.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(channelmonitordailyrollup.Table, channelmonitordailyrollup.Columns, sqlgraph.NewFieldSpec(channelmonitordailyrollup.FieldID, field.TypeInt64))
	// UpdateOne requires the target ID to be present on the mutation.
	id, ok := _u.mutation.ID()
	if !ok {
		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "ChannelMonitorDailyRollup.id" for update`)}
	}
	_spec.Node.ID.Value = id
	// Honor Select(): validate each requested column and always include the ID.
	if fields := _u.fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, channelmonitordailyrollup.FieldID)
		for _, f := range fields {
			if !channelmonitordailyrollup.ValidColumn(f) {
				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
			}
			if f != channelmonitordailyrollup.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, f)
			}
		}
	}
	// Predicates accumulated on the builder become the WHERE clause.
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Copy every staged SetX value into the spec; AddedX values are applied
	// via AddField (additive column update).
	if value, ok := _u.mutation.Model(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldModel, field.TypeString, value)
	}
	if value, ok := _u.mutation.BucketDate(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldBucketDate, field.TypeTime, value)
	}
	if value, ok := _u.mutation.TotalChecks(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldTotalChecks, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedTotalChecks(); ok {
		_spec.AddField(channelmonitordailyrollup.FieldTotalChecks, field.TypeInt, value)
	}
	if value, ok := _u.mutation.OkCount(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldOkCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedOkCount(); ok {
		_spec.AddField(channelmonitordailyrollup.FieldOkCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.OperationalCount(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldOperationalCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedOperationalCount(); ok {
		_spec.AddField(channelmonitordailyrollup.FieldOperationalCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.DegradedCount(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldDegradedCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedDegradedCount(); ok {
		_spec.AddField(channelmonitordailyrollup.FieldDegradedCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.FailedCount(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldFailedCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedFailedCount(); ok {
		_spec.AddField(channelmonitordailyrollup.FieldFailedCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.ErrorCount(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldErrorCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedErrorCount(); ok {
		_spec.AddField(channelmonitordailyrollup.FieldErrorCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.SumLatencyMs(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldSumLatencyMs, field.TypeInt64, value)
	}
	if value, ok := _u.mutation.AddedSumLatencyMs(); ok {
		_spec.AddField(channelmonitordailyrollup.FieldSumLatencyMs, field.TypeInt64, value)
	}
	if value, ok := _u.mutation.CountLatency(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldCountLatency, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedCountLatency(); ok {
		_spec.AddField(channelmonitordailyrollup.FieldCountLatency, field.TypeInt, value)
	}
	if value, ok := _u.mutation.SumPingLatencyMs(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldSumPingLatencyMs, field.TypeInt64, value)
	}
	if value, ok := _u.mutation.AddedSumPingLatencyMs(); ok {
		_spec.AddField(channelmonitordailyrollup.FieldSumPingLatencyMs, field.TypeInt64, value)
	}
	if value, ok := _u.mutation.CountPingLatency(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldCountPingLatency, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedCountPingLatency(); ok {
		_spec.AddField(channelmonitordailyrollup.FieldCountPingLatency, field.TypeInt, value)
	}
	if value, ok := _u.mutation.ComputedAt(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldComputedAt, field.TypeTime, value)
	}
	// Stage a clear of the inverse M2O "monitor" edge; check() forbids
	// combining this with new monitor IDs.
	if _u.mutation.MonitorCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   channelmonitordailyrollup.MonitorTable,
			Columns: []string{channelmonitordailyrollup.MonitorColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	// Stage newly assigned monitor IDs for the same edge.
	if nodes := _u.mutation.MonitorIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   channelmonitordailyrollup.MonitorTable,
			Columns: []string{channelmonitordailyrollup.MonitorColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// Wire the returned row back into a fresh entity via scan/assign hooks.
	_node = &ChannelMonitorDailyRollup{config: _u.config}
	_spec.Assign = _node.assignValues
	_spec.ScanValues = _node.scanValues
	// Execute the UPDATE, mapping driver errors onto ent error types.
	if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{channelmonitordailyrollup.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	_u.mutation.done = true
	return _node, nil
}
|
||||
207
backend/ent/channelmonitorhistory.go
Normal file
207
backend/ent/channelmonitorhistory.go
Normal file
@@ -0,0 +1,207 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory"
|
||||
)
|
||||
|
||||
// ChannelMonitorHistory is the model entity for the ChannelMonitorHistory schema.
type ChannelMonitorHistory struct {
	config `json:"-"`
	// ID of the ent.
	ID int64 `json:"id,omitempty"`
	// MonitorID holds the value of the "monitor_id" field.
	MonitorID int64 `json:"monitor_id,omitempty"`
	// Model holds the value of the "model" field.
	Model string `json:"model,omitempty"`
	// Status holds the value of the "status" field.
	Status channelmonitorhistory.Status `json:"status,omitempty"`
	// LatencyMs holds the value of the "latency_ms" field.
	// Pointer-typed — presumably nil when the column is NULL (scanValues
	// reads it through sql.NullInt64); confirm against assignValues.
	LatencyMs *int `json:"latency_ms,omitempty"`
	// PingLatencyMs holds the value of the "ping_latency_ms" field.
	// Pointer-typed for the same nullable-column reason as LatencyMs.
	PingLatencyMs *int `json:"ping_latency_ms,omitempty"`
	// Message holds the value of the "message" field.
	Message string `json:"message,omitempty"`
	// CheckedAt holds the value of the "checked_at" field.
	CheckedAt time.Time `json:"checked_at,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the ChannelMonitorHistoryQuery when eager-loading is set.
	Edges ChannelMonitorHistoryEdges `json:"edges"`
	// selectValues stores values for columns outside the static schema.
	selectValues sql.SelectValues
}
|
||||
|
||||
// ChannelMonitorHistoryEdges holds the relations/edges for other nodes in the graph.
type ChannelMonitorHistoryEdges struct {
	// Monitor holds the value of the monitor edge.
	Monitor *ChannelMonitor `json:"monitor,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	// Index 0 corresponds to the "monitor" edge (see MonitorOrErr).
	loadedTypes [1]bool
}
|
||||
|
||||
// MonitorOrErr returns the Monitor value or an error if the edge
|
||||
// was not loaded in eager-loading, or loaded but was not found.
|
||||
func (e ChannelMonitorHistoryEdges) MonitorOrErr() (*ChannelMonitor, error) {
|
||||
if e.Monitor != nil {
|
||||
return e.Monitor, nil
|
||||
} else if e.loadedTypes[0] {
|
||||
return nil, &NotFoundError{label: channelmonitor.Label}
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "monitor"}
|
||||
}
|
||||
|
||||
// scanValues returns the types for scanning values from sql.Rows.
|
||||
func (*ChannelMonitorHistory) scanValues(columns []string) ([]any, error) {
|
||||
values := make([]any, len(columns))
|
||||
for i := range columns {
|
||||
switch columns[i] {
|
||||
case channelmonitorhistory.FieldID, channelmonitorhistory.FieldMonitorID, channelmonitorhistory.FieldLatencyMs, channelmonitorhistory.FieldPingLatencyMs:
|
||||
values[i] = new(sql.NullInt64)
|
||||
case channelmonitorhistory.FieldModel, channelmonitorhistory.FieldStatus, channelmonitorhistory.FieldMessage:
|
||||
values[i] = new(sql.NullString)
|
||||
case channelmonitorhistory.FieldCheckedAt:
|
||||
values[i] = new(sql.NullTime)
|
||||
default:
|
||||
values[i] = new(sql.UnknownType)
|
||||
}
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// assignValues assigns the values that were returned from sql.Rows (after scanning)
|
||||
// to the ChannelMonitorHistory fields.
|
||||
func (_m *ChannelMonitorHistory) assignValues(columns []string, values []any) error {
|
||||
if m, n := len(values), len(columns); m < n {
|
||||
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
|
||||
}
|
||||
for i := range columns {
|
||||
switch columns[i] {
|
||||
case channelmonitorhistory.FieldID:
|
||||
value, ok := values[i].(*sql.NullInt64)
|
||||
if !ok {
|
||||
return fmt.Errorf("unexpected type %T for field id", value)
|
||||
}
|
||||
_m.ID = int64(value.Int64)
|
||||
case channelmonitorhistory.FieldMonitorID:
|
||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field monitor_id", values[i])
|
||||
} else if value.Valid {
|
||||
_m.MonitorID = value.Int64
|
||||
}
|
||||
case channelmonitorhistory.FieldModel:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field model", values[i])
|
||||
} else if value.Valid {
|
||||
_m.Model = value.String
|
||||
}
|
||||
case channelmonitorhistory.FieldStatus:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field status", values[i])
|
||||
} else if value.Valid {
|
||||
_m.Status = channelmonitorhistory.Status(value.String)
|
||||
}
|
||||
case channelmonitorhistory.FieldLatencyMs:
|
||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field latency_ms", values[i])
|
||||
} else if value.Valid {
|
||||
_m.LatencyMs = new(int)
|
||||
*_m.LatencyMs = int(value.Int64)
|
||||
}
|
||||
case channelmonitorhistory.FieldPingLatencyMs:
|
||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field ping_latency_ms", values[i])
|
||||
} else if value.Valid {
|
||||
_m.PingLatencyMs = new(int)
|
||||
*_m.PingLatencyMs = int(value.Int64)
|
||||
}
|
||||
case channelmonitorhistory.FieldMessage:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field message", values[i])
|
||||
} else if value.Valid {
|
||||
_m.Message = value.String
|
||||
}
|
||||
case channelmonitorhistory.FieldCheckedAt:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field checked_at", values[i])
|
||||
} else if value.Valid {
|
||||
_m.CheckedAt = value.Time
|
||||
}
|
||||
default:
|
||||
_m.selectValues.Set(columns[i], values[i])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value returns the ent.Value that was dynamically selected and assigned to the ChannelMonitorHistory.
|
||||
// This includes values selected through modifiers, order, etc.
|
||||
func (_m *ChannelMonitorHistory) Value(name string) (ent.Value, error) {
|
||||
return _m.selectValues.Get(name)
|
||||
}
|
||||
|
||||
// QueryMonitor queries the "monitor" edge of the ChannelMonitorHistory entity.
|
||||
func (_m *ChannelMonitorHistory) QueryMonitor() *ChannelMonitorQuery {
|
||||
return NewChannelMonitorHistoryClient(_m.config).QueryMonitor(_m)
|
||||
}
|
||||
|
||||
// Update returns a builder for updating this ChannelMonitorHistory.
|
||||
// Note that you need to call ChannelMonitorHistory.Unwrap() before calling this method if this ChannelMonitorHistory
|
||||
// was returned from a transaction, and the transaction was committed or rolled back.
|
||||
func (_m *ChannelMonitorHistory) Update() *ChannelMonitorHistoryUpdateOne {
|
||||
return NewChannelMonitorHistoryClient(_m.config).UpdateOne(_m)
|
||||
}
|
||||
|
||||
// Unwrap unwraps the ChannelMonitorHistory entity that was returned from a transaction after it was closed,
|
||||
// so that all future queries will be executed through the driver which created the transaction.
|
||||
func (_m *ChannelMonitorHistory) Unwrap() *ChannelMonitorHistory {
|
||||
_tx, ok := _m.config.driver.(*txDriver)
|
||||
if !ok {
|
||||
panic("ent: ChannelMonitorHistory is not a transactional entity")
|
||||
}
|
||||
_m.config.driver = _tx.drv
|
||||
return _m
|
||||
}
|
||||
|
||||
// String implements the fmt.Stringer.
|
||||
func (_m *ChannelMonitorHistory) String() string {
|
||||
var builder strings.Builder
|
||||
builder.WriteString("ChannelMonitorHistory(")
|
||||
builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
|
||||
builder.WriteString("monitor_id=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.MonitorID))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("model=")
|
||||
builder.WriteString(_m.Model)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("status=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.Status))
|
||||
builder.WriteString(", ")
|
||||
if v := _m.LatencyMs; v != nil {
|
||||
builder.WriteString("latency_ms=")
|
||||
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||
}
|
||||
builder.WriteString(", ")
|
||||
if v := _m.PingLatencyMs; v != nil {
|
||||
builder.WriteString("ping_latency_ms=")
|
||||
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||
}
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("message=")
|
||||
builder.WriteString(_m.Message)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("checked_at=")
|
||||
builder.WriteString(_m.CheckedAt.Format(time.ANSIC))
|
||||
builder.WriteByte(')')
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
// ChannelMonitorHistories is a parsable slice of ChannelMonitorHistory.
|
||||
type ChannelMonitorHistories []*ChannelMonitorHistory
|
||||
158
backend/ent/channelmonitorhistory/channelmonitorhistory.go
Normal file
158
backend/ent/channelmonitorhistory/channelmonitorhistory.go
Normal file
@@ -0,0 +1,158 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package channelmonitorhistory
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
)
|
||||
|
||||
const (
	// Label holds the string label denoting the channelmonitorhistory type in the database.
	Label = "channel_monitor_history"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldMonitorID holds the string denoting the monitor_id field in the database.
	FieldMonitorID = "monitor_id"
	// FieldModel holds the string denoting the model field in the database.
	FieldModel = "model"
	// FieldStatus holds the string denoting the status field in the database.
	FieldStatus = "status"
	// FieldLatencyMs holds the string denoting the latency_ms field in the database.
	FieldLatencyMs = "latency_ms"
	// FieldPingLatencyMs holds the string denoting the ping_latency_ms field in the database.
	FieldPingLatencyMs = "ping_latency_ms"
	// FieldMessage holds the string denoting the message field in the database.
	FieldMessage = "message"
	// FieldCheckedAt holds the string denoting the checked_at field in the database.
	FieldCheckedAt = "checked_at"
	// EdgeMonitor holds the string denoting the monitor edge name in mutations.
	EdgeMonitor = "monitor"
	// Table holds the table name of the channelmonitorhistory in the database.
	Table = "channel_monitor_histories"
	// MonitorTable is the table that holds the monitor relation/edge.
	MonitorTable = "channel_monitor_histories"
	// MonitorInverseTable is the table name for the ChannelMonitor entity.
	// It exists in this package in order to avoid circular dependency with the "channelmonitor" package.
	MonitorInverseTable = "channel_monitors"
	// MonitorColumn is the table column denoting the monitor relation/edge.
	MonitorColumn = "monitor_id"
)
|
||||
|
||||
// Columns holds all SQL columns for channelmonitorhistory fields.
|
||||
var Columns = []string{
|
||||
FieldID,
|
||||
FieldMonitorID,
|
||||
FieldModel,
|
||||
FieldStatus,
|
||||
FieldLatencyMs,
|
||||
FieldPingLatencyMs,
|
||||
FieldMessage,
|
||||
FieldCheckedAt,
|
||||
}
|
||||
|
||||
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||
func ValidColumn(column string) bool {
|
||||
for i := range Columns {
|
||||
if column == Columns[i] {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var (
	// ModelValidator is a validator for the "model" field. It is called by the builders before save.
	ModelValidator func(string) error
	// DefaultMessage holds the default value on creation for the "message" field.
	DefaultMessage string
	// MessageValidator is a validator for the "message" field. It is called by the builders before save.
	MessageValidator func(string) error
	// DefaultCheckedAt holds the default value on creation for the "checked_at" field.
	DefaultCheckedAt func() time.Time
)
|
||||
|
||||
// Status defines the type for the "status" enum field.
type Status string

// Status values.
const (
	StatusOperational Status = "operational"
	StatusDegraded    Status = "degraded"
	StatusFailed      Status = "failed"
	StatusError       Status = "error"
)
|
||||
|
||||
func (s Status) String() string {
|
||||
return string(s)
|
||||
}
|
||||
|
||||
// StatusValidator is a validator for the "status" field enum values. It is called by the builders before save.
|
||||
func StatusValidator(s Status) error {
|
||||
switch s {
|
||||
case StatusOperational, StatusDegraded, StatusFailed, StatusError:
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("channelmonitorhistory: invalid enum value for status field: %q", s)
|
||||
}
|
||||
}
|
||||
|
||||
// OrderOption defines the ordering options for the ChannelMonitorHistory queries.
|
||||
type OrderOption func(*sql.Selector)
|
||||
|
||||
// ByID orders the results by the id field.
|
||||
func ByID(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldID, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByMonitorID orders the results by the monitor_id field.
|
||||
func ByMonitorID(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldMonitorID, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByModel orders the results by the model field.
|
||||
func ByModel(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldModel, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByStatus orders the results by the status field.
|
||||
func ByStatus(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldStatus, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByLatencyMs orders the results by the latency_ms field.
|
||||
func ByLatencyMs(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldLatencyMs, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByPingLatencyMs orders the results by the ping_latency_ms field.
|
||||
func ByPingLatencyMs(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldPingLatencyMs, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByMessage orders the results by the message field.
|
||||
func ByMessage(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldMessage, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByCheckedAt orders the results by the checked_at field.
|
||||
func ByCheckedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldCheckedAt, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByMonitorField orders the results by monitor field.
|
||||
func ByMonitorField(field string, opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newMonitorStep(), sql.OrderByField(field, opts...))
|
||||
}
|
||||
}
|
||||
func newMonitorStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(MonitorInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, MonitorTable, MonitorColumn),
|
||||
)
|
||||
}
|
||||
444
backend/ent/channelmonitorhistory/where.go
Normal file
444
backend/ent/channelmonitorhistory/where.go
Normal file
@@ -0,0 +1,444 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package channelmonitorhistory
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// ID filters vertices based on their ID field.
|
||||
func ID(id int64) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldID, id))
|
||||
}
|
||||
|
||||
// IDEQ applies the EQ predicate on the ID field.
|
||||
func IDEQ(id int64) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldID, id))
|
||||
}
|
||||
|
||||
// IDNEQ applies the NEQ predicate on the ID field.
|
||||
func IDNEQ(id int64) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldNEQ(FieldID, id))
|
||||
}
|
||||
|
||||
// IDIn applies the In predicate on the ID field.
|
||||
func IDIn(ids ...int64) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldIn(FieldID, ids...))
|
||||
}
|
||||
|
||||
// IDNotIn applies the NotIn predicate on the ID field.
|
||||
func IDNotIn(ids ...int64) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldNotIn(FieldID, ids...))
|
||||
}
|
||||
|
||||
// IDGT applies the GT predicate on the ID field.
|
||||
func IDGT(id int64) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldGT(FieldID, id))
|
||||
}
|
||||
|
||||
// IDGTE applies the GTE predicate on the ID field.
|
||||
func IDGTE(id int64) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldGTE(FieldID, id))
|
||||
}
|
||||
|
||||
// IDLT applies the LT predicate on the ID field.
|
||||
func IDLT(id int64) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldLT(FieldID, id))
|
||||
}
|
||||
|
||||
// IDLTE applies the LTE predicate on the ID field.
|
||||
func IDLTE(id int64) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldLTE(FieldID, id))
|
||||
}
|
||||
|
||||
// MonitorID applies equality check predicate on the "monitor_id" field. It's identical to MonitorIDEQ.
|
||||
func MonitorID(v int64) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldMonitorID, v))
|
||||
}
|
||||
|
||||
// Model applies equality check predicate on the "model" field. It's identical to ModelEQ.
|
||||
func Model(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldModel, v))
|
||||
}
|
||||
|
||||
// LatencyMs applies equality check predicate on the "latency_ms" field. It's identical to LatencyMsEQ.
|
||||
func LatencyMs(v int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldLatencyMs, v))
|
||||
}
|
||||
|
||||
// PingLatencyMs applies equality check predicate on the "ping_latency_ms" field. It's identical to PingLatencyMsEQ.
|
||||
func PingLatencyMs(v int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldPingLatencyMs, v))
|
||||
}
|
||||
|
||||
// Message applies equality check predicate on the "message" field. It's identical to MessageEQ.
|
||||
func Message(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldMessage, v))
|
||||
}
|
||||
|
||||
// CheckedAt applies equality check predicate on the "checked_at" field. It's identical to CheckedAtEQ.
|
||||
func CheckedAt(v time.Time) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldCheckedAt, v))
|
||||
}
|
||||
|
||||
// MonitorIDEQ applies the EQ predicate on the "monitor_id" field.
|
||||
func MonitorIDEQ(v int64) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldMonitorID, v))
|
||||
}
|
||||
|
||||
// MonitorIDNEQ applies the NEQ predicate on the "monitor_id" field.
|
||||
func MonitorIDNEQ(v int64) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldNEQ(FieldMonitorID, v))
|
||||
}
|
||||
|
||||
// MonitorIDIn applies the In predicate on the "monitor_id" field.
|
||||
func MonitorIDIn(vs ...int64) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldIn(FieldMonitorID, vs...))
|
||||
}
|
||||
|
||||
// MonitorIDNotIn applies the NotIn predicate on the "monitor_id" field.
|
||||
func MonitorIDNotIn(vs ...int64) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldNotIn(FieldMonitorID, vs...))
|
||||
}
|
||||
|
||||
// ModelEQ applies the EQ predicate on the "model" field.
|
||||
func ModelEQ(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldModel, v))
|
||||
}
|
||||
|
||||
// ModelNEQ applies the NEQ predicate on the "model" field.
|
||||
func ModelNEQ(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldNEQ(FieldModel, v))
|
||||
}
|
||||
|
||||
// ModelIn applies the In predicate on the "model" field.
|
||||
func ModelIn(vs ...string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldIn(FieldModel, vs...))
|
||||
}
|
||||
|
||||
// ModelNotIn applies the NotIn predicate on the "model" field.
|
||||
func ModelNotIn(vs ...string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldNotIn(FieldModel, vs...))
|
||||
}
|
||||
|
||||
// ModelGT applies the GT predicate on the "model" field.
|
||||
func ModelGT(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldGT(FieldModel, v))
|
||||
}
|
||||
|
||||
// ModelGTE applies the GTE predicate on the "model" field.
|
||||
func ModelGTE(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldGTE(FieldModel, v))
|
||||
}
|
||||
|
||||
// ModelLT applies the LT predicate on the "model" field.
|
||||
func ModelLT(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldLT(FieldModel, v))
|
||||
}
|
||||
|
||||
// ModelLTE applies the LTE predicate on the "model" field.
|
||||
func ModelLTE(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldLTE(FieldModel, v))
|
||||
}
|
||||
|
||||
// ModelContains applies the Contains predicate on the "model" field.
|
||||
func ModelContains(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldContains(FieldModel, v))
|
||||
}
|
||||
|
||||
// ModelHasPrefix applies the HasPrefix predicate on the "model" field.
|
||||
func ModelHasPrefix(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldHasPrefix(FieldModel, v))
|
||||
}
|
||||
|
||||
// ModelHasSuffix applies the HasSuffix predicate on the "model" field.
|
||||
func ModelHasSuffix(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldHasSuffix(FieldModel, v))
|
||||
}
|
||||
|
||||
// ModelEqualFold applies the EqualFold predicate on the "model" field.
|
||||
func ModelEqualFold(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldEqualFold(FieldModel, v))
|
||||
}
|
||||
|
||||
// ModelContainsFold applies the ContainsFold predicate on the "model" field.
|
||||
func ModelContainsFold(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldContainsFold(FieldModel, v))
|
||||
}
|
||||
|
||||
// StatusEQ applies the EQ predicate on the "status" field.
|
||||
func StatusEQ(v Status) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldStatus, v))
|
||||
}
|
||||
|
||||
// StatusNEQ applies the NEQ predicate on the "status" field.
|
||||
func StatusNEQ(v Status) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldNEQ(FieldStatus, v))
|
||||
}
|
||||
|
||||
// StatusIn applies the In predicate on the "status" field.
|
||||
func StatusIn(vs ...Status) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldIn(FieldStatus, vs...))
|
||||
}
|
||||
|
||||
// StatusNotIn applies the NotIn predicate on the "status" field.
|
||||
func StatusNotIn(vs ...Status) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldNotIn(FieldStatus, vs...))
|
||||
}
|
||||
|
||||
// LatencyMsEQ applies the EQ predicate on the "latency_ms" field.
|
||||
func LatencyMsEQ(v int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldLatencyMs, v))
|
||||
}
|
||||
|
||||
// LatencyMsNEQ applies the NEQ predicate on the "latency_ms" field.
|
||||
func LatencyMsNEQ(v int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldNEQ(FieldLatencyMs, v))
|
||||
}
|
||||
|
||||
// LatencyMsIn applies the In predicate on the "latency_ms" field.
|
||||
func LatencyMsIn(vs ...int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldIn(FieldLatencyMs, vs...))
|
||||
}
|
||||
|
||||
// LatencyMsNotIn applies the NotIn predicate on the "latency_ms" field.
|
||||
func LatencyMsNotIn(vs ...int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldNotIn(FieldLatencyMs, vs...))
|
||||
}
|
||||
|
||||
// LatencyMsGT applies the GT predicate on the "latency_ms" field.
|
||||
func LatencyMsGT(v int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldGT(FieldLatencyMs, v))
|
||||
}
|
||||
|
||||
// LatencyMsGTE applies the GTE predicate on the "latency_ms" field.
|
||||
func LatencyMsGTE(v int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldGTE(FieldLatencyMs, v))
|
||||
}
|
||||
|
||||
// LatencyMsLT applies the LT predicate on the "latency_ms" field.
|
||||
func LatencyMsLT(v int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldLT(FieldLatencyMs, v))
|
||||
}
|
||||
|
||||
// LatencyMsLTE applies the LTE predicate on the "latency_ms" field.
|
||||
func LatencyMsLTE(v int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldLTE(FieldLatencyMs, v))
|
||||
}
|
||||
|
||||
// LatencyMsIsNil applies the IsNil predicate on the "latency_ms" field.
|
||||
func LatencyMsIsNil() predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldIsNull(FieldLatencyMs))
|
||||
}
|
||||
|
||||
// LatencyMsNotNil applies the NotNil predicate on the "latency_ms" field.
|
||||
func LatencyMsNotNil() predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldNotNull(FieldLatencyMs))
|
||||
}
|
||||
|
||||
// PingLatencyMsEQ applies the EQ predicate on the "ping_latency_ms" field.
|
||||
func PingLatencyMsEQ(v int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldPingLatencyMs, v))
|
||||
}
|
||||
|
||||
// PingLatencyMsNEQ applies the NEQ predicate on the "ping_latency_ms" field.
|
||||
func PingLatencyMsNEQ(v int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldNEQ(FieldPingLatencyMs, v))
|
||||
}
|
||||
|
||||
// PingLatencyMsIn applies the In predicate on the "ping_latency_ms" field.
|
||||
func PingLatencyMsIn(vs ...int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldIn(FieldPingLatencyMs, vs...))
|
||||
}
|
||||
|
||||
// PingLatencyMsNotIn applies the NotIn predicate on the "ping_latency_ms" field.
|
||||
func PingLatencyMsNotIn(vs ...int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldNotIn(FieldPingLatencyMs, vs...))
|
||||
}
|
||||
|
||||
// PingLatencyMsGT applies the GT predicate on the "ping_latency_ms" field.
|
||||
func PingLatencyMsGT(v int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldGT(FieldPingLatencyMs, v))
|
||||
}
|
||||
|
||||
// PingLatencyMsGTE applies the GTE predicate on the "ping_latency_ms" field.
|
||||
func PingLatencyMsGTE(v int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldGTE(FieldPingLatencyMs, v))
|
||||
}
|
||||
|
||||
// PingLatencyMsLT applies the LT predicate on the "ping_latency_ms" field.
|
||||
func PingLatencyMsLT(v int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldLT(FieldPingLatencyMs, v))
|
||||
}
|
||||
|
||||
// PingLatencyMsLTE applies the LTE predicate on the "ping_latency_ms" field.
|
||||
func PingLatencyMsLTE(v int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldLTE(FieldPingLatencyMs, v))
|
||||
}
|
||||
|
||||
// PingLatencyMsIsNil applies the IsNil predicate on the "ping_latency_ms" field.
|
||||
func PingLatencyMsIsNil() predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldIsNull(FieldPingLatencyMs))
|
||||
}
|
||||
|
||||
// PingLatencyMsNotNil applies the NotNil predicate on the "ping_latency_ms" field.
|
||||
func PingLatencyMsNotNil() predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldNotNull(FieldPingLatencyMs))
|
||||
}
|
||||
|
||||
// MessageEQ applies the EQ predicate on the "message" field.
|
||||
func MessageEQ(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldMessage, v))
|
||||
}
|
||||
|
||||
// MessageNEQ applies the NEQ predicate on the "message" field.
|
||||
func MessageNEQ(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldNEQ(FieldMessage, v))
|
||||
}
|
||||
|
||||
// MessageIn applies the In predicate on the "message" field.
|
||||
func MessageIn(vs ...string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldIn(FieldMessage, vs...))
|
||||
}
|
||||
|
||||
// MessageNotIn applies the NotIn predicate on the "message" field.
|
||||
func MessageNotIn(vs ...string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldNotIn(FieldMessage, vs...))
|
||||
}
|
||||
|
||||
// MessageGT applies the GT predicate on the "message" field.
|
||||
func MessageGT(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldGT(FieldMessage, v))
|
||||
}
|
||||
|
||||
// MessageGTE applies the GTE predicate on the "message" field.
|
||||
func MessageGTE(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldGTE(FieldMessage, v))
|
||||
}
|
||||
|
||||
// MessageLT applies the LT predicate on the "message" field.
|
||||
func MessageLT(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldLT(FieldMessage, v))
|
||||
}
|
||||
|
||||
// MessageLTE applies the LTE predicate on the "message" field.
|
||||
func MessageLTE(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldLTE(FieldMessage, v))
|
||||
}
|
||||
|
||||
// MessageContains applies the Contains predicate on the "message" field.
|
||||
func MessageContains(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldContains(FieldMessage, v))
|
||||
}
|
||||
|
||||
// MessageHasPrefix applies the HasPrefix predicate on the "message" field.
|
||||
func MessageHasPrefix(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldHasPrefix(FieldMessage, v))
|
||||
}
|
||||
|
||||
// MessageHasSuffix applies the HasSuffix predicate on the "message" field.
|
||||
func MessageHasSuffix(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldHasSuffix(FieldMessage, v))
|
||||
}
|
||||
|
||||
// MessageIsNil applies the IsNil predicate on the "message" field.
|
||||
func MessageIsNil() predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldIsNull(FieldMessage))
|
||||
}
|
||||
|
||||
// MessageNotNil applies the NotNil predicate on the "message" field.
|
||||
func MessageNotNil() predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldNotNull(FieldMessage))
|
||||
}
|
||||
|
||||
// MessageEqualFold applies the EqualFold predicate on the "message" field.
|
||||
func MessageEqualFold(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldEqualFold(FieldMessage, v))
|
||||
}
|
||||
|
||||
// MessageContainsFold applies the ContainsFold predicate on the "message" field.
|
||||
func MessageContainsFold(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldContainsFold(FieldMessage, v))
|
||||
}
|
||||
|
||||
// CheckedAtEQ applies the EQ predicate on the "checked_at" field.
|
||||
func CheckedAtEQ(v time.Time) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldCheckedAt, v))
|
||||
}
|
||||
|
||||
// CheckedAtNEQ applies the NEQ predicate on the "checked_at" field.
|
||||
func CheckedAtNEQ(v time.Time) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldNEQ(FieldCheckedAt, v))
|
||||
}
|
||||
|
||||
// CheckedAtIn applies the In predicate on the "checked_at" field.
|
||||
func CheckedAtIn(vs ...time.Time) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldIn(FieldCheckedAt, vs...))
|
||||
}
|
||||
|
||||
// CheckedAtNotIn applies the NotIn predicate on the "checked_at" field.
|
||||
func CheckedAtNotIn(vs ...time.Time) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldNotIn(FieldCheckedAt, vs...))
|
||||
}
|
||||
|
||||
// CheckedAtGT applies the GT predicate on the "checked_at" field.
|
||||
func CheckedAtGT(v time.Time) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldGT(FieldCheckedAt, v))
|
||||
}
|
||||
|
||||
// CheckedAtGTE applies the GTE predicate on the "checked_at" field.
|
||||
func CheckedAtGTE(v time.Time) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldGTE(FieldCheckedAt, v))
|
||||
}
|
||||
|
||||
// CheckedAtLT applies the LT predicate on the "checked_at" field.
|
||||
func CheckedAtLT(v time.Time) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldLT(FieldCheckedAt, v))
|
||||
}
|
||||
|
||||
// CheckedAtLTE applies the LTE predicate on the "checked_at" field.
|
||||
func CheckedAtLTE(v time.Time) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldLTE(FieldCheckedAt, v))
|
||||
}
|
||||
|
||||
// HasMonitor applies the HasEdge predicate on the "monitor" edge.
|
||||
func HasMonitor() predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, MonitorTable, MonitorColumn),
|
||||
)
|
||||
sqlgraph.HasNeighbors(s, step)
|
||||
})
|
||||
}
|
||||
|
||||
// HasMonitorWith applies the HasEdge predicate on the "monitor" edge with a given conditions (other predicates).
|
||||
func HasMonitorWith(preds ...predicate.ChannelMonitor) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(func(s *sql.Selector) {
|
||||
step := newMonitorStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// And groups predicates with the AND operator between them.
|
||||
func And(predicates ...predicate.ChannelMonitorHistory) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.AndPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Or groups predicates with the OR operator between them.
|
||||
func Or(predicates ...predicate.ChannelMonitorHistory) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.OrPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Not applies the not operator on the given predicate.
|
||||
func Not(p predicate.ChannelMonitorHistory) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.NotPredicates(p))
|
||||
}
|
||||
947
backend/ent/channelmonitorhistory_create.go
Normal file
947
backend/ent/channelmonitorhistory_create.go
Normal file
@@ -0,0 +1,947 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory"
|
||||
)
|
||||
|
||||
// ChannelMonitorHistoryCreate is the builder for creating a ChannelMonitorHistory entity.
type ChannelMonitorHistoryCreate struct {
	config
	// mutation accumulates the field/edge changes staged by the Set* methods.
	mutation *ChannelMonitorHistoryMutation
	// hooks run around the creation, outermost first.
	hooks []Hook
	// conflict holds the ON CONFLICT / ON DUPLICATE KEY options, if any.
	conflict []sql.ConflictOption
}
|
||||
|
||||
// SetMonitorID sets the "monitor_id" field.
|
||||
func (_c *ChannelMonitorHistoryCreate) SetMonitorID(v int64) *ChannelMonitorHistoryCreate {
|
||||
_c.mutation.SetMonitorID(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetModel sets the "model" field.
|
||||
func (_c *ChannelMonitorHistoryCreate) SetModel(v string) *ChannelMonitorHistoryCreate {
|
||||
_c.mutation.SetModel(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetStatus sets the "status" field.
|
||||
func (_c *ChannelMonitorHistoryCreate) SetStatus(v channelmonitorhistory.Status) *ChannelMonitorHistoryCreate {
|
||||
_c.mutation.SetStatus(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetLatencyMs sets the "latency_ms" field.
|
||||
func (_c *ChannelMonitorHistoryCreate) SetLatencyMs(v int) *ChannelMonitorHistoryCreate {
|
||||
_c.mutation.SetLatencyMs(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableLatencyMs sets the "latency_ms" field if the given value is not nil.
|
||||
func (_c *ChannelMonitorHistoryCreate) SetNillableLatencyMs(v *int) *ChannelMonitorHistoryCreate {
|
||||
if v != nil {
|
||||
_c.SetLatencyMs(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetPingLatencyMs sets the "ping_latency_ms" field.
|
||||
func (_c *ChannelMonitorHistoryCreate) SetPingLatencyMs(v int) *ChannelMonitorHistoryCreate {
|
||||
_c.mutation.SetPingLatencyMs(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillablePingLatencyMs sets the "ping_latency_ms" field if the given value is not nil.
|
||||
func (_c *ChannelMonitorHistoryCreate) SetNillablePingLatencyMs(v *int) *ChannelMonitorHistoryCreate {
|
||||
if v != nil {
|
||||
_c.SetPingLatencyMs(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetMessage sets the "message" field.
|
||||
func (_c *ChannelMonitorHistoryCreate) SetMessage(v string) *ChannelMonitorHistoryCreate {
|
||||
_c.mutation.SetMessage(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableMessage sets the "message" field if the given value is not nil.
|
||||
func (_c *ChannelMonitorHistoryCreate) SetNillableMessage(v *string) *ChannelMonitorHistoryCreate {
|
||||
if v != nil {
|
||||
_c.SetMessage(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetCheckedAt sets the "checked_at" field.
|
||||
func (_c *ChannelMonitorHistoryCreate) SetCheckedAt(v time.Time) *ChannelMonitorHistoryCreate {
|
||||
_c.mutation.SetCheckedAt(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableCheckedAt sets the "checked_at" field if the given value is not nil.
|
||||
func (_c *ChannelMonitorHistoryCreate) SetNillableCheckedAt(v *time.Time) *ChannelMonitorHistoryCreate {
|
||||
if v != nil {
|
||||
_c.SetCheckedAt(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetMonitor sets the "monitor" edge to the ChannelMonitor entity.
|
||||
func (_c *ChannelMonitorHistoryCreate) SetMonitor(v *ChannelMonitor) *ChannelMonitorHistoryCreate {
|
||||
return _c.SetMonitorID(v.ID)
|
||||
}
|
||||
|
||||
// Mutation returns the ChannelMonitorHistoryMutation object of the builder.
|
||||
func (_c *ChannelMonitorHistoryCreate) Mutation() *ChannelMonitorHistoryMutation {
|
||||
return _c.mutation
|
||||
}
|
||||
|
||||
// Save creates the ChannelMonitorHistory in the database.
|
||||
func (_c *ChannelMonitorHistoryCreate) Save(ctx context.Context) (*ChannelMonitorHistory, error) {
|
||||
_c.defaults()
|
||||
return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks)
|
||||
}
|
||||
|
||||
// SaveX calls Save and panics if Save returns an error.
|
||||
func (_c *ChannelMonitorHistoryCreate) SaveX(ctx context.Context) *ChannelMonitorHistory {
|
||||
v, err := _c.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (_c *ChannelMonitorHistoryCreate) Exec(ctx context.Context) error {
|
||||
_, err := _c.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_c *ChannelMonitorHistoryCreate) ExecX(ctx context.Context) {
|
||||
if err := _c.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
|
||||
func (_c *ChannelMonitorHistoryCreate) defaults() {
|
||||
if _, ok := _c.mutation.Message(); !ok {
|
||||
v := channelmonitorhistory.DefaultMessage
|
||||
_c.mutation.SetMessage(v)
|
||||
}
|
||||
if _, ok := _c.mutation.CheckedAt(); !ok {
|
||||
v := channelmonitorhistory.DefaultCheckedAt()
|
||||
_c.mutation.SetCheckedAt(v)
|
||||
}
|
||||
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
//
// Presence checks for required fields run before the per-field
// validators; the required "monitor" edge is verified last. The first
// failure is returned.
func (_c *ChannelMonitorHistoryCreate) check() error {
	if _, ok := _c.mutation.MonitorID(); !ok {
		return &ValidationError{Name: "monitor_id", err: errors.New(`ent: missing required field "ChannelMonitorHistory.monitor_id"`)}
	}
	if _, ok := _c.mutation.Model(); !ok {
		return &ValidationError{Name: "model", err: errors.New(`ent: missing required field "ChannelMonitorHistory.model"`)}
	}
	if v, ok := _c.mutation.Model(); ok {
		if err := channelmonitorhistory.ModelValidator(v); err != nil {
			return &ValidationError{Name: "model", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorHistory.model": %w`, err)}
		}
	}
	if _, ok := _c.mutation.Status(); !ok {
		return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "ChannelMonitorHistory.status"`)}
	}
	if v, ok := _c.mutation.Status(); ok {
		if err := channelmonitorhistory.StatusValidator(v); err != nil {
			return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorHistory.status": %w`, err)}
		}
	}
	// "message" has a schema default (set in defaults()), so only its
	// validator runs here — no presence check.
	if v, ok := _c.mutation.Message(); ok {
		if err := channelmonitorhistory.MessageValidator(v); err != nil {
			return &ValidationError{Name: "message", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorHistory.message": %w`, err)}
		}
	}
	if _, ok := _c.mutation.CheckedAt(); !ok {
		return &ValidationError{Name: "checked_at", err: errors.New(`ent: missing required field "ChannelMonitorHistory.checked_at"`)}
	}
	if len(_c.mutation.MonitorIDs()) == 0 {
		return &ValidationError{Name: "monitor", err: errors.New(`ent: missing required edge "ChannelMonitorHistory.monitor"`)}
	}
	return nil
}
|
||||
|
||||
// sqlSave validates the builder and performs the actual INSERT of a
// single ChannelMonitorHistory row, returning the populated node.
func (_c *ChannelMonitorHistoryCreate) sqlSave(ctx context.Context) (*ChannelMonitorHistory, error) {
	if err := _c.check(); err != nil {
		return nil, err
	}
	_node, _spec := _c.createSpec()
	if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil {
		if sqlgraph.IsConstraintError(err) {
			// Wrap DB constraint violations in the typed ConstraintError
			// so callers can detect them with errors.As.
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	// The driver reports the generated primary key through _spec.ID.Value.
	id := _spec.ID.Value.(int64)
	_node.ID = int64(id)
	// Mark the mutation as applied and record the new ID on it.
	_c.mutation.id = &_node.ID
	_c.mutation.done = true
	return _node, nil
}
|
||||
|
||||
// createSpec builds the in-memory node and the sqlgraph spec used to
// render the INSERT statement. Only fields present in the mutation are
// added to the spec, so unset optional fields stay NULL in the database.
func (_c *ChannelMonitorHistoryCreate) createSpec() (*ChannelMonitorHistory, *sqlgraph.CreateSpec) {
	var (
		_node = &ChannelMonitorHistory{config: _c.config}
		_spec = sqlgraph.NewCreateSpec(channelmonitorhistory.Table, sqlgraph.NewFieldSpec(channelmonitorhistory.FieldID, field.TypeInt64))
	)
	_spec.OnConflict = _c.conflict
	if value, ok := _c.mutation.Model(); ok {
		_spec.SetField(channelmonitorhistory.FieldModel, field.TypeString, value)
		_node.Model = value
	}
	if value, ok := _c.mutation.Status(); ok {
		_spec.SetField(channelmonitorhistory.FieldStatus, field.TypeEnum, value)
		_node.Status = value
	}
	if value, ok := _c.mutation.LatencyMs(); ok {
		_spec.SetField(channelmonitorhistory.FieldLatencyMs, field.TypeInt, value)
		// Nillable field: the node stores a pointer.
		_node.LatencyMs = &value
	}
	if value, ok := _c.mutation.PingLatencyMs(); ok {
		_spec.SetField(channelmonitorhistory.FieldPingLatencyMs, field.TypeInt, value)
		// Nillable field: the node stores a pointer.
		_node.PingLatencyMs = &value
	}
	if value, ok := _c.mutation.Message(); ok {
		_spec.SetField(channelmonitorhistory.FieldMessage, field.TypeString, value)
		_node.Message = value
	}
	if value, ok := _c.mutation.CheckedAt(); ok {
		_spec.SetField(channelmonitorhistory.FieldCheckedAt, field.TypeTime, value)
		_node.CheckedAt = value
	}
	if nodes := _c.mutation.MonitorIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   channelmonitorhistory.MonitorTable,
			Columns: []string{channelmonitorhistory.MonitorColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		// M2O edge: the foreign key lives on this row; mirror it on the node.
		_node.MonitorID = nodes[0]
		_spec.Edges = append(_spec.Edges, edge)
	}
	return _node, _spec
}
|
||||
|
||||
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.ChannelMonitorHistory.Create().
//		SetMonitorID(v).
//		OnConflict(
//			// Update the row with the new values
//			// that were proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.ChannelMonitorHistoryUpsert) {
//			SetMonitorID(v+v).
//		}).
//		Exec(ctx)
func (_c *ChannelMonitorHistoryCreate) OnConflict(opts ...sql.ConflictOption) *ChannelMonitorHistoryUpsertOne {
	_c.conflict = opts
	return &ChannelMonitorHistoryUpsertOne{
		create: _c,
	}
}
|
||||
|
||||
// OnConflictColumns calls `OnConflict` and configures the columns
|
||||
// as conflict target. Using this option is equivalent to using:
|
||||
//
|
||||
// client.ChannelMonitorHistory.Create().
|
||||
// OnConflict(sql.ConflictColumns(columns...)).
|
||||
// Exec(ctx)
|
||||
func (_c *ChannelMonitorHistoryCreate) OnConflictColumns(columns ...string) *ChannelMonitorHistoryUpsertOne {
|
||||
_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
|
||||
return &ChannelMonitorHistoryUpsertOne{
|
||||
create: _c,
|
||||
}
|
||||
}
|
||||
|
||||
type (
	// ChannelMonitorHistoryUpsertOne is the builder for "upsert"-ing
	// one ChannelMonitorHistory node.
	ChannelMonitorHistoryUpsertOne struct {
		// create is the underlying create builder whose conflict
		// options are configured by the methods below.
		create *ChannelMonitorHistoryCreate
	}

	// ChannelMonitorHistoryUpsert is the "OnConflict" setter.
	// It embeds *sql.UpdateSet, which exposes the raw Set/Add/SetNull/
	// SetExcluded operations used by the generated field helpers.
	ChannelMonitorHistoryUpsert struct {
		*sql.UpdateSet
	}
)
|
||||
|
||||
// SetMonitorID sets the "monitor_id" field.
|
||||
func (u *ChannelMonitorHistoryUpsert) SetMonitorID(v int64) *ChannelMonitorHistoryUpsert {
|
||||
u.Set(channelmonitorhistory.FieldMonitorID, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateMonitorID sets the "monitor_id" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorHistoryUpsert) UpdateMonitorID() *ChannelMonitorHistoryUpsert {
|
||||
u.SetExcluded(channelmonitorhistory.FieldMonitorID)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetModel sets the "model" field.
|
||||
func (u *ChannelMonitorHistoryUpsert) SetModel(v string) *ChannelMonitorHistoryUpsert {
|
||||
u.Set(channelmonitorhistory.FieldModel, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateModel sets the "model" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorHistoryUpsert) UpdateModel() *ChannelMonitorHistoryUpsert {
|
||||
u.SetExcluded(channelmonitorhistory.FieldModel)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetStatus sets the "status" field.
|
||||
func (u *ChannelMonitorHistoryUpsert) SetStatus(v channelmonitorhistory.Status) *ChannelMonitorHistoryUpsert {
|
||||
u.Set(channelmonitorhistory.FieldStatus, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateStatus sets the "status" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorHistoryUpsert) UpdateStatus() *ChannelMonitorHistoryUpsert {
|
||||
u.SetExcluded(channelmonitorhistory.FieldStatus)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetLatencyMs sets the "latency_ms" field.
|
||||
func (u *ChannelMonitorHistoryUpsert) SetLatencyMs(v int) *ChannelMonitorHistoryUpsert {
|
||||
u.Set(channelmonitorhistory.FieldLatencyMs, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateLatencyMs sets the "latency_ms" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorHistoryUpsert) UpdateLatencyMs() *ChannelMonitorHistoryUpsert {
|
||||
u.SetExcluded(channelmonitorhistory.FieldLatencyMs)
|
||||
return u
|
||||
}
|
||||
|
||||
// AddLatencyMs adds v to the "latency_ms" field.
|
||||
func (u *ChannelMonitorHistoryUpsert) AddLatencyMs(v int) *ChannelMonitorHistoryUpsert {
|
||||
u.Add(channelmonitorhistory.FieldLatencyMs, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// ClearLatencyMs clears the value of the "latency_ms" field.
|
||||
func (u *ChannelMonitorHistoryUpsert) ClearLatencyMs() *ChannelMonitorHistoryUpsert {
|
||||
u.SetNull(channelmonitorhistory.FieldLatencyMs)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetPingLatencyMs sets the "ping_latency_ms" field.
|
||||
func (u *ChannelMonitorHistoryUpsert) SetPingLatencyMs(v int) *ChannelMonitorHistoryUpsert {
|
||||
u.Set(channelmonitorhistory.FieldPingLatencyMs, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdatePingLatencyMs sets the "ping_latency_ms" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorHistoryUpsert) UpdatePingLatencyMs() *ChannelMonitorHistoryUpsert {
|
||||
u.SetExcluded(channelmonitorhistory.FieldPingLatencyMs)
|
||||
return u
|
||||
}
|
||||
|
||||
// AddPingLatencyMs adds v to the "ping_latency_ms" field.
|
||||
func (u *ChannelMonitorHistoryUpsert) AddPingLatencyMs(v int) *ChannelMonitorHistoryUpsert {
|
||||
u.Add(channelmonitorhistory.FieldPingLatencyMs, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// ClearPingLatencyMs clears the value of the "ping_latency_ms" field.
|
||||
func (u *ChannelMonitorHistoryUpsert) ClearPingLatencyMs() *ChannelMonitorHistoryUpsert {
|
||||
u.SetNull(channelmonitorhistory.FieldPingLatencyMs)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetMessage sets the "message" field.
|
||||
func (u *ChannelMonitorHistoryUpsert) SetMessage(v string) *ChannelMonitorHistoryUpsert {
|
||||
u.Set(channelmonitorhistory.FieldMessage, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateMessage sets the "message" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorHistoryUpsert) UpdateMessage() *ChannelMonitorHistoryUpsert {
|
||||
u.SetExcluded(channelmonitorhistory.FieldMessage)
|
||||
return u
|
||||
}
|
||||
|
||||
// ClearMessage clears the value of the "message" field.
|
||||
func (u *ChannelMonitorHistoryUpsert) ClearMessage() *ChannelMonitorHistoryUpsert {
|
||||
u.SetNull(channelmonitorhistory.FieldMessage)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetCheckedAt sets the "checked_at" field.
|
||||
func (u *ChannelMonitorHistoryUpsert) SetCheckedAt(v time.Time) *ChannelMonitorHistoryUpsert {
|
||||
u.Set(channelmonitorhistory.FieldCheckedAt, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateCheckedAt sets the "checked_at" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorHistoryUpsert) UpdateCheckedAt() *ChannelMonitorHistoryUpsert {
|
||||
u.SetExcluded(channelmonitorhistory.FieldCheckedAt)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateNewValues updates the mutable fields using the new values that were set on create.
|
||||
// Using this option is equivalent to using:
|
||||
//
|
||||
// client.ChannelMonitorHistory.Create().
|
||||
// OnConflict(
|
||||
// sql.ResolveWithNewValues(),
|
||||
// ).
|
||||
// Exec(ctx)
|
||||
func (u *ChannelMonitorHistoryUpsertOne) UpdateNewValues() *ChannelMonitorHistoryUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
|
||||
return u
|
||||
}
|
||||
|
||||
// Ignore sets each column to itself in case of conflict.
|
||||
// Using this option is equivalent to using:
|
||||
//
|
||||
// client.ChannelMonitorHistory.Create().
|
||||
// OnConflict(sql.ResolveWithIgnore()).
|
||||
// Exec(ctx)
|
||||
func (u *ChannelMonitorHistoryUpsertOne) Ignore() *ChannelMonitorHistoryUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
|
||||
return u
|
||||
}
|
||||
|
||||
// DoNothing configures the conflict_action to `DO NOTHING`.
|
||||
// Supported only by SQLite and PostgreSQL.
|
||||
func (u *ChannelMonitorHistoryUpsertOne) DoNothing() *ChannelMonitorHistoryUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.DoNothing())
|
||||
return u
|
||||
}
|
||||
|
||||
// Update allows overriding fields `UPDATE` values. See the ChannelMonitorHistoryCreate.OnConflict
|
||||
// documentation for more info.
|
||||
func (u *ChannelMonitorHistoryUpsertOne) Update(set func(*ChannelMonitorHistoryUpsert)) *ChannelMonitorHistoryUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
|
||||
set(&ChannelMonitorHistoryUpsert{UpdateSet: update})
|
||||
}))
|
||||
return u
|
||||
}
|
||||
|
||||
// SetMonitorID sets the "monitor_id" field.
|
||||
func (u *ChannelMonitorHistoryUpsertOne) SetMonitorID(v int64) *ChannelMonitorHistoryUpsertOne {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.SetMonitorID(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateMonitorID sets the "monitor_id" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorHistoryUpsertOne) UpdateMonitorID() *ChannelMonitorHistoryUpsertOne {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.UpdateMonitorID()
|
||||
})
|
||||
}
|
||||
|
||||
// SetModel sets the "model" field.
|
||||
func (u *ChannelMonitorHistoryUpsertOne) SetModel(v string) *ChannelMonitorHistoryUpsertOne {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.SetModel(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateModel sets the "model" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorHistoryUpsertOne) UpdateModel() *ChannelMonitorHistoryUpsertOne {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.UpdateModel()
|
||||
})
|
||||
}
|
||||
|
||||
// SetStatus sets the "status" field.
|
||||
func (u *ChannelMonitorHistoryUpsertOne) SetStatus(v channelmonitorhistory.Status) *ChannelMonitorHistoryUpsertOne {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.SetStatus(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateStatus sets the "status" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorHistoryUpsertOne) UpdateStatus() *ChannelMonitorHistoryUpsertOne {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.UpdateStatus()
|
||||
})
|
||||
}
|
||||
|
||||
// SetLatencyMs sets the "latency_ms" field.
|
||||
func (u *ChannelMonitorHistoryUpsertOne) SetLatencyMs(v int) *ChannelMonitorHistoryUpsertOne {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.SetLatencyMs(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddLatencyMs adds v to the "latency_ms" field.
|
||||
func (u *ChannelMonitorHistoryUpsertOne) AddLatencyMs(v int) *ChannelMonitorHistoryUpsertOne {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.AddLatencyMs(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateLatencyMs sets the "latency_ms" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorHistoryUpsertOne) UpdateLatencyMs() *ChannelMonitorHistoryUpsertOne {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.UpdateLatencyMs()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearLatencyMs clears the value of the "latency_ms" field.
|
||||
func (u *ChannelMonitorHistoryUpsertOne) ClearLatencyMs() *ChannelMonitorHistoryUpsertOne {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.ClearLatencyMs()
|
||||
})
|
||||
}
|
||||
|
||||
// SetPingLatencyMs sets the "ping_latency_ms" field.
|
||||
func (u *ChannelMonitorHistoryUpsertOne) SetPingLatencyMs(v int) *ChannelMonitorHistoryUpsertOne {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.SetPingLatencyMs(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddPingLatencyMs adds v to the "ping_latency_ms" field.
|
||||
func (u *ChannelMonitorHistoryUpsertOne) AddPingLatencyMs(v int) *ChannelMonitorHistoryUpsertOne {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.AddPingLatencyMs(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdatePingLatencyMs sets the "ping_latency_ms" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorHistoryUpsertOne) UpdatePingLatencyMs() *ChannelMonitorHistoryUpsertOne {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.UpdatePingLatencyMs()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearPingLatencyMs clears the value of the "ping_latency_ms" field.
|
||||
func (u *ChannelMonitorHistoryUpsertOne) ClearPingLatencyMs() *ChannelMonitorHistoryUpsertOne {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.ClearPingLatencyMs()
|
||||
})
|
||||
}
|
||||
|
||||
// SetMessage sets the "message" field.
|
||||
func (u *ChannelMonitorHistoryUpsertOne) SetMessage(v string) *ChannelMonitorHistoryUpsertOne {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.SetMessage(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateMessage sets the "message" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorHistoryUpsertOne) UpdateMessage() *ChannelMonitorHistoryUpsertOne {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.UpdateMessage()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearMessage clears the value of the "message" field.
|
||||
func (u *ChannelMonitorHistoryUpsertOne) ClearMessage() *ChannelMonitorHistoryUpsertOne {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.ClearMessage()
|
||||
})
|
||||
}
|
||||
|
||||
// SetCheckedAt sets the "checked_at" field.
|
||||
func (u *ChannelMonitorHistoryUpsertOne) SetCheckedAt(v time.Time) *ChannelMonitorHistoryUpsertOne {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.SetCheckedAt(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateCheckedAt sets the "checked_at" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorHistoryUpsertOne) UpdateCheckedAt() *ChannelMonitorHistoryUpsertOne {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.UpdateCheckedAt()
|
||||
})
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (u *ChannelMonitorHistoryUpsertOne) Exec(ctx context.Context) error {
|
||||
if len(u.create.conflict) == 0 {
|
||||
return errors.New("ent: missing options for ChannelMonitorHistoryCreate.OnConflict")
|
||||
}
|
||||
return u.create.Exec(ctx)
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (u *ChannelMonitorHistoryUpsertOne) ExecX(ctx context.Context) {
|
||||
if err := u.create.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// ID executes the UPSERT query and returns the inserted/updated ID.
func (u *ChannelMonitorHistoryUpsertOne) ID(ctx context.Context) (id int64, err error) {
	node, err := u.create.Save(ctx)
	if err != nil {
		// id is the zero value here; callers must check err first.
		return id, err
	}
	return node.ID, nil
}

// IDX is like ID, but panics if an error occurs.
func (u *ChannelMonitorHistoryUpsertOne) IDX(ctx context.Context) int64 {
	id, err := u.ID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}
|
||||
|
||||
// ChannelMonitorHistoryCreateBulk is the builder for creating many ChannelMonitorHistory entities in bulk.
type ChannelMonitorHistoryCreateBulk struct {
	config
	// err records a builder-construction error, returned by Save.
	err error
	// builders holds one create builder per entity to insert.
	builders []*ChannelMonitorHistoryCreate
	// conflict holds the ON CONFLICT options applied to the batch insert.
	conflict []sql.ConflictOption
}
|
||||
|
||||
// Save creates the ChannelMonitorHistory entities in the database.
//
// Each builder's hooks are chained so that the mutator for builder i
// triggers builder i+1; only the final mutator in the chain performs
// the single batch INSERT. The statement ordering below is load-bearing
// and must not be rearranged.
func (_c *ChannelMonitorHistoryCreateBulk) Save(ctx context.Context) ([]*ChannelMonitorHistory, error) {
	if _c.err != nil {
		return nil, _c.err
	}
	specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
	nodes := make([]*ChannelMonitorHistory, len(_c.builders))
	mutators := make([]Mutator, len(_c.builders))
	for i := range _c.builders {
		func(i int, root context.Context) {
			builder := _c.builders[i]
			builder.defaults()
			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
				mutation, ok := m.(*ChannelMonitorHistoryMutation)
				if !ok {
					return nil, fmt.Errorf("unexpected mutation type %T", m)
				}
				if err := builder.check(); err != nil {
					return nil, err
				}
				builder.mutation = mutation
				var err error
				nodes[i], specs[i] = builder.createSpec()
				if i < len(mutators)-1 {
					// Not the last builder: advance the chain to the next mutation.
					_, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
				} else {
					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
					spec.OnConflict = _c.conflict
					// Invoke the actual operation on the latest mutation in the chain.
					if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
						if sqlgraph.IsConstraintError(err) {
							err = &ConstraintError{msg: err.Error(), wrap: err}
						}
					}
				}
				if err != nil {
					return nil, err
				}
				mutation.id = &nodes[i].ID
				// The driver may report the generated ID via the spec.
				if specs[i].ID.Value != nil {
					id := specs[i].ID.Value.(int64)
					nodes[i].ID = int64(id)
				}
				mutation.done = true
				return nodes[i], nil
			})
			// Wrap the mutator with this builder's hooks, innermost last.
			for i := len(builder.hooks) - 1; i >= 0; i-- {
				mut = builder.hooks[i](mut)
			}
			mutators[i] = mut
		}(i, ctx)
	}
	if len(mutators) > 0 {
		// Kick off the chain from the first mutator.
		if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
func (_c *ChannelMonitorHistoryCreateBulk) SaveX(ctx context.Context) []*ChannelMonitorHistory {
|
||||
v, err := _c.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (_c *ChannelMonitorHistoryCreateBulk) Exec(ctx context.Context) error {
|
||||
_, err := _c.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_c *ChannelMonitorHistoryCreateBulk) ExecX(ctx context.Context) {
|
||||
if err := _c.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.ChannelMonitorHistory.CreateBulk(builders...).
//		OnConflict(
//			// Update the row with the new values
//			// that were proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.ChannelMonitorHistoryUpsert) {
//			SetMonitorID(v+v).
//		}).
//		Exec(ctx)
func (_c *ChannelMonitorHistoryCreateBulk) OnConflict(opts ...sql.ConflictOption) *ChannelMonitorHistoryUpsertBulk {
	_c.conflict = opts
	return &ChannelMonitorHistoryUpsertBulk{
		create: _c,
	}
}
|
||||
|
||||
// OnConflictColumns calls `OnConflict` and configures the columns
|
||||
// as conflict target. Using this option is equivalent to using:
|
||||
//
|
||||
// client.ChannelMonitorHistory.Create().
|
||||
// OnConflict(sql.ConflictColumns(columns...)).
|
||||
// Exec(ctx)
|
||||
func (_c *ChannelMonitorHistoryCreateBulk) OnConflictColumns(columns ...string) *ChannelMonitorHistoryUpsertBulk {
|
||||
_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
|
||||
return &ChannelMonitorHistoryUpsertBulk{
|
||||
create: _c,
|
||||
}
|
||||
}
|
||||
|
||||
// ChannelMonitorHistoryUpsertBulk is the builder for "upsert"-ing
// a bulk of ChannelMonitorHistory nodes.
type ChannelMonitorHistoryUpsertBulk struct {
	// create is the underlying bulk create builder whose conflict
	// options are configured by the methods below.
	create *ChannelMonitorHistoryCreateBulk
}
|
||||
|
||||
// UpdateNewValues updates the mutable fields using the new values that
// were set on create. Using this option is equivalent to using:
//
//	client.ChannelMonitorHistory.Create().
//		OnConflict(
//			sql.ResolveWithNewValues(),
//		).
//		Exec(ctx)
func (u *ChannelMonitorHistoryUpsertBulk) UpdateNewValues() *ChannelMonitorHistoryUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
	return u
}

// Ignore sets each column to itself in case of conflict.
// Using this option is equivalent to using:
//
//	client.ChannelMonitorHistory.Create().
//		OnConflict(sql.ResolveWithIgnore()).
//		Exec(ctx)
func (u *ChannelMonitorHistoryUpsertBulk) Ignore() *ChannelMonitorHistoryUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
	return u
}

// DoNothing configures the conflict_action to `DO NOTHING`.
// Supported only by SQLite and PostgreSQL.
func (u *ChannelMonitorHistoryUpsertBulk) DoNothing() *ChannelMonitorHistoryUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.DoNothing())
	return u
}

// Update allows overriding fields `UPDATE` values. See the ChannelMonitorHistoryCreateBulk.OnConflict
// documentation for more info.
// The resolver below wires the raw *sql.UpdateSet into the typed
// ChannelMonitorHistoryUpsert setter passed by the caller.
func (u *ChannelMonitorHistoryUpsertBulk) Update(set func(*ChannelMonitorHistoryUpsert)) *ChannelMonitorHistoryUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
		set(&ChannelMonitorHistoryUpsert{UpdateSet: update})
	}))
	return u
}
|
||||
|
||||
// SetMonitorID sets the "monitor_id" field.
|
||||
func (u *ChannelMonitorHistoryUpsertBulk) SetMonitorID(v int64) *ChannelMonitorHistoryUpsertBulk {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.SetMonitorID(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateMonitorID sets the "monitor_id" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorHistoryUpsertBulk) UpdateMonitorID() *ChannelMonitorHistoryUpsertBulk {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.UpdateMonitorID()
|
||||
})
|
||||
}
|
||||
|
||||
// SetModel sets the "model" field.
|
||||
func (u *ChannelMonitorHistoryUpsertBulk) SetModel(v string) *ChannelMonitorHistoryUpsertBulk {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.SetModel(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateModel sets the "model" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorHistoryUpsertBulk) UpdateModel() *ChannelMonitorHistoryUpsertBulk {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.UpdateModel()
|
||||
})
|
||||
}
|
||||
|
||||
// SetStatus sets the "status" field.
|
||||
func (u *ChannelMonitorHistoryUpsertBulk) SetStatus(v channelmonitorhistory.Status) *ChannelMonitorHistoryUpsertBulk {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.SetStatus(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateStatus sets the "status" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorHistoryUpsertBulk) UpdateStatus() *ChannelMonitorHistoryUpsertBulk {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.UpdateStatus()
|
||||
})
|
||||
}
|
||||
|
||||
// SetLatencyMs sets the "latency_ms" field.
|
||||
func (u *ChannelMonitorHistoryUpsertBulk) SetLatencyMs(v int) *ChannelMonitorHistoryUpsertBulk {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.SetLatencyMs(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddLatencyMs adds v to the "latency_ms" field.
|
||||
func (u *ChannelMonitorHistoryUpsertBulk) AddLatencyMs(v int) *ChannelMonitorHistoryUpsertBulk {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.AddLatencyMs(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateLatencyMs sets the "latency_ms" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorHistoryUpsertBulk) UpdateLatencyMs() *ChannelMonitorHistoryUpsertBulk {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.UpdateLatencyMs()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearLatencyMs clears the value of the "latency_ms" field.
|
||||
func (u *ChannelMonitorHistoryUpsertBulk) ClearLatencyMs() *ChannelMonitorHistoryUpsertBulk {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.ClearLatencyMs()
|
||||
})
|
||||
}
|
||||
|
||||
// SetPingLatencyMs sets the "ping_latency_ms" field.
|
||||
func (u *ChannelMonitorHistoryUpsertBulk) SetPingLatencyMs(v int) *ChannelMonitorHistoryUpsertBulk {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.SetPingLatencyMs(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddPingLatencyMs adds v to the "ping_latency_ms" field.
|
||||
func (u *ChannelMonitorHistoryUpsertBulk) AddPingLatencyMs(v int) *ChannelMonitorHistoryUpsertBulk {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.AddPingLatencyMs(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdatePingLatencyMs sets the "ping_latency_ms" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorHistoryUpsertBulk) UpdatePingLatencyMs() *ChannelMonitorHistoryUpsertBulk {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.UpdatePingLatencyMs()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearPingLatencyMs clears the value of the "ping_latency_ms" field.
|
||||
func (u *ChannelMonitorHistoryUpsertBulk) ClearPingLatencyMs() *ChannelMonitorHistoryUpsertBulk {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.ClearPingLatencyMs()
|
||||
})
|
||||
}
|
||||
|
||||
// SetMessage sets the "message" field.
|
||||
func (u *ChannelMonitorHistoryUpsertBulk) SetMessage(v string) *ChannelMonitorHistoryUpsertBulk {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.SetMessage(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateMessage sets the "message" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorHistoryUpsertBulk) UpdateMessage() *ChannelMonitorHistoryUpsertBulk {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.UpdateMessage()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearMessage clears the value of the "message" field.
|
||||
func (u *ChannelMonitorHistoryUpsertBulk) ClearMessage() *ChannelMonitorHistoryUpsertBulk {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.ClearMessage()
|
||||
})
|
||||
}
|
||||
|
||||
// SetCheckedAt sets the "checked_at" field.
|
||||
func (u *ChannelMonitorHistoryUpsertBulk) SetCheckedAt(v time.Time) *ChannelMonitorHistoryUpsertBulk {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.SetCheckedAt(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateCheckedAt sets the "checked_at" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorHistoryUpsertBulk) UpdateCheckedAt() *ChannelMonitorHistoryUpsertBulk {
|
||||
return u.Update(func(s *ChannelMonitorHistoryUpsert) {
|
||||
s.UpdateCheckedAt()
|
||||
})
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (u *ChannelMonitorHistoryUpsertBulk) Exec(ctx context.Context) error {
|
||||
if u.create.err != nil {
|
||||
return u.create.err
|
||||
}
|
||||
for i, b := range u.create.builders {
|
||||
if len(b.conflict) != 0 {
|
||||
return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the ChannelMonitorHistoryCreateBulk instead", i)
|
||||
}
|
||||
}
|
||||
if len(u.create.conflict) == 0 {
|
||||
return errors.New("ent: missing options for ChannelMonitorHistoryCreateBulk.OnConflict")
|
||||
}
|
||||
return u.create.Exec(ctx)
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (u *ChannelMonitorHistoryUpsertBulk) ExecX(ctx context.Context) {
|
||||
if err := u.create.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
88
backend/ent/channelmonitorhistory_delete.go
Normal file
88
backend/ent/channelmonitorhistory_delete.go
Normal file
@@ -0,0 +1,88 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// ChannelMonitorHistoryDelete is the builder for deleting a ChannelMonitorHistory entity.
|
||||
type ChannelMonitorHistoryDelete struct {
|
||||
config
|
||||
hooks []Hook
|
||||
mutation *ChannelMonitorHistoryMutation
|
||||
}
|
||||
|
||||
// Where appends a list predicates to the ChannelMonitorHistoryDelete builder.
|
||||
func (_d *ChannelMonitorHistoryDelete) Where(ps ...predicate.ChannelMonitorHistory) *ChannelMonitorHistoryDelete {
|
||||
_d.mutation.Where(ps...)
|
||||
return _d
|
||||
}
|
||||
|
||||
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||
func (_d *ChannelMonitorHistoryDelete) Exec(ctx context.Context) (int, error) {
|
||||
return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_d *ChannelMonitorHistoryDelete) ExecX(ctx context.Context) int {
|
||||
n, err := _d.Exec(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (_d *ChannelMonitorHistoryDelete) sqlExec(ctx context.Context) (int, error) {
|
||||
_spec := sqlgraph.NewDeleteSpec(channelmonitorhistory.Table, sqlgraph.NewFieldSpec(channelmonitorhistory.FieldID, field.TypeInt64))
|
||||
if ps := _d.mutation.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
|
||||
if err != nil && sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
_d.mutation.done = true
|
||||
return affected, err
|
||||
}
|
||||
|
||||
// ChannelMonitorHistoryDeleteOne is the builder for deleting a single ChannelMonitorHistory entity.
|
||||
type ChannelMonitorHistoryDeleteOne struct {
|
||||
_d *ChannelMonitorHistoryDelete
|
||||
}
|
||||
|
||||
// Where appends a list predicates to the ChannelMonitorHistoryDelete builder.
|
||||
func (_d *ChannelMonitorHistoryDeleteOne) Where(ps ...predicate.ChannelMonitorHistory) *ChannelMonitorHistoryDeleteOne {
|
||||
_d._d.mutation.Where(ps...)
|
||||
return _d
|
||||
}
|
||||
|
||||
// Exec executes the deletion query.
|
||||
func (_d *ChannelMonitorHistoryDeleteOne) Exec(ctx context.Context) error {
|
||||
n, err := _d._d.Exec(ctx)
|
||||
switch {
|
||||
case err != nil:
|
||||
return err
|
||||
case n == 0:
|
||||
return &NotFoundError{channelmonitorhistory.Label}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_d *ChannelMonitorHistoryDeleteOne) ExecX(ctx context.Context) {
|
||||
if err := _d.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
643
backend/ent/channelmonitorhistory_query.go
Normal file
643
backend/ent/channelmonitorhistory_query.go
Normal file
@@ -0,0 +1,643 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// ChannelMonitorHistoryQuery is the builder for querying ChannelMonitorHistory entities.
|
||||
type ChannelMonitorHistoryQuery struct {
|
||||
config
|
||||
ctx *QueryContext
|
||||
order []channelmonitorhistory.OrderOption
|
||||
inters []Interceptor
|
||||
predicates []predicate.ChannelMonitorHistory
|
||||
withMonitor *ChannelMonitorQuery
|
||||
modifiers []func(*sql.Selector)
|
||||
// intermediate query (i.e. traversal path).
|
||||
sql *sql.Selector
|
||||
path func(context.Context) (*sql.Selector, error)
|
||||
}
|
||||
|
||||
// Where adds a new predicate for the ChannelMonitorHistoryQuery builder.
|
||||
func (_q *ChannelMonitorHistoryQuery) Where(ps ...predicate.ChannelMonitorHistory) *ChannelMonitorHistoryQuery {
|
||||
_q.predicates = append(_q.predicates, ps...)
|
||||
return _q
|
||||
}
|
||||
|
||||
// Limit the number of records to be returned by this query.
|
||||
func (_q *ChannelMonitorHistoryQuery) Limit(limit int) *ChannelMonitorHistoryQuery {
|
||||
_q.ctx.Limit = &limit
|
||||
return _q
|
||||
}
|
||||
|
||||
// Offset to start from.
|
||||
func (_q *ChannelMonitorHistoryQuery) Offset(offset int) *ChannelMonitorHistoryQuery {
|
||||
_q.ctx.Offset = &offset
|
||||
return _q
|
||||
}
|
||||
|
||||
// Unique configures the query builder to filter duplicate records on query.
|
||||
// By default, unique is set to true, and can be disabled using this method.
|
||||
func (_q *ChannelMonitorHistoryQuery) Unique(unique bool) *ChannelMonitorHistoryQuery {
|
||||
_q.ctx.Unique = &unique
|
||||
return _q
|
||||
}
|
||||
|
||||
// Order specifies how the records should be ordered.
|
||||
func (_q *ChannelMonitorHistoryQuery) Order(o ...channelmonitorhistory.OrderOption) *ChannelMonitorHistoryQuery {
|
||||
_q.order = append(_q.order, o...)
|
||||
return _q
|
||||
}
|
||||
|
||||
// QueryMonitor chains the current query on the "monitor" edge.
|
||||
func (_q *ChannelMonitorHistoryQuery) QueryMonitor() *ChannelMonitorQuery {
|
||||
query := (&ChannelMonitorClient{config: _q.config}).Query()
|
||||
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||
if err := _q.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
selector := _q.sqlQuery(ctx)
|
||||
if err := selector.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(channelmonitorhistory.Table, channelmonitorhistory.FieldID, selector),
|
||||
sqlgraph.To(channelmonitor.Table, channelmonitor.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, channelmonitorhistory.MonitorTable, channelmonitorhistory.MonitorColumn),
|
||||
)
|
||||
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||
return fromU, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// First returns the first ChannelMonitorHistory entity from the query.
|
||||
// Returns a *NotFoundError when no ChannelMonitorHistory was found.
|
||||
func (_q *ChannelMonitorHistoryQuery) First(ctx context.Context) (*ChannelMonitorHistory, error) {
|
||||
nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(nodes) == 0 {
|
||||
return nil, &NotFoundError{channelmonitorhistory.Label}
|
||||
}
|
||||
return nodes[0], nil
|
||||
}
|
||||
|
||||
// FirstX is like First, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorHistoryQuery) FirstX(ctx context.Context) *ChannelMonitorHistory {
|
||||
node, err := _q.First(ctx)
|
||||
if err != nil && !IsNotFound(err) {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// FirstID returns the first ChannelMonitorHistory ID from the query.
|
||||
// Returns a *NotFoundError when no ChannelMonitorHistory ID was found.
|
||||
func (_q *ChannelMonitorHistoryQuery) FirstID(ctx context.Context) (id int64, err error) {
|
||||
var ids []int64
|
||||
if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
|
||||
return
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
err = &NotFoundError{channelmonitorhistory.Label}
|
||||
return
|
||||
}
|
||||
return ids[0], nil
|
||||
}
|
||||
|
||||
// FirstIDX is like FirstID, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorHistoryQuery) FirstIDX(ctx context.Context) int64 {
|
||||
id, err := _q.FirstID(ctx)
|
||||
if err != nil && !IsNotFound(err) {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// Only returns a single ChannelMonitorHistory entity found by the query, ensuring it only returns one.
|
||||
// Returns a *NotSingularError when more than one ChannelMonitorHistory entity is found.
|
||||
// Returns a *NotFoundError when no ChannelMonitorHistory entities are found.
|
||||
func (_q *ChannelMonitorHistoryQuery) Only(ctx context.Context) (*ChannelMonitorHistory, error) {
|
||||
nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch len(nodes) {
|
||||
case 1:
|
||||
return nodes[0], nil
|
||||
case 0:
|
||||
return nil, &NotFoundError{channelmonitorhistory.Label}
|
||||
default:
|
||||
return nil, &NotSingularError{channelmonitorhistory.Label}
|
||||
}
|
||||
}
|
||||
|
||||
// OnlyX is like Only, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorHistoryQuery) OnlyX(ctx context.Context) *ChannelMonitorHistory {
|
||||
node, err := _q.Only(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// OnlyID is like Only, but returns the only ChannelMonitorHistory ID in the query.
|
||||
// Returns a *NotSingularError when more than one ChannelMonitorHistory ID is found.
|
||||
// Returns a *NotFoundError when no entities are found.
|
||||
func (_q *ChannelMonitorHistoryQuery) OnlyID(ctx context.Context) (id int64, err error) {
|
||||
var ids []int64
|
||||
if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(ids) {
|
||||
case 1:
|
||||
id = ids[0]
|
||||
case 0:
|
||||
err = &NotFoundError{channelmonitorhistory.Label}
|
||||
default:
|
||||
err = &NotSingularError{channelmonitorhistory.Label}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// OnlyIDX is like OnlyID, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorHistoryQuery) OnlyIDX(ctx context.Context) int64 {
|
||||
id, err := _q.OnlyID(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// All executes the query and returns a list of ChannelMonitorHistories.
|
||||
func (_q *ChannelMonitorHistoryQuery) All(ctx context.Context) ([]*ChannelMonitorHistory, error) {
|
||||
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
|
||||
if err := _q.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
qr := querierAll[[]*ChannelMonitorHistory, *ChannelMonitorHistoryQuery]()
|
||||
return withInterceptors[[]*ChannelMonitorHistory](ctx, _q, qr, _q.inters)
|
||||
}
|
||||
|
||||
// AllX is like All, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorHistoryQuery) AllX(ctx context.Context) []*ChannelMonitorHistory {
|
||||
nodes, err := _q.All(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return nodes
|
||||
}
|
||||
|
||||
// IDs executes the query and returns a list of ChannelMonitorHistory IDs.
|
||||
func (_q *ChannelMonitorHistoryQuery) IDs(ctx context.Context) (ids []int64, err error) {
|
||||
if _q.ctx.Unique == nil && _q.path != nil {
|
||||
_q.Unique(true)
|
||||
}
|
||||
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
|
||||
if err = _q.Select(channelmonitorhistory.FieldID).Scan(ctx, &ids); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
// IDsX is like IDs, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorHistoryQuery) IDsX(ctx context.Context) []int64 {
|
||||
ids, err := _q.IDs(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return ids
|
||||
}
|
||||
|
||||
// Count returns the count of the given query.
|
||||
func (_q *ChannelMonitorHistoryQuery) Count(ctx context.Context) (int, error) {
|
||||
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
|
||||
if err := _q.prepareQuery(ctx); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return withInterceptors[int](ctx, _q, querierCount[*ChannelMonitorHistoryQuery](), _q.inters)
|
||||
}
|
||||
|
||||
// CountX is like Count, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorHistoryQuery) CountX(ctx context.Context) int {
|
||||
count, err := _q.Count(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// Exist returns true if the query has elements in the graph.
|
||||
func (_q *ChannelMonitorHistoryQuery) Exist(ctx context.Context) (bool, error) {
|
||||
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
|
||||
switch _, err := _q.FirstID(ctx); {
|
||||
case IsNotFound(err):
|
||||
return false, nil
|
||||
case err != nil:
|
||||
return false, fmt.Errorf("ent: check existence: %w", err)
|
||||
default:
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// ExistX is like Exist, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorHistoryQuery) ExistX(ctx context.Context) bool {
|
||||
exist, err := _q.Exist(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return exist
|
||||
}
|
||||
|
||||
// Clone returns a duplicate of the ChannelMonitorHistoryQuery builder, including all associated steps. It can be
|
||||
// used to prepare common query builders and use them differently after the clone is made.
|
||||
func (_q *ChannelMonitorHistoryQuery) Clone() *ChannelMonitorHistoryQuery {
|
||||
if _q == nil {
|
||||
return nil
|
||||
}
|
||||
return &ChannelMonitorHistoryQuery{
|
||||
config: _q.config,
|
||||
ctx: _q.ctx.Clone(),
|
||||
order: append([]channelmonitorhistory.OrderOption{}, _q.order...),
|
||||
inters: append([]Interceptor{}, _q.inters...),
|
||||
predicates: append([]predicate.ChannelMonitorHistory{}, _q.predicates...),
|
||||
withMonitor: _q.withMonitor.Clone(),
|
||||
// clone intermediate query.
|
||||
sql: _q.sql.Clone(),
|
||||
path: _q.path,
|
||||
}
|
||||
}
|
||||
|
||||
// WithMonitor tells the query-builder to eager-load the nodes that are connected to
|
||||
// the "monitor" edge. The optional arguments are used to configure the query builder of the edge.
|
||||
func (_q *ChannelMonitorHistoryQuery) WithMonitor(opts ...func(*ChannelMonitorQuery)) *ChannelMonitorHistoryQuery {
|
||||
query := (&ChannelMonitorClient{config: _q.config}).Query()
|
||||
for _, opt := range opts {
|
||||
opt(query)
|
||||
}
|
||||
_q.withMonitor = query
|
||||
return _q
|
||||
}
|
||||
|
||||
// GroupBy is used to group vertices by one or more fields/columns.
|
||||
// It is often used with aggregate functions, like: count, max, mean, min, sum.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var v []struct {
|
||||
// MonitorID int64 `json:"monitor_id,omitempty"`
|
||||
// Count int `json:"count,omitempty"`
|
||||
// }
|
||||
//
|
||||
// client.ChannelMonitorHistory.Query().
|
||||
// GroupBy(channelmonitorhistory.FieldMonitorID).
|
||||
// Aggregate(ent.Count()).
|
||||
// Scan(ctx, &v)
|
||||
func (_q *ChannelMonitorHistoryQuery) GroupBy(field string, fields ...string) *ChannelMonitorHistoryGroupBy {
|
||||
_q.ctx.Fields = append([]string{field}, fields...)
|
||||
grbuild := &ChannelMonitorHistoryGroupBy{build: _q}
|
||||
grbuild.flds = &_q.ctx.Fields
|
||||
grbuild.label = channelmonitorhistory.Label
|
||||
grbuild.scan = grbuild.Scan
|
||||
return grbuild
|
||||
}
|
||||
|
||||
// Select allows the selection one or more fields/columns for the given query,
|
||||
// instead of selecting all fields in the entity.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var v []struct {
|
||||
// MonitorID int64 `json:"monitor_id,omitempty"`
|
||||
// }
|
||||
//
|
||||
// client.ChannelMonitorHistory.Query().
|
||||
// Select(channelmonitorhistory.FieldMonitorID).
|
||||
// Scan(ctx, &v)
|
||||
func (_q *ChannelMonitorHistoryQuery) Select(fields ...string) *ChannelMonitorHistorySelect {
|
||||
_q.ctx.Fields = append(_q.ctx.Fields, fields...)
|
||||
sbuild := &ChannelMonitorHistorySelect{ChannelMonitorHistoryQuery: _q}
|
||||
sbuild.label = channelmonitorhistory.Label
|
||||
sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
|
||||
return sbuild
|
||||
}
|
||||
|
||||
// Aggregate returns a ChannelMonitorHistorySelect configured with the given aggregations.
|
||||
func (_q *ChannelMonitorHistoryQuery) Aggregate(fns ...AggregateFunc) *ChannelMonitorHistorySelect {
|
||||
return _q.Select().Aggregate(fns...)
|
||||
}
|
||||
|
||||
func (_q *ChannelMonitorHistoryQuery) prepareQuery(ctx context.Context) error {
|
||||
for _, inter := range _q.inters {
|
||||
if inter == nil {
|
||||
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
|
||||
}
|
||||
if trv, ok := inter.(Traverser); ok {
|
||||
if err := trv.Traverse(ctx, _q); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, f := range _q.ctx.Fields {
|
||||
if !channelmonitorhistory.ValidColumn(f) {
|
||||
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||
}
|
||||
}
|
||||
if _q.path != nil {
|
||||
prev, err := _q.path(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_q.sql = prev
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (_q *ChannelMonitorHistoryQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*ChannelMonitorHistory, error) {
|
||||
var (
|
||||
nodes = []*ChannelMonitorHistory{}
|
||||
_spec = _q.querySpec()
|
||||
loadedTypes = [1]bool{
|
||||
_q.withMonitor != nil,
|
||||
}
|
||||
)
|
||||
_spec.ScanValues = func(columns []string) ([]any, error) {
|
||||
return (*ChannelMonitorHistory).scanValues(nil, columns)
|
||||
}
|
||||
_spec.Assign = func(columns []string, values []any) error {
|
||||
node := &ChannelMonitorHistory{config: _q.config}
|
||||
nodes = append(nodes, node)
|
||||
node.Edges.loadedTypes = loadedTypes
|
||||
return node.assignValues(columns, values)
|
||||
}
|
||||
if len(_q.modifiers) > 0 {
|
||||
_spec.Modifiers = _q.modifiers
|
||||
}
|
||||
for i := range hooks {
|
||||
hooks[i](ctx, _spec)
|
||||
}
|
||||
if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(nodes) == 0 {
|
||||
return nodes, nil
|
||||
}
|
||||
if query := _q.withMonitor; query != nil {
|
||||
if err := _q.loadMonitor(ctx, query, nodes, nil,
|
||||
func(n *ChannelMonitorHistory, e *ChannelMonitor) { n.Edges.Monitor = e }); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
func (_q *ChannelMonitorHistoryQuery) loadMonitor(ctx context.Context, query *ChannelMonitorQuery, nodes []*ChannelMonitorHistory, init func(*ChannelMonitorHistory), assign func(*ChannelMonitorHistory, *ChannelMonitor)) error {
|
||||
ids := make([]int64, 0, len(nodes))
|
||||
nodeids := make(map[int64][]*ChannelMonitorHistory)
|
||||
for i := range nodes {
|
||||
fk := nodes[i].MonitorID
|
||||
if _, ok := nodeids[fk]; !ok {
|
||||
ids = append(ids, fk)
|
||||
}
|
||||
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
return nil
|
||||
}
|
||||
query.Where(channelmonitor.IDIn(ids...))
|
||||
neighbors, err := query.All(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, n := range neighbors {
|
||||
nodes, ok := nodeids[n.ID]
|
||||
if !ok {
|
||||
return fmt.Errorf(`unexpected foreign-key "monitor_id" returned %v`, n.ID)
|
||||
}
|
||||
for i := range nodes {
|
||||
assign(nodes[i], n)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (_q *ChannelMonitorHistoryQuery) sqlCount(ctx context.Context) (int, error) {
|
||||
_spec := _q.querySpec()
|
||||
if len(_q.modifiers) > 0 {
|
||||
_spec.Modifiers = _q.modifiers
|
||||
}
|
||||
_spec.Node.Columns = _q.ctx.Fields
|
||||
if len(_q.ctx.Fields) > 0 {
|
||||
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
||||
}
|
||||
return sqlgraph.CountNodes(ctx, _q.driver, _spec)
|
||||
}
|
||||
|
||||
func (_q *ChannelMonitorHistoryQuery) querySpec() *sqlgraph.QuerySpec {
|
||||
_spec := sqlgraph.NewQuerySpec(channelmonitorhistory.Table, channelmonitorhistory.Columns, sqlgraph.NewFieldSpec(channelmonitorhistory.FieldID, field.TypeInt64))
|
||||
_spec.From = _q.sql
|
||||
if unique := _q.ctx.Unique; unique != nil {
|
||||
_spec.Unique = *unique
|
||||
} else if _q.path != nil {
|
||||
_spec.Unique = true
|
||||
}
|
||||
if fields := _q.ctx.Fields; len(fields) > 0 {
|
||||
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, channelmonitorhistory.FieldID)
|
||||
for i := range fields {
|
||||
if fields[i] != channelmonitorhistory.FieldID {
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
|
||||
}
|
||||
}
|
||||
if _q.withMonitor != nil {
|
||||
_spec.Node.AddColumnOnce(channelmonitorhistory.FieldMonitorID)
|
||||
}
|
||||
}
|
||||
if ps := _q.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
if limit := _q.ctx.Limit; limit != nil {
|
||||
_spec.Limit = *limit
|
||||
}
|
||||
if offset := _q.ctx.Offset; offset != nil {
|
||||
_spec.Offset = *offset
|
||||
}
|
||||
if ps := _q.order; len(ps) > 0 {
|
||||
_spec.Order = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
return _spec
|
||||
}
|
||||
|
||||
func (_q *ChannelMonitorHistoryQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
||||
builder := sql.Dialect(_q.driver.Dialect())
|
||||
t1 := builder.Table(channelmonitorhistory.Table)
|
||||
columns := _q.ctx.Fields
|
||||
if len(columns) == 0 {
|
||||
columns = channelmonitorhistory.Columns
|
||||
}
|
||||
selector := builder.Select(t1.Columns(columns...)...).From(t1)
|
||||
if _q.sql != nil {
|
||||
selector = _q.sql
|
||||
selector.Select(selector.Columns(columns...)...)
|
||||
}
|
||||
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
||||
selector.Distinct()
|
||||
}
|
||||
for _, m := range _q.modifiers {
|
||||
m(selector)
|
||||
}
|
||||
for _, p := range _q.predicates {
|
||||
p(selector)
|
||||
}
|
||||
for _, p := range _q.order {
|
||||
p(selector)
|
||||
}
|
||||
if offset := _q.ctx.Offset; offset != nil {
|
||||
// limit is mandatory for offset clause. We start
|
||||
// with default value, and override it below if needed.
|
||||
selector.Offset(*offset).Limit(math.MaxInt32)
|
||||
}
|
||||
if limit := _q.ctx.Limit; limit != nil {
|
||||
selector.Limit(*limit)
|
||||
}
|
||||
return selector
|
||||
}
|
||||
|
||||
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||
// either committed or rolled-back.
|
||||
func (_q *ChannelMonitorHistoryQuery) ForUpdate(opts ...sql.LockOption) *ChannelMonitorHistoryQuery {
|
||||
if _q.driver.Dialect() == dialect.Postgres {
|
||||
_q.Unique(false)
|
||||
}
|
||||
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||
s.ForUpdate(opts...)
|
||||
})
|
||||
return _q
|
||||
}
|
||||
|
||||
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||
// until your transaction commits.
|
||||
func (_q *ChannelMonitorHistoryQuery) ForShare(opts ...sql.LockOption) *ChannelMonitorHistoryQuery {
|
||||
if _q.driver.Dialect() == dialect.Postgres {
|
||||
_q.Unique(false)
|
||||
}
|
||||
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||
s.ForShare(opts...)
|
||||
})
|
||||
return _q
|
||||
}
|
||||
|
||||
// ChannelMonitorHistoryGroupBy is the group-by builder for ChannelMonitorHistory entities.
|
||||
type ChannelMonitorHistoryGroupBy struct {
|
||||
selector
|
||||
build *ChannelMonitorHistoryQuery
|
||||
}
|
||||
|
||||
// Aggregate adds the given aggregation functions to the group-by query.
// The functions are rendered into SQL expressions when the query is executed.
func (_g *ChannelMonitorHistoryGroupBy) Aggregate(fns ...AggregateFunc) *ChannelMonitorHistoryGroupBy {
	_g.fns = append(_g.fns, fns...)
	return _g
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
func (_g *ChannelMonitorHistoryGroupBy) Scan(ctx context.Context, v any) error {
	// Tag the context with the operation type for interceptors/tracing.
	ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
	// Run pre-execution checks on the underlying query before scanning.
	if err := _g.build.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*ChannelMonitorHistoryQuery, *ChannelMonitorHistoryGroupBy](ctx, _g.build, _g, _g.build.inters, v)
}
|
||||
|
||||
// sqlScan builds the GROUP BY statement, executes it, and scans the rows into v.
func (_g *ChannelMonitorHistoryGroupBy) sqlScan(ctx context.Context, root *ChannelMonitorHistoryQuery, v any) error {
	selector := root.sqlQuery(ctx).Select()
	// Render each aggregation function into its SQL expression.
	aggregation := make([]string, 0, len(_g.fns))
	for _, fn := range _g.fns {
		aggregation = append(aggregation, fn(selector))
	}
	// If no columns were selected explicitly, project the group-by fields
	// followed by the aggregation expressions.
	if len(selector.SelectedColumns()) == 0 {
		columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
		for _, f := range *_g.flds {
			columns = append(columns, selector.C(f))
		}
		columns = append(columns, aggregation...)
		selector.Select(columns...)
	}
	selector.GroupBy(selector.Columns(*_g.flds...)...)
	// Surface any error accumulated while building the selector.
	if err := selector.Err(); err != nil {
		return err
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||
|
||||
// ChannelMonitorHistorySelect is the builder for selecting fields of ChannelMonitorHistory entities.
type ChannelMonitorHistorySelect struct {
	// The embedded query supplies predicates, ordering, limits and the driver.
	*ChannelMonitorHistoryQuery
	// selector embeds the shared field/aggregation bookkeeping used by all builders.
	selector
}
|
||||
|
||||
// Aggregate adds the given aggregation functions to the selector query.
// The functions are rendered into SQL expressions when the query is executed.
func (_s *ChannelMonitorHistorySelect) Aggregate(fns ...AggregateFunc) *ChannelMonitorHistorySelect {
	_s.fns = append(_s.fns, fns...)
	return _s
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
func (_s *ChannelMonitorHistorySelect) Scan(ctx context.Context, v any) error {
	// Tag the context with the operation type for interceptors/tracing.
	ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
	// Run pre-execution checks on the underlying query before scanning.
	if err := _s.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*ChannelMonitorHistoryQuery, *ChannelMonitorHistorySelect](ctx, _s.ChannelMonitorHistoryQuery, _s, _s.inters, v)
}
|
||||
|
||||
// sqlScan builds the SELECT statement, executes it, and scans the rows into v.
func (_s *ChannelMonitorHistorySelect) sqlScan(ctx context.Context, root *ChannelMonitorHistoryQuery, v any) error {
	selector := root.sqlQuery(ctx)
	// Render each aggregation function into its SQL expression.
	aggregation := make([]string, 0, len(_s.fns))
	for _, fn := range _s.fns {
		aggregation = append(aggregation, fn(selector))
	}
	switch n := len(*_s.selector.flds); {
	// No explicit fields: the aggregations become the whole projection.
	case n == 0 && len(aggregation) > 0:
		selector.Select(aggregation...)
	// Explicit fields plus aggregations: append the latter to the projection.
	case n != 0 && len(aggregation) > 0:
		selector.AppendSelect(aggregation...)
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _s.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||
635
backend/ent/channelmonitorhistory_update.go
Normal file
635
backend/ent/channelmonitorhistory_update.go
Normal file
@@ -0,0 +1,635 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// ChannelMonitorHistoryUpdate is the builder for updating ChannelMonitorHistory entities.
type ChannelMonitorHistoryUpdate struct {
	config
	// hooks run around the mutation when Save is called.
	hooks []Hook
	// mutation accumulates the staged field and edge changes to apply.
	mutation *ChannelMonitorHistoryMutation
}
|
||||
|
||||
// Where appends a list of predicates to the ChannelMonitorHistoryUpdate builder.
// The predicates restrict which rows the UPDATE statement affects.
func (_u *ChannelMonitorHistoryUpdate) Where(ps ...predicate.ChannelMonitorHistory) *ChannelMonitorHistoryUpdate {
	_u.mutation.Where(ps...)
	return _u
}
|
||||
|
||||
// SetMonitorID sets the "monitor_id" field.
func (_u *ChannelMonitorHistoryUpdate) SetMonitorID(v int64) *ChannelMonitorHistoryUpdate {
	_u.mutation.SetMonitorID(v)
	return _u
}

// SetNillableMonitorID sets the "monitor_id" field if the given value is not nil.
func (_u *ChannelMonitorHistoryUpdate) SetNillableMonitorID(v *int64) *ChannelMonitorHistoryUpdate {
	if v != nil {
		_u.SetMonitorID(*v)
	}
	return _u
}

// SetModel sets the "model" field.
func (_u *ChannelMonitorHistoryUpdate) SetModel(v string) *ChannelMonitorHistoryUpdate {
	_u.mutation.SetModel(v)
	return _u
}

// SetNillableModel sets the "model" field if the given value is not nil.
func (_u *ChannelMonitorHistoryUpdate) SetNillableModel(v *string) *ChannelMonitorHistoryUpdate {
	if v != nil {
		_u.SetModel(*v)
	}
	return _u
}

// SetStatus sets the "status" field.
func (_u *ChannelMonitorHistoryUpdate) SetStatus(v channelmonitorhistory.Status) *ChannelMonitorHistoryUpdate {
	_u.mutation.SetStatus(v)
	return _u
}

// SetNillableStatus sets the "status" field if the given value is not nil.
func (_u *ChannelMonitorHistoryUpdate) SetNillableStatus(v *channelmonitorhistory.Status) *ChannelMonitorHistoryUpdate {
	if v != nil {
		_u.SetStatus(*v)
	}
	return _u
}

// SetLatencyMs sets the "latency_ms" field.
func (_u *ChannelMonitorHistoryUpdate) SetLatencyMs(v int) *ChannelMonitorHistoryUpdate {
	// Reset first so the explicit value replaces any previously staged change
	// for this field (e.g. an AddLatencyMs increment) instead of combining with it.
	_u.mutation.ResetLatencyMs()
	_u.mutation.SetLatencyMs(v)
	return _u
}

// SetNillableLatencyMs sets the "latency_ms" field if the given value is not nil.
func (_u *ChannelMonitorHistoryUpdate) SetNillableLatencyMs(v *int) *ChannelMonitorHistoryUpdate {
	if v != nil {
		_u.SetLatencyMs(*v)
	}
	return _u
}

// AddLatencyMs adds value to the "latency_ms" field.
func (_u *ChannelMonitorHistoryUpdate) AddLatencyMs(v int) *ChannelMonitorHistoryUpdate {
	_u.mutation.AddLatencyMs(v)
	return _u
}

// ClearLatencyMs clears the value of the "latency_ms" field.
func (_u *ChannelMonitorHistoryUpdate) ClearLatencyMs() *ChannelMonitorHistoryUpdate {
	_u.mutation.ClearLatencyMs()
	return _u
}

// SetPingLatencyMs sets the "ping_latency_ms" field.
func (_u *ChannelMonitorHistoryUpdate) SetPingLatencyMs(v int) *ChannelMonitorHistoryUpdate {
	// Reset first so the explicit value replaces any staged increment (see SetLatencyMs).
	_u.mutation.ResetPingLatencyMs()
	_u.mutation.SetPingLatencyMs(v)
	return _u
}

// SetNillablePingLatencyMs sets the "ping_latency_ms" field if the given value is not nil.
func (_u *ChannelMonitorHistoryUpdate) SetNillablePingLatencyMs(v *int) *ChannelMonitorHistoryUpdate {
	if v != nil {
		_u.SetPingLatencyMs(*v)
	}
	return _u
}

// AddPingLatencyMs adds value to the "ping_latency_ms" field.
func (_u *ChannelMonitorHistoryUpdate) AddPingLatencyMs(v int) *ChannelMonitorHistoryUpdate {
	_u.mutation.AddPingLatencyMs(v)
	return _u
}

// ClearPingLatencyMs clears the value of the "ping_latency_ms" field.
func (_u *ChannelMonitorHistoryUpdate) ClearPingLatencyMs() *ChannelMonitorHistoryUpdate {
	_u.mutation.ClearPingLatencyMs()
	return _u
}

// SetMessage sets the "message" field.
func (_u *ChannelMonitorHistoryUpdate) SetMessage(v string) *ChannelMonitorHistoryUpdate {
	_u.mutation.SetMessage(v)
	return _u
}

// SetNillableMessage sets the "message" field if the given value is not nil.
func (_u *ChannelMonitorHistoryUpdate) SetNillableMessage(v *string) *ChannelMonitorHistoryUpdate {
	if v != nil {
		_u.SetMessage(*v)
	}
	return _u
}

// ClearMessage clears the value of the "message" field.
func (_u *ChannelMonitorHistoryUpdate) ClearMessage() *ChannelMonitorHistoryUpdate {
	_u.mutation.ClearMessage()
	return _u
}

// SetCheckedAt sets the "checked_at" field.
func (_u *ChannelMonitorHistoryUpdate) SetCheckedAt(v time.Time) *ChannelMonitorHistoryUpdate {
	_u.mutation.SetCheckedAt(v)
	return _u
}

// SetNillableCheckedAt sets the "checked_at" field if the given value is not nil.
func (_u *ChannelMonitorHistoryUpdate) SetNillableCheckedAt(v *time.Time) *ChannelMonitorHistoryUpdate {
	if v != nil {
		_u.SetCheckedAt(*v)
	}
	return _u
}

// SetMonitor sets the "monitor" edge to the ChannelMonitor entity.
// Equivalent to setting the "monitor_id" foreign-key field directly.
func (_u *ChannelMonitorHistoryUpdate) SetMonitor(v *ChannelMonitor) *ChannelMonitorHistoryUpdate {
	return _u.SetMonitorID(v.ID)
}

// Mutation returns the ChannelMonitorHistoryMutation object of the builder.
func (_u *ChannelMonitorHistoryUpdate) Mutation() *ChannelMonitorHistoryMutation {
	return _u.mutation
}

// ClearMonitor clears the "monitor" edge to the ChannelMonitor entity.
func (_u *ChannelMonitorHistoryUpdate) ClearMonitor() *ChannelMonitorHistoryUpdate {
	_u.mutation.ClearMonitor()
	return _u
}
|
||||
|
||||
// Save executes the query and returns the number of nodes affected by the update operation.
// Registered hooks are run around sqlSave.
func (_u *ChannelMonitorHistoryUpdate) Save(ctx context.Context) (int, error) {
	return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (_u *ChannelMonitorHistoryUpdate) SaveX(ctx context.Context) int {
	affected, err := _u.Save(ctx)
	if err != nil {
		panic(err)
	}
	return affected
}

// Exec executes the query.
func (_u *ChannelMonitorHistoryUpdate) Exec(ctx context.Context) error {
	_, err := _u.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (_u *ChannelMonitorHistoryUpdate) ExecX(ctx context.Context) {
	if err := _u.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
// It validates only fields that were actually staged in the mutation.
func (_u *ChannelMonitorHistoryUpdate) check() error {
	if v, ok := _u.mutation.Model(); ok {
		if err := channelmonitorhistory.ModelValidator(v); err != nil {
			return &ValidationError{Name: "model", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorHistory.model": %w`, err)}
		}
	}
	if v, ok := _u.mutation.Status(); ok {
		if err := channelmonitorhistory.StatusValidator(v); err != nil {
			return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorHistory.status": %w`, err)}
		}
	}
	if v, ok := _u.mutation.Message(); ok {
		if err := channelmonitorhistory.MessageValidator(v); err != nil {
			return &ValidationError{Name: "message", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorHistory.message": %w`, err)}
		}
	}
	// The "monitor" edge is required: it may be replaced, but not cleared.
	if _u.mutation.MonitorCleared() && len(_u.mutation.MonitorIDs()) > 0 {
		return errors.New(`ent: clearing a required unique edge "ChannelMonitorHistory.monitor"`)
	}
	return nil
}
|
||||
|
||||
// sqlSave validates the builder, translates the staged mutation into an
// UPDATE spec, executes it, and returns the number of affected rows.
func (_u *ChannelMonitorHistoryUpdate) sqlSave(ctx context.Context) (_node int, err error) {
	if err := _u.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(channelmonitorhistory.Table, channelmonitorhistory.Columns, sqlgraph.NewFieldSpec(channelmonitorhistory.FieldID, field.TypeInt64))
	// Apply builder predicates to the UPDATE's WHERE clause.
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Translate staged field changes into column set/add/clear operations.
	if value, ok := _u.mutation.Model(); ok {
		_spec.SetField(channelmonitorhistory.FieldModel, field.TypeString, value)
	}
	if value, ok := _u.mutation.Status(); ok {
		_spec.SetField(channelmonitorhistory.FieldStatus, field.TypeEnum, value)
	}
	if value, ok := _u.mutation.LatencyMs(); ok {
		_spec.SetField(channelmonitorhistory.FieldLatencyMs, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedLatencyMs(); ok {
		_spec.AddField(channelmonitorhistory.FieldLatencyMs, field.TypeInt, value)
	}
	if _u.mutation.LatencyMsCleared() {
		_spec.ClearField(channelmonitorhistory.FieldLatencyMs, field.TypeInt)
	}
	if value, ok := _u.mutation.PingLatencyMs(); ok {
		_spec.SetField(channelmonitorhistory.FieldPingLatencyMs, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedPingLatencyMs(); ok {
		_spec.AddField(channelmonitorhistory.FieldPingLatencyMs, field.TypeInt, value)
	}
	if _u.mutation.PingLatencyMsCleared() {
		_spec.ClearField(channelmonitorhistory.FieldPingLatencyMs, field.TypeInt)
	}
	if value, ok := _u.mutation.Message(); ok {
		_spec.SetField(channelmonitorhistory.FieldMessage, field.TypeString, value)
	}
	if _u.mutation.MessageCleared() {
		_spec.ClearField(channelmonitorhistory.FieldMessage, field.TypeString)
	}
	if value, ok := _u.mutation.CheckedAt(); ok {
		_spec.SetField(channelmonitorhistory.FieldCheckedAt, field.TypeTime, value)
	}
	// Translate "monitor" edge changes (inverse M2O) into edge clear/add specs.
	if _u.mutation.MonitorCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   channelmonitorhistory.MonitorTable,
			Columns: []string{channelmonitorhistory.MonitorColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.MonitorIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   channelmonitorhistory.MonitorTable,
			Columns: []string{channelmonitorhistory.MonitorColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// Execute and map low-level sqlgraph errors to ent error types.
	if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{channelmonitorhistory.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return 0, err
	}
	_u.mutation.done = true
	return _node, nil
}
|
||||
|
||||
// ChannelMonitorHistoryUpdateOne is the builder for updating a single ChannelMonitorHistory entity.
type ChannelMonitorHistoryUpdateOne struct {
	config
	// fields optionally narrows the columns returned by Save (set via Select).
	fields []string
	// hooks run around the mutation when Save is called.
	hooks []Hook
	// mutation accumulates the staged field and edge changes to apply.
	mutation *ChannelMonitorHistoryMutation
}
|
||||
|
||||
// SetMonitorID sets the "monitor_id" field.
func (_u *ChannelMonitorHistoryUpdateOne) SetMonitorID(v int64) *ChannelMonitorHistoryUpdateOne {
	_u.mutation.SetMonitorID(v)
	return _u
}

// SetNillableMonitorID sets the "monitor_id" field if the given value is not nil.
func (_u *ChannelMonitorHistoryUpdateOne) SetNillableMonitorID(v *int64) *ChannelMonitorHistoryUpdateOne {
	if v != nil {
		_u.SetMonitorID(*v)
	}
	return _u
}

// SetModel sets the "model" field.
func (_u *ChannelMonitorHistoryUpdateOne) SetModel(v string) *ChannelMonitorHistoryUpdateOne {
	_u.mutation.SetModel(v)
	return _u
}

// SetNillableModel sets the "model" field if the given value is not nil.
func (_u *ChannelMonitorHistoryUpdateOne) SetNillableModel(v *string) *ChannelMonitorHistoryUpdateOne {
	if v != nil {
		_u.SetModel(*v)
	}
	return _u
}

// SetStatus sets the "status" field.
func (_u *ChannelMonitorHistoryUpdateOne) SetStatus(v channelmonitorhistory.Status) *ChannelMonitorHistoryUpdateOne {
	_u.mutation.SetStatus(v)
	return _u
}

// SetNillableStatus sets the "status" field if the given value is not nil.
func (_u *ChannelMonitorHistoryUpdateOne) SetNillableStatus(v *channelmonitorhistory.Status) *ChannelMonitorHistoryUpdateOne {
	if v != nil {
		_u.SetStatus(*v)
	}
	return _u
}

// SetLatencyMs sets the "latency_ms" field.
func (_u *ChannelMonitorHistoryUpdateOne) SetLatencyMs(v int) *ChannelMonitorHistoryUpdateOne {
	// Reset first so the explicit value replaces any previously staged change
	// for this field (e.g. an AddLatencyMs increment) instead of combining with it.
	_u.mutation.ResetLatencyMs()
	_u.mutation.SetLatencyMs(v)
	return _u
}

// SetNillableLatencyMs sets the "latency_ms" field if the given value is not nil.
func (_u *ChannelMonitorHistoryUpdateOne) SetNillableLatencyMs(v *int) *ChannelMonitorHistoryUpdateOne {
	if v != nil {
		_u.SetLatencyMs(*v)
	}
	return _u
}

// AddLatencyMs adds value to the "latency_ms" field.
func (_u *ChannelMonitorHistoryUpdateOne) AddLatencyMs(v int) *ChannelMonitorHistoryUpdateOne {
	_u.mutation.AddLatencyMs(v)
	return _u
}

// ClearLatencyMs clears the value of the "latency_ms" field.
func (_u *ChannelMonitorHistoryUpdateOne) ClearLatencyMs() *ChannelMonitorHistoryUpdateOne {
	_u.mutation.ClearLatencyMs()
	return _u
}

// SetPingLatencyMs sets the "ping_latency_ms" field.
func (_u *ChannelMonitorHistoryUpdateOne) SetPingLatencyMs(v int) *ChannelMonitorHistoryUpdateOne {
	// Reset first so the explicit value replaces any staged increment (see SetLatencyMs).
	_u.mutation.ResetPingLatencyMs()
	_u.mutation.SetPingLatencyMs(v)
	return _u
}

// SetNillablePingLatencyMs sets the "ping_latency_ms" field if the given value is not nil.
func (_u *ChannelMonitorHistoryUpdateOne) SetNillablePingLatencyMs(v *int) *ChannelMonitorHistoryUpdateOne {
	if v != nil {
		_u.SetPingLatencyMs(*v)
	}
	return _u
}

// AddPingLatencyMs adds value to the "ping_latency_ms" field.
func (_u *ChannelMonitorHistoryUpdateOne) AddPingLatencyMs(v int) *ChannelMonitorHistoryUpdateOne {
	_u.mutation.AddPingLatencyMs(v)
	return _u
}

// ClearPingLatencyMs clears the value of the "ping_latency_ms" field.
func (_u *ChannelMonitorHistoryUpdateOne) ClearPingLatencyMs() *ChannelMonitorHistoryUpdateOne {
	_u.mutation.ClearPingLatencyMs()
	return _u
}

// SetMessage sets the "message" field.
func (_u *ChannelMonitorHistoryUpdateOne) SetMessage(v string) *ChannelMonitorHistoryUpdateOne {
	_u.mutation.SetMessage(v)
	return _u
}

// SetNillableMessage sets the "message" field if the given value is not nil.
func (_u *ChannelMonitorHistoryUpdateOne) SetNillableMessage(v *string) *ChannelMonitorHistoryUpdateOne {
	if v != nil {
		_u.SetMessage(*v)
	}
	return _u
}

// ClearMessage clears the value of the "message" field.
func (_u *ChannelMonitorHistoryUpdateOne) ClearMessage() *ChannelMonitorHistoryUpdateOne {
	_u.mutation.ClearMessage()
	return _u
}

// SetCheckedAt sets the "checked_at" field.
func (_u *ChannelMonitorHistoryUpdateOne) SetCheckedAt(v time.Time) *ChannelMonitorHistoryUpdateOne {
	_u.mutation.SetCheckedAt(v)
	return _u
}

// SetNillableCheckedAt sets the "checked_at" field if the given value is not nil.
func (_u *ChannelMonitorHistoryUpdateOne) SetNillableCheckedAt(v *time.Time) *ChannelMonitorHistoryUpdateOne {
	if v != nil {
		_u.SetCheckedAt(*v)
	}
	return _u
}

// SetMonitor sets the "monitor" edge to the ChannelMonitor entity.
// Equivalent to setting the "monitor_id" foreign-key field directly.
func (_u *ChannelMonitorHistoryUpdateOne) SetMonitor(v *ChannelMonitor) *ChannelMonitorHistoryUpdateOne {
	return _u.SetMonitorID(v.ID)
}

// Mutation returns the ChannelMonitorHistoryMutation object of the builder.
func (_u *ChannelMonitorHistoryUpdateOne) Mutation() *ChannelMonitorHistoryMutation {
	return _u.mutation
}

// ClearMonitor clears the "monitor" edge to the ChannelMonitor entity.
func (_u *ChannelMonitorHistoryUpdateOne) ClearMonitor() *ChannelMonitorHistoryUpdateOne {
	_u.mutation.ClearMonitor()
	return _u
}

// Where appends a list of predicates to the ChannelMonitorHistoryUpdateOne builder.
func (_u *ChannelMonitorHistoryUpdateOne) Where(ps ...predicate.ChannelMonitorHistory) *ChannelMonitorHistoryUpdateOne {
	_u.mutation.Where(ps...)
	return _u
}

// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (_u *ChannelMonitorHistoryUpdateOne) Select(field string, fields ...string) *ChannelMonitorHistoryUpdateOne {
	_u.fields = append([]string{field}, fields...)
	return _u
}
|
||||
|
||||
// Save executes the query and returns the updated ChannelMonitorHistory entity.
// Registered hooks are run around sqlSave.
func (_u *ChannelMonitorHistoryUpdateOne) Save(ctx context.Context) (*ChannelMonitorHistory, error) {
	return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (_u *ChannelMonitorHistoryUpdateOne) SaveX(ctx context.Context) *ChannelMonitorHistory {
	node, err := _u.Save(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// Exec executes the query on the entity.
func (_u *ChannelMonitorHistoryUpdateOne) Exec(ctx context.Context) error {
	_, err := _u.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (_u *ChannelMonitorHistoryUpdateOne) ExecX(ctx context.Context) {
	if err := _u.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
// It validates only fields that were actually staged in the mutation.
func (_u *ChannelMonitorHistoryUpdateOne) check() error {
	if v, ok := _u.mutation.Model(); ok {
		if err := channelmonitorhistory.ModelValidator(v); err != nil {
			return &ValidationError{Name: "model", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorHistory.model": %w`, err)}
		}
	}
	if v, ok := _u.mutation.Status(); ok {
		if err := channelmonitorhistory.StatusValidator(v); err != nil {
			return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorHistory.status": %w`, err)}
		}
	}
	if v, ok := _u.mutation.Message(); ok {
		if err := channelmonitorhistory.MessageValidator(v); err != nil {
			return &ValidationError{Name: "message", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorHistory.message": %w`, err)}
		}
	}
	// The "monitor" edge is required: it may be replaced, but not cleared.
	if _u.mutation.MonitorCleared() && len(_u.mutation.MonitorIDs()) > 0 {
		return errors.New(`ent: clearing a required unique edge "ChannelMonitorHistory.monitor"`)
	}
	return nil
}
|
||||
|
||||
// sqlSave validates the builder, translates the staged mutation into an
// UPDATE spec for a single row (identified by the mutation's ID), executes
// it, and returns the updated entity.
func (_u *ChannelMonitorHistoryUpdateOne) sqlSave(ctx context.Context) (_node *ChannelMonitorHistory, err error) {
	if err := _u.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(channelmonitorhistory.Table, channelmonitorhistory.Columns, sqlgraph.NewFieldSpec(channelmonitorhistory.FieldID, field.TypeInt64))
	// A single-entity update requires the target ID to be present on the mutation.
	id, ok := _u.mutation.ID()
	if !ok {
		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "ChannelMonitorHistory.id" for update`)}
	}
	_spec.Node.ID.Value = id
	// Narrow the RETURNING columns when Select was used; the ID column is
	// always included so the entity can be identified.
	if fields := _u.fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, channelmonitorhistory.FieldID)
		for _, f := range fields {
			if !channelmonitorhistory.ValidColumn(f) {
				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
			}
			if f != channelmonitorhistory.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, f)
			}
		}
	}
	// Apply builder predicates to the UPDATE's WHERE clause.
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Translate staged field changes into column set/add/clear operations.
	if value, ok := _u.mutation.Model(); ok {
		_spec.SetField(channelmonitorhistory.FieldModel, field.TypeString, value)
	}
	if value, ok := _u.mutation.Status(); ok {
		_spec.SetField(channelmonitorhistory.FieldStatus, field.TypeEnum, value)
	}
	if value, ok := _u.mutation.LatencyMs(); ok {
		_spec.SetField(channelmonitorhistory.FieldLatencyMs, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedLatencyMs(); ok {
		_spec.AddField(channelmonitorhistory.FieldLatencyMs, field.TypeInt, value)
	}
	if _u.mutation.LatencyMsCleared() {
		_spec.ClearField(channelmonitorhistory.FieldLatencyMs, field.TypeInt)
	}
	if value, ok := _u.mutation.PingLatencyMs(); ok {
		_spec.SetField(channelmonitorhistory.FieldPingLatencyMs, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedPingLatencyMs(); ok {
		_spec.AddField(channelmonitorhistory.FieldPingLatencyMs, field.TypeInt, value)
	}
	if _u.mutation.PingLatencyMsCleared() {
		_spec.ClearField(channelmonitorhistory.FieldPingLatencyMs, field.TypeInt)
	}
	if value, ok := _u.mutation.Message(); ok {
		_spec.SetField(channelmonitorhistory.FieldMessage, field.TypeString, value)
	}
	if _u.mutation.MessageCleared() {
		_spec.ClearField(channelmonitorhistory.FieldMessage, field.TypeString)
	}
	if value, ok := _u.mutation.CheckedAt(); ok {
		_spec.SetField(channelmonitorhistory.FieldCheckedAt, field.TypeTime, value)
	}
	// Translate "monitor" edge changes (inverse M2O) into edge clear/add specs.
	if _u.mutation.MonitorCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   channelmonitorhistory.MonitorTable,
			Columns: []string{channelmonitorhistory.MonitorColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.MonitorIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   channelmonitorhistory.MonitorTable,
			Columns: []string{channelmonitorhistory.MonitorColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// Wire the scan/assign callbacks so the updated row is decoded into _node.
	_node = &ChannelMonitorHistory{config: _u.config}
	_spec.Assign = _node.assignValues
	_spec.ScanValues = _node.scanValues
	// Execute and map low-level sqlgraph errors to ent error types.
	if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{channelmonitorhistory.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	_u.mutation.done = true
	return _node, nil
}
|
||||
216
backend/ent/channelmonitorrequesttemplate.go
Normal file
216
backend/ent/channelmonitorrequesttemplate.go
Normal file
@@ -0,0 +1,216 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorrequesttemplate"
|
||||
)
|
||||
|
||||
// ChannelMonitorRequestTemplate is the model entity for the ChannelMonitorRequestTemplate schema.
type ChannelMonitorRequestTemplate struct {
	// config is excluded from JSON output; it carries the driver/runtime handles.
	config `json:"-"`
	// ID of the ent.
	ID int64 `json:"id,omitempty"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// UpdatedAt holds the value of the "updated_at" field.
	UpdatedAt time.Time `json:"updated_at,omitempty"`
	// Name holds the value of the "name" field.
	Name string `json:"name,omitempty"`
	// Provider holds the value of the "provider" field.
	Provider channelmonitorrequesttemplate.Provider `json:"provider,omitempty"`
	// Description holds the value of the "description" field.
	Description string `json:"description,omitempty"`
	// ExtraHeaders holds the value of the "extra_headers" field.
	ExtraHeaders map[string]string `json:"extra_headers,omitempty"`
	// BodyOverrideMode holds the value of the "body_override_mode" field.
	BodyOverrideMode string `json:"body_override_mode,omitempty"`
	// BodyOverride holds the value of the "body_override" field.
	BodyOverride map[string]interface{} `json:"body_override,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the ChannelMonitorRequestTemplateQuery when eager-loading is set.
	Edges ChannelMonitorRequestTemplateEdges `json:"edges"`
	// selectValues stores values of extra columns selected beyond the schema fields.
	selectValues sql.SelectValues
}
|
||||
|
||||
// ChannelMonitorRequestTemplateEdges holds the relations/edges for other nodes in the graph.
type ChannelMonitorRequestTemplateEdges struct {
	// Monitors holds the value of the monitors edge.
	Monitors []*ChannelMonitor `json:"monitors,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	// Index 0 tracks the "monitors" edge.
	loadedTypes [1]bool
}
|
||||
|
||||
// MonitorsOrErr returns the Monitors value or an error if the edge
// was not loaded in eager-loading.
func (e ChannelMonitorRequestTemplateEdges) MonitorsOrErr() ([]*ChannelMonitor, error) {
	// loadedTypes[0] is set when the "monitors" edge was eager-loaded.
	if e.loadedTypes[0] {
		return e.Monitors, nil
	}
	return nil, &NotLoadedError{edge: "monitors"}
}
|
||||
|
||||
// scanValues returns the types for scanning values from sql.Rows.
// Each column name is mapped to a pointer of the appropriate nullable type.
func (*ChannelMonitorRequestTemplate) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i := range columns {
		switch columns[i] {
		// JSON-typed columns are scanned as raw bytes (presumably decoded by
		// assignValues via encoding/json — confirm in that method).
		case channelmonitorrequesttemplate.FieldExtraHeaders, channelmonitorrequesttemplate.FieldBodyOverride:
			values[i] = new([]byte)
		case channelmonitorrequesttemplate.FieldID:
			values[i] = new(sql.NullInt64)
		case channelmonitorrequesttemplate.FieldName, channelmonitorrequesttemplate.FieldProvider, channelmonitorrequesttemplate.FieldDescription, channelmonitorrequesttemplate.FieldBodyOverrideMode:
			values[i] = new(sql.NullString)
		case channelmonitorrequesttemplate.FieldCreatedAt, channelmonitorrequesttemplate.FieldUpdatedAt:
			values[i] = new(sql.NullTime)
		default:
			// Columns outside the schema (e.g. modifier-selected expressions)
			// fall back to a catch-all scan target.
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}
|
||||
|
||||
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the ChannelMonitorRequestTemplate fields.
// JSON-typed fields (extra_headers, body_override) arrive as raw []byte and are
// unmarshaled here; unrecognized columns are stored in selectValues for dynamic access.
func (_m *ChannelMonitorRequestTemplate) assignValues(columns []string, values []any) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case channelmonitorrequesttemplate.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			_m.ID = int64(value.Int64)
		case channelmonitorrequesttemplate.FieldCreatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created_at", values[i])
			} else if value.Valid {
				_m.CreatedAt = value.Time
			}
		case channelmonitorrequesttemplate.FieldUpdatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
			} else if value.Valid {
				_m.UpdatedAt = value.Time
			}
		case channelmonitorrequesttemplate.FieldName:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field name", values[i])
			} else if value.Valid {
				_m.Name = value.String
			}
		case channelmonitorrequesttemplate.FieldProvider:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field provider", values[i])
			} else if value.Valid {
				_m.Provider = channelmonitorrequesttemplate.Provider(value.String)
			}
		case channelmonitorrequesttemplate.FieldDescription:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field description", values[i])
			} else if value.Valid {
				_m.Description = value.String
			}
		case channelmonitorrequesttemplate.FieldExtraHeaders:
			if value, ok := values[i].(*[]byte); !ok {
				return fmt.Errorf("unexpected type %T for field extra_headers", values[i])
			} else if value != nil && len(*value) > 0 {
				if err := json.Unmarshal(*value, &_m.ExtraHeaders); err != nil {
					return fmt.Errorf("unmarshal field extra_headers: %w", err)
				}
			}
		case channelmonitorrequesttemplate.FieldBodyOverrideMode:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field body_override_mode", values[i])
			} else if value.Valid {
				_m.BodyOverrideMode = value.String
			}
		case channelmonitorrequesttemplate.FieldBodyOverride:
			if value, ok := values[i].(*[]byte); !ok {
				return fmt.Errorf("unexpected type %T for field body_override", values[i])
			} else if value != nil && len(*value) > 0 {
				if err := json.Unmarshal(*value, &_m.BodyOverride); err != nil {
					return fmt.Errorf("unmarshal field body_override: %w", err)
				}
			}
		default:
			// Column was selected dynamically (modifiers/order); keep raw value.
			_m.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}
|
||||
|
||||
// Value returns the ent.Value that was dynamically selected and assigned to the ChannelMonitorRequestTemplate.
// This includes values selected through modifiers, order, etc.
func (_m *ChannelMonitorRequestTemplate) Value(name string) (ent.Value, error) {
	return _m.selectValues.Get(name)
}
|
||||
|
||||
// QueryMonitors queries the "monitors" edge of the ChannelMonitorRequestTemplate entity.
func (_m *ChannelMonitorRequestTemplate) QueryMonitors() *ChannelMonitorQuery {
	return NewChannelMonitorRequestTemplateClient(_m.config).QueryMonitors(_m)
}
|
||||
|
||||
// Update returns a builder for updating this ChannelMonitorRequestTemplate.
// Note that you need to call ChannelMonitorRequestTemplate.Unwrap() before calling this method if this ChannelMonitorRequestTemplate
// was returned from a transaction, and the transaction was committed or rolled back.
func (_m *ChannelMonitorRequestTemplate) Update() *ChannelMonitorRequestTemplateUpdateOne {
	return NewChannelMonitorRequestTemplateClient(_m.config).UpdateOne(_m)
}
|
||||
|
||||
// Unwrap unwraps the ChannelMonitorRequestTemplate entity that was returned from a transaction after it was closed,
|
||||
// so that all future queries will be executed through the driver which created the transaction.
|
||||
func (_m *ChannelMonitorRequestTemplate) Unwrap() *ChannelMonitorRequestTemplate {
|
||||
_tx, ok := _m.config.driver.(*txDriver)
|
||||
if !ok {
|
||||
panic("ent: ChannelMonitorRequestTemplate is not a transactional entity")
|
||||
}
|
||||
_m.config.driver = _tx.drv
|
||||
return _m
|
||||
}
|
||||
|
||||
// String implements the fmt.Stringer.
|
||||
func (_m *ChannelMonitorRequestTemplate) String() string {
|
||||
var builder strings.Builder
|
||||
builder.WriteString("ChannelMonitorRequestTemplate(")
|
||||
builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
|
||||
builder.WriteString("created_at=")
|
||||
builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("updated_at=")
|
||||
builder.WriteString(_m.UpdatedAt.Format(time.ANSIC))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("name=")
|
||||
builder.WriteString(_m.Name)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("provider=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.Provider))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("description=")
|
||||
builder.WriteString(_m.Description)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("extra_headers=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.ExtraHeaders))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("body_override_mode=")
|
||||
builder.WriteString(_m.BodyOverrideMode)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("body_override=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.BodyOverride))
|
||||
builder.WriteByte(')')
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
// ChannelMonitorRequestTemplates is a parsable slice of ChannelMonitorRequestTemplate.
type ChannelMonitorRequestTemplates []*ChannelMonitorRequestTemplate
|
||||
@@ -0,0 +1,172 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package channelmonitorrequesttemplate
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
)
|
||||
|
||||
const (
	// Label holds the string label denoting the channelmonitorrequesttemplate type in the database.
	Label = "channel_monitor_request_template"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldCreatedAt holds the string denoting the created_at field in the database.
	FieldCreatedAt = "created_at"
	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
	FieldUpdatedAt = "updated_at"
	// FieldName holds the string denoting the name field in the database.
	FieldName = "name"
	// FieldProvider holds the string denoting the provider field in the database.
	FieldProvider = "provider"
	// FieldDescription holds the string denoting the description field in the database.
	FieldDescription = "description"
	// FieldExtraHeaders holds the string denoting the extra_headers field in the database.
	FieldExtraHeaders = "extra_headers"
	// FieldBodyOverrideMode holds the string denoting the body_override_mode field in the database.
	FieldBodyOverrideMode = "body_override_mode"
	// FieldBodyOverride holds the string denoting the body_override field in the database.
	FieldBodyOverride = "body_override"
	// EdgeMonitors holds the string denoting the monitors edge name in mutations.
	EdgeMonitors = "monitors"
	// Table holds the table name of the channelmonitorrequesttemplate in the database.
	Table = "channel_monitor_request_templates"
	// MonitorsTable is the table that holds the monitors relation/edge.
	MonitorsTable = "channel_monitors"
	// MonitorsInverseTable is the table name for the ChannelMonitor entity.
	// It exists in this package in order to avoid circular dependency with the "channelmonitor" package.
	MonitorsInverseTable = "channel_monitors"
	// MonitorsColumn is the table column denoting the monitors relation/edge.
	MonitorsColumn = "template_id"
)
|
||||
|
||||
// Columns holds all SQL columns for channelmonitorrequesttemplate fields.
var Columns = []string{
	FieldID,
	FieldCreatedAt,
	FieldUpdatedAt,
	FieldName,
	FieldProvider,
	FieldDescription,
	FieldExtraHeaders,
	FieldBodyOverrideMode,
	FieldBodyOverride,
}
|
||||
|
||||
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||
func ValidColumn(column string) bool {
|
||||
for i := range Columns {
|
||||
if column == Columns[i] {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var (
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
	UpdateDefaultUpdatedAt func() time.Time
	// NameValidator is a validator for the "name" field. It is called by the builders before save.
	NameValidator func(string) error
	// DefaultDescription holds the default value on creation for the "description" field.
	DefaultDescription string
	// DescriptionValidator is a validator for the "description" field. It is called by the builders before save.
	DescriptionValidator func(string) error
	// DefaultExtraHeaders holds the default value on creation for the "extra_headers" field.
	DefaultExtraHeaders map[string]string
	// DefaultBodyOverrideMode holds the default value on creation for the "body_override_mode" field.
	DefaultBodyOverrideMode string
	// BodyOverrideModeValidator is a validator for the "body_override_mode" field. It is called by the builders before save.
	BodyOverrideModeValidator func(string) error
)
|
||||
|
||||
// Provider defines the type for the "provider" enum field.
type Provider string

// Provider values.
const (
	ProviderOpenai    Provider = "openai"
	ProviderAnthropic Provider = "anthropic"
	ProviderGemini    Provider = "gemini"
)

// String implements fmt.Stringer for Provider.
func (pr Provider) String() string {
	return string(pr)
}
|
||||
|
||||
// ProviderValidator is a validator for the "provider" field enum values. It is called by the builders before save.
|
||||
func ProviderValidator(pr Provider) error {
|
||||
switch pr {
|
||||
case ProviderOpenai, ProviderAnthropic, ProviderGemini:
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("channelmonitorrequesttemplate: invalid enum value for provider field: %q", pr)
|
||||
}
|
||||
}
|
||||
|
||||
// OrderOption defines the ordering options for the ChannelMonitorRequestTemplate queries.
type OrderOption func(*sql.Selector)
|
||||
|
||||
// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}

// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}

// ByName orders the results by the name field.
func ByName(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldName, opts...).ToFunc()
}

// ByProvider orders the results by the provider field.
func ByProvider(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldProvider, opts...).ToFunc()
}

// ByDescription orders the results by the description field.
func ByDescription(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldDescription, opts...).ToFunc()
}

// ByBodyOverrideMode orders the results by the body_override_mode field.
func ByBodyOverrideMode(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldBodyOverrideMode, opts...).ToFunc()
}
|
||||
|
||||
// ByMonitorsCount orders the results by monitors count.
func ByMonitorsCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newMonitorsStep(), opts...)
	}
}

// ByMonitors orders the results by monitors terms.
func ByMonitors(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newMonitorsStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}

// newMonitorsStep builds the graph step describing the inverse O2M
// "monitors" edge (template_id column on the channel_monitors table).
func newMonitorsStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(MonitorsInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.O2M, true, MonitorsTable, MonitorsColumn),
	)
}
|
||||
434
backend/ent/channelmonitorrequesttemplate/where.go
Normal file
434
backend/ent/channelmonitorrequesttemplate/where.go
Normal file
@@ -0,0 +1,434 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package channelmonitorrequesttemplate
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// ID predicates: filter ChannelMonitorRequestTemplate vertices by their ID field.

// ID filters vertices based on their ID field.
func ID(id int64) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldEQ(FieldID, id))
}

// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int64) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldEQ(FieldID, id))
}

// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int64) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldNEQ(FieldID, id))
}

// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int64) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldIn(FieldID, ids...))
}

// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int64) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldNotIn(FieldID, ids...))
}

// IDGT applies the GT predicate on the ID field.
func IDGT(id int64) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldGT(FieldID, id))
}

// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int64) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldGTE(FieldID, id))
}

// IDLT applies the LT predicate on the ID field.
func IDLT(id int64) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldLT(FieldID, id))
}

// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int64) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldLTE(FieldID, id))
}
|
||||
|
||||
// Shortcut predicates: each is identical to the corresponding <Field>EQ predicate.

// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldEQ(FieldCreatedAt, v))
}

// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldEQ(FieldUpdatedAt, v))
}

// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
func Name(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldEQ(FieldName, v))
}

// Description applies equality check predicate on the "description" field. It's identical to DescriptionEQ.
func Description(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldEQ(FieldDescription, v))
}

// BodyOverrideMode applies equality check predicate on the "body_override_mode" field. It's identical to BodyOverrideModeEQ.
func BodyOverrideMode(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldEQ(FieldBodyOverrideMode, v))
}
|
||||
|
||||
// Predicates for the "created_at" field.

// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldEQ(FieldCreatedAt, v))
}

// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldNEQ(FieldCreatedAt, v))
}

// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldIn(FieldCreatedAt, vs...))
}

// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldNotIn(FieldCreatedAt, vs...))
}

// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldGT(FieldCreatedAt, v))
}

// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldGTE(FieldCreatedAt, v))
}

// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldLT(FieldCreatedAt, v))
}

// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldLTE(FieldCreatedAt, v))
}
|
||||
|
||||
// Predicates for the "updated_at" field.

// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldEQ(FieldUpdatedAt, v))
}

// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldNEQ(FieldUpdatedAt, v))
}

// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldIn(FieldUpdatedAt, vs...))
}

// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldNotIn(FieldUpdatedAt, vs...))
}

// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldGT(FieldUpdatedAt, v))
}

// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldGTE(FieldUpdatedAt, v))
}

// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldLT(FieldUpdatedAt, v))
}

// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldLTE(FieldUpdatedAt, v))
}
|
||||
|
||||
// Predicates for the "name" field.

// NameEQ applies the EQ predicate on the "name" field.
func NameEQ(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldEQ(FieldName, v))
}

// NameNEQ applies the NEQ predicate on the "name" field.
func NameNEQ(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldNEQ(FieldName, v))
}

// NameIn applies the In predicate on the "name" field.
func NameIn(vs ...string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldIn(FieldName, vs...))
}

// NameNotIn applies the NotIn predicate on the "name" field.
func NameNotIn(vs ...string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldNotIn(FieldName, vs...))
}

// NameGT applies the GT predicate on the "name" field.
func NameGT(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldGT(FieldName, v))
}

// NameGTE applies the GTE predicate on the "name" field.
func NameGTE(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldGTE(FieldName, v))
}

// NameLT applies the LT predicate on the "name" field.
func NameLT(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldLT(FieldName, v))
}

// NameLTE applies the LTE predicate on the "name" field.
func NameLTE(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldLTE(FieldName, v))
}

// NameContains applies the Contains predicate on the "name" field.
func NameContains(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldContains(FieldName, v))
}

// NameHasPrefix applies the HasPrefix predicate on the "name" field.
func NameHasPrefix(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldHasPrefix(FieldName, v))
}

// NameHasSuffix applies the HasSuffix predicate on the "name" field.
func NameHasSuffix(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldHasSuffix(FieldName, v))
}

// NameEqualFold applies the EqualFold predicate on the "name" field.
func NameEqualFold(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldEqualFold(FieldName, v))
}

// NameContainsFold applies the ContainsFold predicate on the "name" field.
func NameContainsFold(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldContainsFold(FieldName, v))
}
|
||||
|
||||
// Predicates for the "provider" enum field.

// ProviderEQ applies the EQ predicate on the "provider" field.
func ProviderEQ(v Provider) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldEQ(FieldProvider, v))
}

// ProviderNEQ applies the NEQ predicate on the "provider" field.
func ProviderNEQ(v Provider) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldNEQ(FieldProvider, v))
}

// ProviderIn applies the In predicate on the "provider" field.
func ProviderIn(vs ...Provider) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldIn(FieldProvider, vs...))
}

// ProviderNotIn applies the NotIn predicate on the "provider" field.
func ProviderNotIn(vs ...Provider) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldNotIn(FieldProvider, vs...))
}
|
||||
|
||||
// Predicates for the optional "description" field (includes IsNil/NotNil).

// DescriptionEQ applies the EQ predicate on the "description" field.
func DescriptionEQ(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldEQ(FieldDescription, v))
}

// DescriptionNEQ applies the NEQ predicate on the "description" field.
func DescriptionNEQ(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldNEQ(FieldDescription, v))
}

// DescriptionIn applies the In predicate on the "description" field.
func DescriptionIn(vs ...string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldIn(FieldDescription, vs...))
}

// DescriptionNotIn applies the NotIn predicate on the "description" field.
func DescriptionNotIn(vs ...string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldNotIn(FieldDescription, vs...))
}

// DescriptionGT applies the GT predicate on the "description" field.
func DescriptionGT(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldGT(FieldDescription, v))
}

// DescriptionGTE applies the GTE predicate on the "description" field.
func DescriptionGTE(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldGTE(FieldDescription, v))
}

// DescriptionLT applies the LT predicate on the "description" field.
func DescriptionLT(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldLT(FieldDescription, v))
}

// DescriptionLTE applies the LTE predicate on the "description" field.
func DescriptionLTE(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldLTE(FieldDescription, v))
}

// DescriptionContains applies the Contains predicate on the "description" field.
func DescriptionContains(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldContains(FieldDescription, v))
}

// DescriptionHasPrefix applies the HasPrefix predicate on the "description" field.
func DescriptionHasPrefix(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldHasPrefix(FieldDescription, v))
}

// DescriptionHasSuffix applies the HasSuffix predicate on the "description" field.
func DescriptionHasSuffix(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldHasSuffix(FieldDescription, v))
}

// DescriptionIsNil applies the IsNil predicate on the "description" field.
func DescriptionIsNil() predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldIsNull(FieldDescription))
}

// DescriptionNotNil applies the NotNil predicate on the "description" field.
func DescriptionNotNil() predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldNotNull(FieldDescription))
}

// DescriptionEqualFold applies the EqualFold predicate on the "description" field.
func DescriptionEqualFold(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldEqualFold(FieldDescription, v))
}

// DescriptionContainsFold applies the ContainsFold predicate on the "description" field.
func DescriptionContainsFold(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldContainsFold(FieldDescription, v))
}
|
||||
|
||||
// Predicates for the "body_override_mode" field.

// BodyOverrideModeEQ applies the EQ predicate on the "body_override_mode" field.
func BodyOverrideModeEQ(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldEQ(FieldBodyOverrideMode, v))
}

// BodyOverrideModeNEQ applies the NEQ predicate on the "body_override_mode" field.
func BodyOverrideModeNEQ(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldNEQ(FieldBodyOverrideMode, v))
}

// BodyOverrideModeIn applies the In predicate on the "body_override_mode" field.
func BodyOverrideModeIn(vs ...string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldIn(FieldBodyOverrideMode, vs...))
}

// BodyOverrideModeNotIn applies the NotIn predicate on the "body_override_mode" field.
func BodyOverrideModeNotIn(vs ...string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldNotIn(FieldBodyOverrideMode, vs...))
}

// BodyOverrideModeGT applies the GT predicate on the "body_override_mode" field.
func BodyOverrideModeGT(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldGT(FieldBodyOverrideMode, v))
}

// BodyOverrideModeGTE applies the GTE predicate on the "body_override_mode" field.
func BodyOverrideModeGTE(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldGTE(FieldBodyOverrideMode, v))
}

// BodyOverrideModeLT applies the LT predicate on the "body_override_mode" field.
func BodyOverrideModeLT(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldLT(FieldBodyOverrideMode, v))
}

// BodyOverrideModeLTE applies the LTE predicate on the "body_override_mode" field.
func BodyOverrideModeLTE(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldLTE(FieldBodyOverrideMode, v))
}

// BodyOverrideModeContains applies the Contains predicate on the "body_override_mode" field.
func BodyOverrideModeContains(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldContains(FieldBodyOverrideMode, v))
}
|
||||
|
||||
// BodyOverrideModeHasPrefix applies the HasPrefix predicate on the "body_override_mode" field.
|
||||
func BodyOverrideModeHasPrefix(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldHasPrefix(FieldBodyOverrideMode, v))
|
||||
}
|
||||
|
||||
// BodyOverrideModeHasSuffix applies the HasSuffix predicate on the "body_override_mode" field.
|
||||
func BodyOverrideModeHasSuffix(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldHasSuffix(FieldBodyOverrideMode, v))
|
||||
}
|
||||
|
||||
// BodyOverrideModeEqualFold applies the EqualFold predicate on the "body_override_mode" field.
|
||||
func BodyOverrideModeEqualFold(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldEqualFold(FieldBodyOverrideMode, v))
|
||||
}
|
||||
|
||||
// BodyOverrideModeContainsFold applies the ContainsFold predicate on the "body_override_mode" field.
|
||||
func BodyOverrideModeContainsFold(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldContainsFold(FieldBodyOverrideMode, v))
|
||||
}
|
||||
|
||||
// BodyOverrideIsNil applies the IsNil predicate on the "body_override" field.
|
||||
func BodyOverrideIsNil() predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldIsNull(FieldBodyOverride))
|
||||
}
|
||||
|
||||
// BodyOverrideNotNil applies the NotNil predicate on the "body_override" field.
|
||||
func BodyOverrideNotNil() predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldNotNull(FieldBodyOverride))
|
||||
}
|
||||
|
||||
// HasMonitors applies the HasEdge predicate on the "monitors" edge.
|
||||
func HasMonitors() predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, true, MonitorsTable, MonitorsColumn),
|
||||
)
|
||||
sqlgraph.HasNeighbors(s, step)
|
||||
})
|
||||
}
|
||||
|
||||
// HasMonitorsWith applies the HasEdge predicate on the "monitors" edge with a given conditions (other predicates).
|
||||
func HasMonitorsWith(preds ...predicate.ChannelMonitor) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(func(s *sql.Selector) {
|
||||
step := newMonitorsStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// And groups predicates with the AND operator between them.
|
||||
func And(predicates ...predicate.ChannelMonitorRequestTemplate) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.AndPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Or groups predicates with the OR operator between them.
|
||||
func Or(predicates ...predicate.ChannelMonitorRequestTemplate) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.OrPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Not applies the not operator on the given predicate.
|
||||
func Not(p predicate.ChannelMonitorRequestTemplate) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.NotPredicates(p))
|
||||
}
|
||||
942
backend/ent/channelmonitorrequesttemplate_create.go
Normal file
942
backend/ent/channelmonitorrequesttemplate_create.go
Normal file
@@ -0,0 +1,942 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorrequesttemplate"
|
||||
)
|
||||
|
||||
// ChannelMonitorRequestTemplateCreate is the builder for creating a ChannelMonitorRequestTemplate entity.
|
||||
type ChannelMonitorRequestTemplateCreate struct {
|
||||
config
|
||||
mutation *ChannelMonitorRequestTemplateMutation
|
||||
hooks []Hook
|
||||
conflict []sql.ConflictOption
|
||||
}
|
||||
|
||||
// SetCreatedAt sets the "created_at" field.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) SetCreatedAt(v time.Time) *ChannelMonitorRequestTemplateCreate {
|
||||
_c.mutation.SetCreatedAt(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) SetNillableCreatedAt(v *time.Time) *ChannelMonitorRequestTemplateCreate {
|
||||
if v != nil {
|
||||
_c.SetCreatedAt(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) SetUpdatedAt(v time.Time) *ChannelMonitorRequestTemplateCreate {
|
||||
_c.mutation.SetUpdatedAt(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) SetNillableUpdatedAt(v *time.Time) *ChannelMonitorRequestTemplateCreate {
|
||||
if v != nil {
|
||||
_c.SetUpdatedAt(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetName sets the "name" field.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) SetName(v string) *ChannelMonitorRequestTemplateCreate {
|
||||
_c.mutation.SetName(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetProvider sets the "provider" field.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) SetProvider(v channelmonitorrequesttemplate.Provider) *ChannelMonitorRequestTemplateCreate {
|
||||
_c.mutation.SetProvider(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetDescription sets the "description" field.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) SetDescription(v string) *ChannelMonitorRequestTemplateCreate {
|
||||
_c.mutation.SetDescription(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableDescription sets the "description" field if the given value is not nil.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) SetNillableDescription(v *string) *ChannelMonitorRequestTemplateCreate {
|
||||
if v != nil {
|
||||
_c.SetDescription(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetExtraHeaders sets the "extra_headers" field.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) SetExtraHeaders(v map[string]string) *ChannelMonitorRequestTemplateCreate {
|
||||
_c.mutation.SetExtraHeaders(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetBodyOverrideMode sets the "body_override_mode" field.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) SetBodyOverrideMode(v string) *ChannelMonitorRequestTemplateCreate {
|
||||
_c.mutation.SetBodyOverrideMode(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableBodyOverrideMode sets the "body_override_mode" field if the given value is not nil.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) SetNillableBodyOverrideMode(v *string) *ChannelMonitorRequestTemplateCreate {
|
||||
if v != nil {
|
||||
_c.SetBodyOverrideMode(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetBodyOverride sets the "body_override" field.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) SetBodyOverride(v map[string]interface{}) *ChannelMonitorRequestTemplateCreate {
|
||||
_c.mutation.SetBodyOverride(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// AddMonitorIDs adds the "monitors" edge to the ChannelMonitor entity by IDs.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) AddMonitorIDs(ids ...int64) *ChannelMonitorRequestTemplateCreate {
|
||||
_c.mutation.AddMonitorIDs(ids...)
|
||||
return _c
|
||||
}
|
||||
|
||||
// AddMonitors adds the "monitors" edges to the ChannelMonitor entity.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) AddMonitors(v ...*ChannelMonitor) *ChannelMonitorRequestTemplateCreate {
|
||||
ids := make([]int64, len(v))
|
||||
for i := range v {
|
||||
ids[i] = v[i].ID
|
||||
}
|
||||
return _c.AddMonitorIDs(ids...)
|
||||
}
|
||||
|
||||
// Mutation returns the ChannelMonitorRequestTemplateMutation object of the builder.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) Mutation() *ChannelMonitorRequestTemplateMutation {
|
||||
return _c.mutation
|
||||
}
|
||||
|
||||
// Save creates the ChannelMonitorRequestTemplate in the database.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) Save(ctx context.Context) (*ChannelMonitorRequestTemplate, error) {
|
||||
_c.defaults()
|
||||
return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks)
|
||||
}
|
||||
|
||||
// SaveX calls Save and panics if Save returns an error.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) SaveX(ctx context.Context) *ChannelMonitorRequestTemplate {
|
||||
v, err := _c.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) Exec(ctx context.Context) error {
|
||||
_, err := _c.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) ExecX(ctx context.Context) {
|
||||
if err := _c.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) defaults() {
|
||||
if _, ok := _c.mutation.CreatedAt(); !ok {
|
||||
v := channelmonitorrequesttemplate.DefaultCreatedAt()
|
||||
_c.mutation.SetCreatedAt(v)
|
||||
}
|
||||
if _, ok := _c.mutation.UpdatedAt(); !ok {
|
||||
v := channelmonitorrequesttemplate.DefaultUpdatedAt()
|
||||
_c.mutation.SetUpdatedAt(v)
|
||||
}
|
||||
if _, ok := _c.mutation.Description(); !ok {
|
||||
v := channelmonitorrequesttemplate.DefaultDescription
|
||||
_c.mutation.SetDescription(v)
|
||||
}
|
||||
if _, ok := _c.mutation.ExtraHeaders(); !ok {
|
||||
v := channelmonitorrequesttemplate.DefaultExtraHeaders
|
||||
_c.mutation.SetExtraHeaders(v)
|
||||
}
|
||||
if _, ok := _c.mutation.BodyOverrideMode(); !ok {
|
||||
v := channelmonitorrequesttemplate.DefaultBodyOverrideMode
|
||||
_c.mutation.SetBodyOverrideMode(v)
|
||||
}
|
||||
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) check() error {
|
||||
if _, ok := _c.mutation.CreatedAt(); !ok {
|
||||
return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "ChannelMonitorRequestTemplate.created_at"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.UpdatedAt(); !ok {
|
||||
return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "ChannelMonitorRequestTemplate.updated_at"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.Name(); !ok {
|
||||
return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "ChannelMonitorRequestTemplate.name"`)}
|
||||
}
|
||||
if v, ok := _c.mutation.Name(); ok {
|
||||
if err := channelmonitorrequesttemplate.NameValidator(v); err != nil {
|
||||
return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorRequestTemplate.name": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _, ok := _c.mutation.Provider(); !ok {
|
||||
return &ValidationError{Name: "provider", err: errors.New(`ent: missing required field "ChannelMonitorRequestTemplate.provider"`)}
|
||||
}
|
||||
if v, ok := _c.mutation.Provider(); ok {
|
||||
if err := channelmonitorrequesttemplate.ProviderValidator(v); err != nil {
|
||||
return &ValidationError{Name: "provider", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorRequestTemplate.provider": %w`, err)}
|
||||
}
|
||||
}
|
||||
if v, ok := _c.mutation.Description(); ok {
|
||||
if err := channelmonitorrequesttemplate.DescriptionValidator(v); err != nil {
|
||||
return &ValidationError{Name: "description", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorRequestTemplate.description": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _, ok := _c.mutation.ExtraHeaders(); !ok {
|
||||
return &ValidationError{Name: "extra_headers", err: errors.New(`ent: missing required field "ChannelMonitorRequestTemplate.extra_headers"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.BodyOverrideMode(); !ok {
|
||||
return &ValidationError{Name: "body_override_mode", err: errors.New(`ent: missing required field "ChannelMonitorRequestTemplate.body_override_mode"`)}
|
||||
}
|
||||
if v, ok := _c.mutation.BodyOverrideMode(); ok {
|
||||
if err := channelmonitorrequesttemplate.BodyOverrideModeValidator(v); err != nil {
|
||||
return &ValidationError{Name: "body_override_mode", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorRequestTemplate.body_override_mode": %w`, err)}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) sqlSave(ctx context.Context) (*ChannelMonitorRequestTemplate, error) {
|
||||
if err := _c.check(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_node, _spec := _c.createSpec()
|
||||
if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil {
|
||||
if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
id := _spec.ID.Value.(int64)
|
||||
_node.ID = int64(id)
|
||||
_c.mutation.id = &_node.ID
|
||||
_c.mutation.done = true
|
||||
return _node, nil
|
||||
}
|
||||
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) createSpec() (*ChannelMonitorRequestTemplate, *sqlgraph.CreateSpec) {
|
||||
var (
|
||||
_node = &ChannelMonitorRequestTemplate{config: _c.config}
|
||||
_spec = sqlgraph.NewCreateSpec(channelmonitorrequesttemplate.Table, sqlgraph.NewFieldSpec(channelmonitorrequesttemplate.FieldID, field.TypeInt64))
|
||||
)
|
||||
_spec.OnConflict = _c.conflict
|
||||
if value, ok := _c.mutation.CreatedAt(); ok {
|
||||
_spec.SetField(channelmonitorrequesttemplate.FieldCreatedAt, field.TypeTime, value)
|
||||
_node.CreatedAt = value
|
||||
}
|
||||
if value, ok := _c.mutation.UpdatedAt(); ok {
|
||||
_spec.SetField(channelmonitorrequesttemplate.FieldUpdatedAt, field.TypeTime, value)
|
||||
_node.UpdatedAt = value
|
||||
}
|
||||
if value, ok := _c.mutation.Name(); ok {
|
||||
_spec.SetField(channelmonitorrequesttemplate.FieldName, field.TypeString, value)
|
||||
_node.Name = value
|
||||
}
|
||||
if value, ok := _c.mutation.Provider(); ok {
|
||||
_spec.SetField(channelmonitorrequesttemplate.FieldProvider, field.TypeEnum, value)
|
||||
_node.Provider = value
|
||||
}
|
||||
if value, ok := _c.mutation.Description(); ok {
|
||||
_spec.SetField(channelmonitorrequesttemplate.FieldDescription, field.TypeString, value)
|
||||
_node.Description = value
|
||||
}
|
||||
if value, ok := _c.mutation.ExtraHeaders(); ok {
|
||||
_spec.SetField(channelmonitorrequesttemplate.FieldExtraHeaders, field.TypeJSON, value)
|
||||
_node.ExtraHeaders = value
|
||||
}
|
||||
if value, ok := _c.mutation.BodyOverrideMode(); ok {
|
||||
_spec.SetField(channelmonitorrequesttemplate.FieldBodyOverrideMode, field.TypeString, value)
|
||||
_node.BodyOverrideMode = value
|
||||
}
|
||||
if value, ok := _c.mutation.BodyOverride(); ok {
|
||||
_spec.SetField(channelmonitorrequesttemplate.FieldBodyOverride, field.TypeJSON, value)
|
||||
_node.BodyOverride = value
|
||||
}
|
||||
if nodes := _c.mutation.MonitorsIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.O2M,
|
||||
Inverse: true,
|
||||
Table: channelmonitorrequesttemplate.MonitorsTable,
|
||||
Columns: []string{channelmonitorrequesttemplate.MonitorsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges = append(_spec.Edges, edge)
|
||||
}
|
||||
return _node, _spec
|
||||
}
|
||||
|
||||
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
|
||||
// of the `INSERT` statement. For example:
|
||||
//
|
||||
// client.ChannelMonitorRequestTemplate.Create().
|
||||
// SetCreatedAt(v).
|
||||
// OnConflict(
|
||||
// // Update the row with the new values
|
||||
// // the was proposed for insertion.
|
||||
// sql.ResolveWithNewValues(),
|
||||
// ).
|
||||
// // Override some of the fields with custom
|
||||
// // update values.
|
||||
// Update(func(u *ent.ChannelMonitorRequestTemplateUpsert) {
|
||||
// SetCreatedAt(v+v).
|
||||
// }).
|
||||
// Exec(ctx)
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) OnConflict(opts ...sql.ConflictOption) *ChannelMonitorRequestTemplateUpsertOne {
|
||||
_c.conflict = opts
|
||||
return &ChannelMonitorRequestTemplateUpsertOne{
|
||||
create: _c,
|
||||
}
|
||||
}
|
||||
|
||||
// OnConflictColumns calls `OnConflict` and configures the columns
|
||||
// as conflict target. Using this option is equivalent to using:
|
||||
//
|
||||
// client.ChannelMonitorRequestTemplate.Create().
|
||||
// OnConflict(sql.ConflictColumns(columns...)).
|
||||
// Exec(ctx)
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) OnConflictColumns(columns ...string) *ChannelMonitorRequestTemplateUpsertOne {
|
||||
_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
|
||||
return &ChannelMonitorRequestTemplateUpsertOne{
|
||||
create: _c,
|
||||
}
|
||||
}
|
||||
|
||||
type (
|
||||
// ChannelMonitorRequestTemplateUpsertOne is the builder for "upsert"-ing
|
||||
// one ChannelMonitorRequestTemplate node.
|
||||
ChannelMonitorRequestTemplateUpsertOne struct {
|
||||
create *ChannelMonitorRequestTemplateCreate
|
||||
}
|
||||
|
||||
// ChannelMonitorRequestTemplateUpsert is the "OnConflict" setter.
|
||||
ChannelMonitorRequestTemplateUpsert struct {
|
||||
*sql.UpdateSet
|
||||
}
|
||||
)
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
|
||||
func (u *ChannelMonitorRequestTemplateUpsert) SetUpdatedAt(v time.Time) *ChannelMonitorRequestTemplateUpsert {
|
||||
u.Set(channelmonitorrequesttemplate.FieldUpdatedAt, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorRequestTemplateUpsert) UpdateUpdatedAt() *ChannelMonitorRequestTemplateUpsert {
|
||||
u.SetExcluded(channelmonitorrequesttemplate.FieldUpdatedAt)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetName sets the "name" field.
|
||||
func (u *ChannelMonitorRequestTemplateUpsert) SetName(v string) *ChannelMonitorRequestTemplateUpsert {
|
||||
u.Set(channelmonitorrequesttemplate.FieldName, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateName sets the "name" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorRequestTemplateUpsert) UpdateName() *ChannelMonitorRequestTemplateUpsert {
|
||||
u.SetExcluded(channelmonitorrequesttemplate.FieldName)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetProvider sets the "provider" field.
|
||||
func (u *ChannelMonitorRequestTemplateUpsert) SetProvider(v channelmonitorrequesttemplate.Provider) *ChannelMonitorRequestTemplateUpsert {
|
||||
u.Set(channelmonitorrequesttemplate.FieldProvider, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateProvider sets the "provider" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorRequestTemplateUpsert) UpdateProvider() *ChannelMonitorRequestTemplateUpsert {
|
||||
u.SetExcluded(channelmonitorrequesttemplate.FieldProvider)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetDescription sets the "description" field.
|
||||
func (u *ChannelMonitorRequestTemplateUpsert) SetDescription(v string) *ChannelMonitorRequestTemplateUpsert {
|
||||
u.Set(channelmonitorrequesttemplate.FieldDescription, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateDescription sets the "description" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorRequestTemplateUpsert) UpdateDescription() *ChannelMonitorRequestTemplateUpsert {
|
||||
u.SetExcluded(channelmonitorrequesttemplate.FieldDescription)
|
||||
return u
|
||||
}
|
||||
|
||||
// ClearDescription clears the value of the "description" field.
|
||||
func (u *ChannelMonitorRequestTemplateUpsert) ClearDescription() *ChannelMonitorRequestTemplateUpsert {
|
||||
u.SetNull(channelmonitorrequesttemplate.FieldDescription)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetExtraHeaders sets the "extra_headers" field.
|
||||
func (u *ChannelMonitorRequestTemplateUpsert) SetExtraHeaders(v map[string]string) *ChannelMonitorRequestTemplateUpsert {
|
||||
u.Set(channelmonitorrequesttemplate.FieldExtraHeaders, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateExtraHeaders sets the "extra_headers" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorRequestTemplateUpsert) UpdateExtraHeaders() *ChannelMonitorRequestTemplateUpsert {
|
||||
u.SetExcluded(channelmonitorrequesttemplate.FieldExtraHeaders)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetBodyOverrideMode sets the "body_override_mode" field.
|
||||
func (u *ChannelMonitorRequestTemplateUpsert) SetBodyOverrideMode(v string) *ChannelMonitorRequestTemplateUpsert {
|
||||
u.Set(channelmonitorrequesttemplate.FieldBodyOverrideMode, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateBodyOverrideMode sets the "body_override_mode" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorRequestTemplateUpsert) UpdateBodyOverrideMode() *ChannelMonitorRequestTemplateUpsert {
|
||||
u.SetExcluded(channelmonitorrequesttemplate.FieldBodyOverrideMode)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetBodyOverride sets the "body_override" field.
|
||||
func (u *ChannelMonitorRequestTemplateUpsert) SetBodyOverride(v map[string]interface{}) *ChannelMonitorRequestTemplateUpsert {
|
||||
u.Set(channelmonitorrequesttemplate.FieldBodyOverride, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateBodyOverride sets the "body_override" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorRequestTemplateUpsert) UpdateBodyOverride() *ChannelMonitorRequestTemplateUpsert {
|
||||
u.SetExcluded(channelmonitorrequesttemplate.FieldBodyOverride)
|
||||
return u
|
||||
}
|
||||
|
||||
// ClearBodyOverride clears the value of the "body_override" field.
|
||||
func (u *ChannelMonitorRequestTemplateUpsert) ClearBodyOverride() *ChannelMonitorRequestTemplateUpsert {
|
||||
u.SetNull(channelmonitorrequesttemplate.FieldBodyOverride)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateNewValues updates the mutable fields using the new values that were set on create.
|
||||
// Using this option is equivalent to using:
|
||||
//
|
||||
// client.ChannelMonitorRequestTemplate.Create().
|
||||
// OnConflict(
|
||||
// sql.ResolveWithNewValues(),
|
||||
// ).
|
||||
// Exec(ctx)
|
||||
func (u *ChannelMonitorRequestTemplateUpsertOne) UpdateNewValues() *ChannelMonitorRequestTemplateUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
|
||||
if _, exists := u.create.mutation.CreatedAt(); exists {
|
||||
s.SetIgnore(channelmonitorrequesttemplate.FieldCreatedAt)
|
||||
}
|
||||
}))
|
||||
return u
|
||||
}
|
||||
|
||||
// Ignore sets each column to itself in case of conflict.
|
||||
// Using this option is equivalent to using:
|
||||
//
|
||||
// client.ChannelMonitorRequestTemplate.Create().
|
||||
// OnConflict(sql.ResolveWithIgnore()).
|
||||
// Exec(ctx)
|
||||
func (u *ChannelMonitorRequestTemplateUpsertOne) Ignore() *ChannelMonitorRequestTemplateUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
|
||||
return u
|
||||
}
|
||||
|
||||
// DoNothing configures the conflict_action to `DO NOTHING`.
|
||||
// Supported only by SQLite and PostgreSQL.
|
||||
func (u *ChannelMonitorRequestTemplateUpsertOne) DoNothing() *ChannelMonitorRequestTemplateUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.DoNothing())
|
||||
return u
|
||||
}
|
||||
|
||||
// Update allows overriding fields `UPDATE` values. See the ChannelMonitorRequestTemplateCreate.OnConflict
|
||||
// documentation for more info.
|
||||
func (u *ChannelMonitorRequestTemplateUpsertOne) Update(set func(*ChannelMonitorRequestTemplateUpsert)) *ChannelMonitorRequestTemplateUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
|
||||
set(&ChannelMonitorRequestTemplateUpsert{UpdateSet: update})
|
||||
}))
|
||||
return u
|
||||
}
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
|
||||
func (u *ChannelMonitorRequestTemplateUpsertOne) SetUpdatedAt(v time.Time) *ChannelMonitorRequestTemplateUpsertOne {
|
||||
return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
|
||||
s.SetUpdatedAt(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorRequestTemplateUpsertOne) UpdateUpdatedAt() *ChannelMonitorRequestTemplateUpsertOne {
|
||||
return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
|
||||
s.UpdateUpdatedAt()
|
||||
})
|
||||
}
|
||||
|
||||
// SetName sets the "name" field.
|
||||
func (u *ChannelMonitorRequestTemplateUpsertOne) SetName(v string) *ChannelMonitorRequestTemplateUpsertOne {
|
||||
return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
|
||||
s.SetName(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateName sets the "name" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorRequestTemplateUpsertOne) UpdateName() *ChannelMonitorRequestTemplateUpsertOne {
|
||||
return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
|
||||
s.UpdateName()
|
||||
})
|
||||
}
|
||||
|
||||
// SetProvider sets the "provider" field.
|
||||
func (u *ChannelMonitorRequestTemplateUpsertOne) SetProvider(v channelmonitorrequesttemplate.Provider) *ChannelMonitorRequestTemplateUpsertOne {
|
||||
return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
|
||||
s.SetProvider(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateProvider sets the "provider" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorRequestTemplateUpsertOne) UpdateProvider() *ChannelMonitorRequestTemplateUpsertOne {
|
||||
return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
|
||||
s.UpdateProvider()
|
||||
})
|
||||
}
|
||||
|
||||
// SetDescription sets the "description" field.
|
||||
func (u *ChannelMonitorRequestTemplateUpsertOne) SetDescription(v string) *ChannelMonitorRequestTemplateUpsertOne {
|
||||
return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
|
||||
s.SetDescription(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateDescription sets the "description" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorRequestTemplateUpsertOne) UpdateDescription() *ChannelMonitorRequestTemplateUpsertOne {
|
||||
return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
|
||||
s.UpdateDescription()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearDescription clears the value of the "description" field.
|
||||
func (u *ChannelMonitorRequestTemplateUpsertOne) ClearDescription() *ChannelMonitorRequestTemplateUpsertOne {
|
||||
return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
|
||||
s.ClearDescription()
|
||||
})
|
||||
}
|
||||
|
||||
// SetExtraHeaders sets the "extra_headers" field.
|
||||
// SetExtraHeaders sets the "extra_headers" field.
func (u *ChannelMonitorRequestTemplateUpsertOne) SetExtraHeaders(v map[string]string) *ChannelMonitorRequestTemplateUpsertOne {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.SetExtraHeaders(v)
	})
}

// UpdateExtraHeaders sets the "extra_headers" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsertOne) UpdateExtraHeaders() *ChannelMonitorRequestTemplateUpsertOne {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.UpdateExtraHeaders()
	})
}

// SetBodyOverrideMode sets the "body_override_mode" field.
func (u *ChannelMonitorRequestTemplateUpsertOne) SetBodyOverrideMode(v string) *ChannelMonitorRequestTemplateUpsertOne {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.SetBodyOverrideMode(v)
	})
}

// UpdateBodyOverrideMode sets the "body_override_mode" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsertOne) UpdateBodyOverrideMode() *ChannelMonitorRequestTemplateUpsertOne {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.UpdateBodyOverrideMode()
	})
}

// SetBodyOverride sets the "body_override" field.
func (u *ChannelMonitorRequestTemplateUpsertOne) SetBodyOverride(v map[string]interface{}) *ChannelMonitorRequestTemplateUpsertOne {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.SetBodyOverride(v)
	})
}

// UpdateBodyOverride sets the "body_override" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsertOne) UpdateBodyOverride() *ChannelMonitorRequestTemplateUpsertOne {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.UpdateBodyOverride()
	})
}

// ClearBodyOverride clears the value of the "body_override" field.
func (u *ChannelMonitorRequestTemplateUpsertOne) ClearBodyOverride() *ChannelMonitorRequestTemplateUpsertOne {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.ClearBodyOverride()
	})
}
|
||||
|
||||
// Exec executes the query. It returns an error if OnConflict was never
// configured on the underlying create builder, since the upsert statement
// cannot be rendered without conflict options.
func (u *ChannelMonitorRequestTemplateUpsertOne) Exec(ctx context.Context) error {
	if len(u.create.conflict) == 0 {
		return errors.New("ent: missing options for ChannelMonitorRequestTemplateCreate.OnConflict")
	}
	return u.create.Exec(ctx)
}

// ExecX is like Exec, but panics if an error occurs.
// NOTE(review): this calls u.create.Exec directly, so the missing-OnConflict
// guard in Exec above is skipped here; a missing conflict clause surfaces as
// a database error instead. This mirrors the upstream ent template.
func (u *ChannelMonitorRequestTemplateUpsertOne) ExecX(ctx context.Context) {
	if err := u.create.Exec(ctx); err != nil {
		panic(err)
	}
}

// ID executes the UPSERT query and returns the inserted/updated ID.
func (u *ChannelMonitorRequestTemplateUpsertOne) ID(ctx context.Context) (id int64, err error) {
	node, err := u.create.Save(ctx)
	if err != nil {
		return id, err
	}
	return node.ID, nil
}

// IDX is like ID, but panics if an error occurs.
func (u *ChannelMonitorRequestTemplateUpsertOne) IDX(ctx context.Context) int64 {
	id, err := u.ID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}
|
||||
|
||||
// ChannelMonitorRequestTemplateCreateBulk is the builder for creating many ChannelMonitorRequestTemplate entities in bulk.
type ChannelMonitorRequestTemplateCreateBulk struct {
	config
	// err records a builder-construction error; Save/Exec return it eagerly.
	err error
	// builders holds one create builder per entity to insert.
	builders []*ChannelMonitorRequestTemplateCreate
	// conflict holds the ON CONFLICT options shared by the whole batch.
	conflict []sql.ConflictOption
}
|
||||
|
||||
// Save creates the ChannelMonitorRequestTemplate entities in the database.
//
// Each builder's hooks are composed into a chain of mutators; mutator i
// triggers mutator i+1, and the final mutator in the chain performs the single
// batched INSERT for all collected create specs. Invoking mutators[0] below
// therefore executes the whole batch.
func (_c *ChannelMonitorRequestTemplateCreateBulk) Save(ctx context.Context) ([]*ChannelMonitorRequestTemplate, error) {
	if _c.err != nil {
		return nil, _c.err
	}
	specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
	nodes := make([]*ChannelMonitorRequestTemplate, len(_c.builders))
	mutators := make([]Mutator, len(_c.builders))
	for i := range _c.builders {
		func(i int, root context.Context) {
			builder := _c.builders[i]
			builder.defaults()
			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
				mutation, ok := m.(*ChannelMonitorRequestTemplateMutation)
				if !ok {
					return nil, fmt.Errorf("unexpected mutation type %T", m)
				}
				if err := builder.check(); err != nil {
					return nil, err
				}
				builder.mutation = mutation
				var err error
				nodes[i], specs[i] = builder.createSpec()
				if i < len(mutators)-1 {
					// Not the last builder: defer execution to the next mutator in the chain.
					_, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
				} else {
					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
					spec.OnConflict = _c.conflict
					// Invoke the actual operation on the latest mutation in the chain.
					if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
						if sqlgraph.IsConstraintError(err) {
							err = &ConstraintError{msg: err.Error(), wrap: err}
						}
					}
				}
				if err != nil {
					return nil, err
				}
				mutation.id = &nodes[i].ID
				if specs[i].ID.Value != nil {
					// The database assigned (or upsert resolved) an ID; copy it back to the node.
					id := specs[i].ID.Value.(int64)
					nodes[i].ID = int64(id)
				}
				mutation.done = true
				return nodes[i], nil
			})
			// Wrap the base mutator with this builder's hooks, innermost-last.
			for i := len(builder.hooks) - 1; i >= 0; i-- {
				mut = builder.hooks[i](mut)
			}
			mutators[i] = mut
		}(i, ctx)
	}
	if len(mutators) > 0 {
		// Kick off the chain; the last mutator performs the batched insert.
		if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
func (_c *ChannelMonitorRequestTemplateCreateBulk) SaveX(ctx context.Context) []*ChannelMonitorRequestTemplate {
	v, err := _c.Save(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Exec executes the query.
func (_c *ChannelMonitorRequestTemplateCreateBulk) Exec(ctx context.Context) error {
	_, err := _c.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (_c *ChannelMonitorRequestTemplateCreateBulk) ExecX(ctx context.Context) {
	if err := _c.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||
|
||||
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.ChannelMonitorRequestTemplate.CreateBulk(builders...).
//		OnConflict(
//			// Update the row with the new values
//			// that was proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.ChannelMonitorRequestTemplateUpsert) {
//			SetCreatedAt(v+v).
//		}).
//		Exec(ctx)
func (_c *ChannelMonitorRequestTemplateCreateBulk) OnConflict(opts ...sql.ConflictOption) *ChannelMonitorRequestTemplateUpsertBulk {
	_c.conflict = opts
	return &ChannelMonitorRequestTemplateUpsertBulk{
		create: _c,
	}
}

// OnConflictColumns calls `OnConflict` and configures the columns
// as conflict target. Using this option is equivalent to using:
//
//	client.ChannelMonitorRequestTemplate.Create().
//		OnConflict(sql.ConflictColumns(columns...)).
//		Exec(ctx)
func (_c *ChannelMonitorRequestTemplateCreateBulk) OnConflictColumns(columns ...string) *ChannelMonitorRequestTemplateUpsertBulk {
	_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
	return &ChannelMonitorRequestTemplateUpsertBulk{
		create: _c,
	}
}
|
||||
|
||||
// ChannelMonitorRequestTemplateUpsertBulk is the builder for "upsert"-ing
// a bulk of ChannelMonitorRequestTemplate nodes.
type ChannelMonitorRequestTemplateUpsertBulk struct {
	// create is the bulk-create builder whose conflict options this wrapper configures.
	create *ChannelMonitorRequestTemplateCreateBulk
}
|
||||
|
||||
// UpdateNewValues updates the mutable fields using the new values that
// were set on create. Using this option is equivalent to using:
//
//	client.ChannelMonitorRequestTemplate.Create().
//		OnConflict(
//			sql.ResolveWithNewValues(),
//		).
//		Exec(ctx)
func (u *ChannelMonitorRequestTemplateUpsertBulk) UpdateNewValues() *ChannelMonitorRequestTemplateUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
		for _, b := range u.create.builders {
			// Keep the immutable "created_at" value from the original row
			// when it was explicitly set on create.
			if _, exists := b.mutation.CreatedAt(); exists {
				s.SetIgnore(channelmonitorrequesttemplate.FieldCreatedAt)
			}
		}
	}))
	return u
}

// Ignore sets each column to itself in case of conflict.
// Using this option is equivalent to using:
//
//	client.ChannelMonitorRequestTemplate.Create().
//		OnConflict(sql.ResolveWithIgnore()).
//		Exec(ctx)
func (u *ChannelMonitorRequestTemplateUpsertBulk) Ignore() *ChannelMonitorRequestTemplateUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
	return u
}

// DoNothing configures the conflict_action to `DO NOTHING`.
// Supported only by SQLite and PostgreSQL.
func (u *ChannelMonitorRequestTemplateUpsertBulk) DoNothing() *ChannelMonitorRequestTemplateUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.DoNothing())
	return u
}

// Update allows overriding fields `UPDATE` values. See the ChannelMonitorRequestTemplateCreateBulk.OnConflict
// documentation for more info.
func (u *ChannelMonitorRequestTemplateUpsertBulk) Update(set func(*ChannelMonitorRequestTemplateUpsert)) *ChannelMonitorRequestTemplateUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
		set(&ChannelMonitorRequestTemplateUpsert{UpdateSet: update})
	}))
	return u
}
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
func (u *ChannelMonitorRequestTemplateUpsertBulk) SetUpdatedAt(v time.Time) *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.SetUpdatedAt(v)
	})
}

// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsertBulk) UpdateUpdatedAt() *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.UpdateUpdatedAt()
	})
}

// SetName sets the "name" field.
func (u *ChannelMonitorRequestTemplateUpsertBulk) SetName(v string) *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.SetName(v)
	})
}

// UpdateName sets the "name" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsertBulk) UpdateName() *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.UpdateName()
	})
}

// SetProvider sets the "provider" field.
func (u *ChannelMonitorRequestTemplateUpsertBulk) SetProvider(v channelmonitorrequesttemplate.Provider) *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.SetProvider(v)
	})
}

// UpdateProvider sets the "provider" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsertBulk) UpdateProvider() *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.UpdateProvider()
	})
}

// SetDescription sets the "description" field.
func (u *ChannelMonitorRequestTemplateUpsertBulk) SetDescription(v string) *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.SetDescription(v)
	})
}

// UpdateDescription sets the "description" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsertBulk) UpdateDescription() *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.UpdateDescription()
	})
}

// ClearDescription clears the value of the "description" field.
func (u *ChannelMonitorRequestTemplateUpsertBulk) ClearDescription() *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.ClearDescription()
	})
}

// SetExtraHeaders sets the "extra_headers" field.
func (u *ChannelMonitorRequestTemplateUpsertBulk) SetExtraHeaders(v map[string]string) *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.SetExtraHeaders(v)
	})
}

// UpdateExtraHeaders sets the "extra_headers" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsertBulk) UpdateExtraHeaders() *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.UpdateExtraHeaders()
	})
}

// SetBodyOverrideMode sets the "body_override_mode" field.
func (u *ChannelMonitorRequestTemplateUpsertBulk) SetBodyOverrideMode(v string) *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.SetBodyOverrideMode(v)
	})
}

// UpdateBodyOverrideMode sets the "body_override_mode" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsertBulk) UpdateBodyOverrideMode() *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.UpdateBodyOverrideMode()
	})
}

// SetBodyOverride sets the "body_override" field.
func (u *ChannelMonitorRequestTemplateUpsertBulk) SetBodyOverride(v map[string]interface{}) *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.SetBodyOverride(v)
	})
}

// UpdateBodyOverride sets the "body_override" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsertBulk) UpdateBodyOverride() *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.UpdateBodyOverride()
	})
}

// ClearBodyOverride clears the value of the "body_override" field.
func (u *ChannelMonitorRequestTemplateUpsertBulk) ClearBodyOverride() *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.ClearBodyOverride()
	})
}
|
||||
|
||||
// Exec executes the query. It rejects per-builder OnConflict options (they
// must be set on the bulk builder) and requires at least one conflict option.
func (u *ChannelMonitorRequestTemplateUpsertBulk) Exec(ctx context.Context) error {
	if u.create.err != nil {
		return u.create.err
	}
	for i, b := range u.create.builders {
		if len(b.conflict) != 0 {
			return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the ChannelMonitorRequestTemplateCreateBulk instead", i)
		}
	}
	if len(u.create.conflict) == 0 {
		return errors.New("ent: missing options for ChannelMonitorRequestTemplateCreateBulk.OnConflict")
	}
	return u.create.Exec(ctx)
}

// ExecX is like Exec, but panics if an error occurs.
// NOTE(review): this calls u.create.Exec directly and skips the validation
// done in Exec above; mirrors the upstream ent template.
func (u *ChannelMonitorRequestTemplateUpsertBulk) ExecX(ctx context.Context) {
	if err := u.create.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||
88
backend/ent/channelmonitorrequesttemplate_delete.go
Normal file
88
backend/ent/channelmonitorrequesttemplate_delete.go
Normal file
@@ -0,0 +1,88 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorrequesttemplate"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// ChannelMonitorRequestTemplateDelete is the builder for deleting a ChannelMonitorRequestTemplate entity.
type ChannelMonitorRequestTemplateDelete struct {
	config
	hooks    []Hook
	mutation *ChannelMonitorRequestTemplateMutation
}

// Where appends a list predicates to the ChannelMonitorRequestTemplateDelete builder.
func (_d *ChannelMonitorRequestTemplateDelete) Where(ps ...predicate.ChannelMonitorRequestTemplate) *ChannelMonitorRequestTemplateDelete {
	_d.mutation.Where(ps...)
	return _d
}

// Exec executes the deletion query and returns how many vertices were deleted.
func (_d *ChannelMonitorRequestTemplateDelete) Exec(ctx context.Context) (int, error) {
	return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
}

// ExecX is like Exec, but panics if an error occurs.
func (_d *ChannelMonitorRequestTemplateDelete) ExecX(ctx context.Context) int {
	n, err := _d.Exec(ctx)
	if err != nil {
		panic(err)
	}
	return n
}
|
||||
|
||||
// sqlExec builds and runs the DELETE statement, translating constraint
// violations into *ConstraintError. Returns the number of deleted rows.
func (_d *ChannelMonitorRequestTemplateDelete) sqlExec(ctx context.Context) (int, error) {
	_spec := sqlgraph.NewDeleteSpec(channelmonitorrequesttemplate.Table, sqlgraph.NewFieldSpec(channelmonitorrequesttemplate.FieldID, field.TypeInt64))
	if ps := _d.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
	if err != nil && sqlgraph.IsConstraintError(err) {
		err = &ConstraintError{msg: err.Error(), wrap: err}
	}
	_d.mutation.done = true
	return affected, err
}
|
||||
|
||||
// ChannelMonitorRequestTemplateDeleteOne is the builder for deleting a single ChannelMonitorRequestTemplate entity.
type ChannelMonitorRequestTemplateDeleteOne struct {
	// _d is the underlying bulk-delete builder this wrapper delegates to.
	_d *ChannelMonitorRequestTemplateDelete
}

// Where appends a list predicates to the ChannelMonitorRequestTemplateDelete builder.
func (_d *ChannelMonitorRequestTemplateDeleteOne) Where(ps ...predicate.ChannelMonitorRequestTemplate) *ChannelMonitorRequestTemplateDeleteOne {
	_d._d.mutation.Where(ps...)
	return _d
}

// Exec executes the deletion query. It returns *NotFoundError when no
// row matched the predicates.
func (_d *ChannelMonitorRequestTemplateDeleteOne) Exec(ctx context.Context) error {
	n, err := _d._d.Exec(ctx)
	switch {
	case err != nil:
		return err
	case n == 0:
		return &NotFoundError{channelmonitorrequesttemplate.Label}
	default:
		return nil
	}
}

// ExecX is like Exec, but panics if an error occurs.
func (_d *ChannelMonitorRequestTemplateDeleteOne) ExecX(ctx context.Context) {
	if err := _d.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||
648
backend/ent/channelmonitorrequesttemplate_query.go
Normal file
648
backend/ent/channelmonitorrequesttemplate_query.go
Normal file
@@ -0,0 +1,648 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorrequesttemplate"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// ChannelMonitorRequestTemplateQuery is the builder for querying ChannelMonitorRequestTemplate entities.
type ChannelMonitorRequestTemplateQuery struct {
	config
	ctx        *QueryContext
	order      []channelmonitorrequesttemplate.OrderOption
	inters     []Interceptor
	predicates []predicate.ChannelMonitorRequestTemplate
	// withMonitors, when non-nil, eager-loads the "monitors" edge.
	withMonitors *ChannelMonitorQuery
	modifiers    []func(*sql.Selector)
	// intermediate query (i.e. traversal path).
	sql  *sql.Selector
	path func(context.Context) (*sql.Selector, error)
}
|
||||
|
||||
// Where adds a new predicate for the ChannelMonitorRequestTemplateQuery builder.
|
||||
func (_q *ChannelMonitorRequestTemplateQuery) Where(ps ...predicate.ChannelMonitorRequestTemplate) *ChannelMonitorRequestTemplateQuery {
|
||||
_q.predicates = append(_q.predicates, ps...)
|
||||
return _q
|
||||
}
|
||||
|
||||
// Limit the number of records to be returned by this query.
|
||||
func (_q *ChannelMonitorRequestTemplateQuery) Limit(limit int) *ChannelMonitorRequestTemplateQuery {
|
||||
_q.ctx.Limit = &limit
|
||||
return _q
|
||||
}
|
||||
|
||||
// Offset to start from.
|
||||
func (_q *ChannelMonitorRequestTemplateQuery) Offset(offset int) *ChannelMonitorRequestTemplateQuery {
|
||||
_q.ctx.Offset = &offset
|
||||
return _q
|
||||
}
|
||||
|
||||
// Unique configures the query builder to filter duplicate records on query.
|
||||
// By default, unique is set to true, and can be disabled using this method.
|
||||
func (_q *ChannelMonitorRequestTemplateQuery) Unique(unique bool) *ChannelMonitorRequestTemplateQuery {
|
||||
_q.ctx.Unique = &unique
|
||||
return _q
|
||||
}
|
||||
|
||||
// Order specifies how the records should be ordered.
|
||||
func (_q *ChannelMonitorRequestTemplateQuery) Order(o ...channelmonitorrequesttemplate.OrderOption) *ChannelMonitorRequestTemplateQuery {
|
||||
_q.order = append(_q.order, o...)
|
||||
return _q
|
||||
}
|
||||
|
||||
// QueryMonitors chains the current query on the "monitors" edge.
|
||||
func (_q *ChannelMonitorRequestTemplateQuery) QueryMonitors() *ChannelMonitorQuery {
|
||||
query := (&ChannelMonitorClient{config: _q.config}).Query()
|
||||
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||
if err := _q.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
selector := _q.sqlQuery(ctx)
|
||||
if err := selector.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(channelmonitorrequesttemplate.Table, channelmonitorrequesttemplate.FieldID, selector),
|
||||
sqlgraph.To(channelmonitor.Table, channelmonitor.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, true, channelmonitorrequesttemplate.MonitorsTable, channelmonitorrequesttemplate.MonitorsColumn),
|
||||
)
|
||||
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||
return fromU, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// First returns the first ChannelMonitorRequestTemplate entity from the query.
|
||||
// Returns a *NotFoundError when no ChannelMonitorRequestTemplate was found.
|
||||
func (_q *ChannelMonitorRequestTemplateQuery) First(ctx context.Context) (*ChannelMonitorRequestTemplate, error) {
|
||||
nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(nodes) == 0 {
|
||||
return nil, &NotFoundError{channelmonitorrequesttemplate.Label}
|
||||
}
|
||||
return nodes[0], nil
|
||||
}
|
||||
|
||||
// FirstX is like First, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorRequestTemplateQuery) FirstX(ctx context.Context) *ChannelMonitorRequestTemplate {
|
||||
node, err := _q.First(ctx)
|
||||
if err != nil && !IsNotFound(err) {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// FirstID returns the first ChannelMonitorRequestTemplate ID from the query.
|
||||
// Returns a *NotFoundError when no ChannelMonitorRequestTemplate ID was found.
|
||||
func (_q *ChannelMonitorRequestTemplateQuery) FirstID(ctx context.Context) (id int64, err error) {
|
||||
var ids []int64
|
||||
if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
|
||||
return
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
err = &NotFoundError{channelmonitorrequesttemplate.Label}
|
||||
return
|
||||
}
|
||||
return ids[0], nil
|
||||
}
|
||||
|
||||
// FirstIDX is like FirstID, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorRequestTemplateQuery) FirstIDX(ctx context.Context) int64 {
|
||||
id, err := _q.FirstID(ctx)
|
||||
if err != nil && !IsNotFound(err) {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// Only returns a single ChannelMonitorRequestTemplate entity found by the query, ensuring it only returns one.
|
||||
// Returns a *NotSingularError when more than one ChannelMonitorRequestTemplate entity is found.
|
||||
// Returns a *NotFoundError when no ChannelMonitorRequestTemplate entities are found.
|
||||
func (_q *ChannelMonitorRequestTemplateQuery) Only(ctx context.Context) (*ChannelMonitorRequestTemplate, error) {
|
||||
nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch len(nodes) {
|
||||
case 1:
|
||||
return nodes[0], nil
|
||||
case 0:
|
||||
return nil, &NotFoundError{channelmonitorrequesttemplate.Label}
|
||||
default:
|
||||
return nil, &NotSingularError{channelmonitorrequesttemplate.Label}
|
||||
}
|
||||
}
|
||||
|
||||
// OnlyX is like Only, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorRequestTemplateQuery) OnlyX(ctx context.Context) *ChannelMonitorRequestTemplate {
|
||||
node, err := _q.Only(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// OnlyID is like Only, but returns the only ChannelMonitorRequestTemplate ID in the query.
|
||||
// Returns a *NotSingularError when more than one ChannelMonitorRequestTemplate ID is found.
|
||||
// Returns a *NotFoundError when no entities are found.
|
||||
func (_q *ChannelMonitorRequestTemplateQuery) OnlyID(ctx context.Context) (id int64, err error) {
|
||||
var ids []int64
|
||||
if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(ids) {
|
||||
case 1:
|
||||
id = ids[0]
|
||||
case 0:
|
||||
err = &NotFoundError{channelmonitorrequesttemplate.Label}
|
||||
default:
|
||||
err = &NotSingularError{channelmonitorrequesttemplate.Label}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// OnlyIDX is like OnlyID, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorRequestTemplateQuery) OnlyIDX(ctx context.Context) int64 {
|
||||
id, err := _q.OnlyID(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// All executes the query and returns a list of ChannelMonitorRequestTemplates.
|
||||
func (_q *ChannelMonitorRequestTemplateQuery) All(ctx context.Context) ([]*ChannelMonitorRequestTemplate, error) {
|
||||
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
|
||||
if err := _q.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
qr := querierAll[[]*ChannelMonitorRequestTemplate, *ChannelMonitorRequestTemplateQuery]()
|
||||
return withInterceptors[[]*ChannelMonitorRequestTemplate](ctx, _q, qr, _q.inters)
|
||||
}
|
||||
|
||||
// AllX is like All, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorRequestTemplateQuery) AllX(ctx context.Context) []*ChannelMonitorRequestTemplate {
|
||||
nodes, err := _q.All(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return nodes
|
||||
}
|
||||
|
||||
// IDs executes the query and returns a list of ChannelMonitorRequestTemplate IDs.
|
||||
func (_q *ChannelMonitorRequestTemplateQuery) IDs(ctx context.Context) (ids []int64, err error) {
|
||||
if _q.ctx.Unique == nil && _q.path != nil {
|
||||
_q.Unique(true)
|
||||
}
|
||||
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
|
||||
if err = _q.Select(channelmonitorrequesttemplate.FieldID).Scan(ctx, &ids); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
// IDsX is like IDs, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorRequestTemplateQuery) IDsX(ctx context.Context) []int64 {
|
||||
ids, err := _q.IDs(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return ids
|
||||
}
|
||||
|
||||
// Count returns the count of the given query.
|
||||
func (_q *ChannelMonitorRequestTemplateQuery) Count(ctx context.Context) (int, error) {
|
||||
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
|
||||
if err := _q.prepareQuery(ctx); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return withInterceptors[int](ctx, _q, querierCount[*ChannelMonitorRequestTemplateQuery](), _q.inters)
|
||||
}
|
||||
|
||||
// CountX is like Count, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorRequestTemplateQuery) CountX(ctx context.Context) int {
|
||||
count, err := _q.Count(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// Exist returns true if the query has elements in the graph.
|
||||
func (_q *ChannelMonitorRequestTemplateQuery) Exist(ctx context.Context) (bool, error) {
|
||||
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
|
||||
switch _, err := _q.FirstID(ctx); {
|
||||
case IsNotFound(err):
|
||||
return false, nil
|
||||
case err != nil:
|
||||
return false, fmt.Errorf("ent: check existence: %w", err)
|
||||
default:
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// ExistX is like Exist, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorRequestTemplateQuery) ExistX(ctx context.Context) bool {
|
||||
exist, err := _q.Exist(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return exist
|
||||
}
|
||||
|
||||
// Clone returns a duplicate of the ChannelMonitorRequestTemplateQuery builder, including all associated steps. It can be
|
||||
// used to prepare common query builders and use them differently after the clone is made.
|
||||
func (_q *ChannelMonitorRequestTemplateQuery) Clone() *ChannelMonitorRequestTemplateQuery {
|
||||
if _q == nil {
|
||||
return nil
|
||||
}
|
||||
return &ChannelMonitorRequestTemplateQuery{
|
||||
config: _q.config,
|
||||
ctx: _q.ctx.Clone(),
|
||||
order: append([]channelmonitorrequesttemplate.OrderOption{}, _q.order...),
|
||||
inters: append([]Interceptor{}, _q.inters...),
|
||||
predicates: append([]predicate.ChannelMonitorRequestTemplate{}, _q.predicates...),
|
||||
withMonitors: _q.withMonitors.Clone(),
|
||||
// clone intermediate query.
|
||||
sql: _q.sql.Clone(),
|
||||
path: _q.path,
|
||||
}
|
||||
}
|
||||
|
||||
// WithMonitors tells the query-builder to eager-load the nodes that are connected to
|
||||
// the "monitors" edge. The optional arguments are used to configure the query builder of the edge.
|
||||
func (_q *ChannelMonitorRequestTemplateQuery) WithMonitors(opts ...func(*ChannelMonitorQuery)) *ChannelMonitorRequestTemplateQuery {
|
||||
query := (&ChannelMonitorClient{config: _q.config}).Query()
|
||||
for _, opt := range opts {
|
||||
opt(query)
|
||||
}
|
||||
_q.withMonitors = query
|
||||
return _q
|
||||
}
|
||||
|
||||
// GroupBy is used to group vertices by one or more fields/columns.
|
||||
// It is often used with aggregate functions, like: count, max, mean, min, sum.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var v []struct {
|
||||
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
// Count int `json:"count,omitempty"`
|
||||
// }
|
||||
//
|
||||
// client.ChannelMonitorRequestTemplate.Query().
|
||||
// GroupBy(channelmonitorrequesttemplate.FieldCreatedAt).
|
||||
// Aggregate(ent.Count()).
|
||||
// Scan(ctx, &v)
|
||||
func (_q *ChannelMonitorRequestTemplateQuery) GroupBy(field string, fields ...string) *ChannelMonitorRequestTemplateGroupBy {
|
||||
_q.ctx.Fields = append([]string{field}, fields...)
|
||||
grbuild := &ChannelMonitorRequestTemplateGroupBy{build: _q}
|
||||
grbuild.flds = &_q.ctx.Fields
|
||||
grbuild.label = channelmonitorrequesttemplate.Label
|
||||
grbuild.scan = grbuild.Scan
|
||||
return grbuild
|
||||
}
|
||||
|
||||
// Select allows the selection one or more fields/columns for the given query,
|
||||
// instead of selecting all fields in the entity.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var v []struct {
|
||||
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
// }
|
||||
//
|
||||
// client.ChannelMonitorRequestTemplate.Query().
|
||||
// Select(channelmonitorrequesttemplate.FieldCreatedAt).
|
||||
// Scan(ctx, &v)
|
||||
func (_q *ChannelMonitorRequestTemplateQuery) Select(fields ...string) *ChannelMonitorRequestTemplateSelect {
|
||||
_q.ctx.Fields = append(_q.ctx.Fields, fields...)
|
||||
sbuild := &ChannelMonitorRequestTemplateSelect{ChannelMonitorRequestTemplateQuery: _q}
|
||||
sbuild.label = channelmonitorrequesttemplate.Label
|
||||
sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
|
||||
return sbuild
|
||||
}
|
||||
|
||||
// Aggregate returns a ChannelMonitorRequestTemplateSelect configured with the given aggregations.
|
||||
func (_q *ChannelMonitorRequestTemplateQuery) Aggregate(fns ...AggregateFunc) *ChannelMonitorRequestTemplateSelect {
|
||||
return _q.Select().Aggregate(fns...)
|
||||
}
|
||||
|
||||
// prepareQuery runs all registered interceptors, validates the selected
// columns, and resolves a deferred traversal path (if any) before the query
// is executed.
func (_q *ChannelMonitorRequestTemplateQuery) prepareQuery(ctx context.Context) error {
	for _, inter := range _q.inters {
		if inter == nil {
			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
		}
		if trv, ok := inter.(Traverser); ok {
			if err := trv.Traverse(ctx, _q); err != nil {
				return err
			}
		}
	}
	// Reject any selected field that is not a known column of this table.
	for _, f := range _q.ctx.Fields {
		if !channelmonitorrequesttemplate.ValidColumn(f) {
			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
		}
	}
	if _q.path != nil {
		prev, err := _q.path(ctx)
		if err != nil {
			return err
		}
		_q.sql = prev
	}
	return nil
}
|
||||
|
||||
// sqlAll scans all rows matching the query into ChannelMonitorRequestTemplate
// entities and eager-loads the requested edges.
func (_q *ChannelMonitorRequestTemplateQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*ChannelMonitorRequestTemplate, error) {
	var (
		nodes = []*ChannelMonitorRequestTemplate{}
		_spec = _q.querySpec()
		// loadedTypes records which edges were requested; it is copied onto
		// every scanned node so Edges accessors can distinguish "not loaded"
		// from "loaded but empty".
		loadedTypes = [1]bool{
			_q.withMonitors != nil,
		}
	)
	_spec.ScanValues = func(columns []string) ([]any, error) {
		return (*ChannelMonitorRequestTemplate).scanValues(nil, columns)
	}
	_spec.Assign = func(columns []string, values []any) error {
		node := &ChannelMonitorRequestTemplate{config: _q.config}
		nodes = append(nodes, node)
		node.Edges.loadedTypes = loadedTypes
		return node.assignValues(columns, values)
	}
	if len(_q.modifiers) > 0 {
		_spec.Modifiers = _q.modifiers
	}
	for i := range hooks {
		hooks[i](ctx, _spec)
	}
	if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nodes, nil
	}
	// Eager-load the "monitors" edge in a single batched follow-up query.
	if query := _q.withMonitors; query != nil {
		if err := _q.loadMonitors(ctx, query, nodes,
			func(n *ChannelMonitorRequestTemplate) { n.Edges.Monitors = []*ChannelMonitor{} },
			func(n *ChannelMonitorRequestTemplate, e *ChannelMonitor) {
				n.Edges.Monitors = append(n.Edges.Monitors, e)
			}); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
|
||||
|
||||
// loadMonitors eager-loads the "monitors" edge for the given template nodes
// with one batched query, mapping each ChannelMonitor back to its parent via
// the template_id foreign key.
func (_q *ChannelMonitorRequestTemplateQuery) loadMonitors(ctx context.Context, query *ChannelMonitorQuery, nodes []*ChannelMonitorRequestTemplate, init func(*ChannelMonitorRequestTemplate), assign func(*ChannelMonitorRequestTemplate, *ChannelMonitor)) error {
	fks := make([]driver.Value, 0, len(nodes))
	nodeids := make(map[int64]*ChannelMonitorRequestTemplate)
	for i := range nodes {
		fks = append(fks, nodes[i].ID)
		nodeids[nodes[i].ID] = nodes[i]
		if init != nil {
			init(nodes[i])
		}
	}
	if len(query.ctx.Fields) > 0 {
		// template_id must be selected so neighbors can be mapped back to parents.
		query.ctx.AppendFieldOnce(channelmonitor.FieldTemplateID)
	}
	query.Where(predicate.ChannelMonitor(func(s *sql.Selector) {
		s.Where(sql.InValues(s.C(channelmonitorrequesttemplate.MonitorsColumn), fks...))
	}))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		fk := n.TemplateID
		if fk == nil {
			return fmt.Errorf(`foreign-key "template_id" is nil for node %v`, n.ID)
		}
		node, ok := nodeids[*fk]
		if !ok {
			return fmt.Errorf(`unexpected referenced foreign-key "template_id" returned %v for node %v`, *fk, n.ID)
		}
		assign(node, n)
	}
	return nil
}
|
||||
|
||||
// sqlCount returns the number of rows matching the query's predicates.
func (_q *ChannelMonitorRequestTemplateQuery) sqlCount(ctx context.Context) (int, error) {
	_spec := _q.querySpec()
	if len(_q.modifiers) > 0 {
		_spec.Modifiers = _q.modifiers
	}
	_spec.Node.Columns = _q.ctx.Fields
	if len(_q.ctx.Fields) > 0 {
		// With an explicit field selection, DISTINCT counting applies only
		// when Unique was requested.
		_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
	}
	return sqlgraph.CountNodes(ctx, _q.driver, _spec)
}
|
||||
|
||||
// querySpec builds the sqlgraph.QuerySpec (columns, predicates, ordering and
// limit/offset) that drives node queries for this builder.
func (_q *ChannelMonitorRequestTemplateQuery) querySpec() *sqlgraph.QuerySpec {
	_spec := sqlgraph.NewQuerySpec(channelmonitorrequesttemplate.Table, channelmonitorrequesttemplate.Columns, sqlgraph.NewFieldSpec(channelmonitorrequesttemplate.FieldID, field.TypeInt64))
	_spec.From = _q.sql
	if unique := _q.ctx.Unique; unique != nil {
		_spec.Unique = *unique
	} else if _q.path != nil {
		// Deferred traversal paths default to unique results.
		_spec.Unique = true
	}
	if fields := _q.ctx.Fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		// The ID column is always selected first; skip it if the caller
		// listed it explicitly to avoid selecting it twice.
		_spec.Node.Columns = append(_spec.Node.Columns, channelmonitorrequesttemplate.FieldID)
		for i := range fields {
			if fields[i] != channelmonitorrequesttemplate.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
			}
		}
	}
	if ps := _q.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if limit := _q.ctx.Limit; limit != nil {
		_spec.Limit = *limit
	}
	if offset := _q.ctx.Offset; offset != nil {
		_spec.Offset = *offset
	}
	if ps := _q.order; len(ps) > 0 {
		_spec.Order = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	return _spec
}
|
||||
|
||||
// sqlQuery builds the raw SQL selector for this query, applying selected
// columns, DISTINCT, modifiers, predicates, ordering and limit/offset.
func (_q *ChannelMonitorRequestTemplateQuery) sqlQuery(ctx context.Context) *sql.Selector {
	builder := sql.Dialect(_q.driver.Dialect())
	t1 := builder.Table(channelmonitorrequesttemplate.Table)
	columns := _q.ctx.Fields
	if len(columns) == 0 {
		columns = channelmonitorrequesttemplate.Columns
	}
	selector := builder.Select(t1.Columns(columns...)...).From(t1)
	if _q.sql != nil {
		// A previously-built selector (e.g. from a traversal) takes precedence;
		// re-apply the column selection on top of it.
		selector = _q.sql
		selector.Select(selector.Columns(columns...)...)
	}
	if _q.ctx.Unique != nil && *_q.ctx.Unique {
		selector.Distinct()
	}
	for _, m := range _q.modifiers {
		m(selector)
	}
	for _, p := range _q.predicates {
		p(selector)
	}
	for _, p := range _q.order {
		p(selector)
	}
	if offset := _q.ctx.Offset; offset != nil {
		// limit is mandatory for offset clause. We start
		// with default value, and override it below if needed.
		selector.Offset(*offset).Limit(math.MaxInt32)
	}
	if limit := _q.ctx.Limit; limit != nil {
		selector.Limit(*limit)
	}
	return selector
}
|
||||
|
||||
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
// either committed or rolled-back.
func (_q *ChannelMonitorRequestTemplateQuery) ForUpdate(opts ...sql.LockOption) *ChannelMonitorRequestTemplateQuery {
	// Postgres disallows SELECT ... FOR UPDATE with DISTINCT, so drop uniqueness.
	if _q.driver.Dialect() == dialect.Postgres {
		_q.Unique(false)
	}
	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
		s.ForUpdate(opts...)
	})
	return _q
}

// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
// on any rows that are read. Other sessions can read the rows, but cannot modify them
// until your transaction commits.
func (_q *ChannelMonitorRequestTemplateQuery) ForShare(opts ...sql.LockOption) *ChannelMonitorRequestTemplateQuery {
	// Postgres disallows SELECT ... FOR SHARE with DISTINCT, so drop uniqueness.
	if _q.driver.Dialect() == dialect.Postgres {
		_q.Unique(false)
	}
	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
		s.ForShare(opts...)
	})
	return _q
}
|
||||
|
||||
// ChannelMonitorRequestTemplateGroupBy is the group-by builder for ChannelMonitorRequestTemplate entities.
type ChannelMonitorRequestTemplateGroupBy struct {
	selector
	// build is the underlying query whose results are grouped.
	build *ChannelMonitorRequestTemplateQuery
}

// Aggregate adds the given aggregation functions to the group-by query.
func (_g *ChannelMonitorRequestTemplateGroupBy) Aggregate(fns ...AggregateFunc) *ChannelMonitorRequestTemplateGroupBy {
	_g.fns = append(_g.fns, fns...)
	return _g
}

// Scan applies the selector query and scans the result into the given value.
func (_g *ChannelMonitorRequestTemplateGroupBy) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
	if err := _g.build.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*ChannelMonitorRequestTemplateQuery, *ChannelMonitorRequestTemplateGroupBy](ctx, _g.build, _g, _g.build.inters, v)
}

// sqlScan executes the grouped query and scans its rows into v.
func (_g *ChannelMonitorRequestTemplateGroupBy) sqlScan(ctx context.Context, root *ChannelMonitorRequestTemplateQuery, v any) error {
	selector := root.sqlQuery(ctx).Select()
	aggregation := make([]string, 0, len(_g.fns))
	for _, fn := range _g.fns {
		aggregation = append(aggregation, fn(selector))
	}
	if len(selector.SelectedColumns()) == 0 {
		// Default selection: the grouped columns followed by the aggregations.
		columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
		for _, f := range *_g.flds {
			columns = append(columns, selector.C(f))
		}
		columns = append(columns, aggregation...)
		selector.Select(columns...)
	}
	selector.GroupBy(selector.Columns(*_g.flds...)...)
	if err := selector.Err(); err != nil {
		return err
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||
|
||||
// ChannelMonitorRequestTemplateSelect is the builder for selecting fields of ChannelMonitorRequestTemplate entities.
type ChannelMonitorRequestTemplateSelect struct {
	*ChannelMonitorRequestTemplateQuery
	selector
}

// Aggregate adds the given aggregation functions to the selector query.
func (_s *ChannelMonitorRequestTemplateSelect) Aggregate(fns ...AggregateFunc) *ChannelMonitorRequestTemplateSelect {
	_s.fns = append(_s.fns, fns...)
	return _s
}

// Scan applies the selector query and scans the result into the given value.
func (_s *ChannelMonitorRequestTemplateSelect) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
	if err := _s.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*ChannelMonitorRequestTemplateQuery, *ChannelMonitorRequestTemplateSelect](ctx, _s.ChannelMonitorRequestTemplateQuery, _s, _s.inters, v)
}

// sqlScan executes the select query and scans its rows into v.
func (_s *ChannelMonitorRequestTemplateSelect) sqlScan(ctx context.Context, root *ChannelMonitorRequestTemplateQuery, v any) error {
	selector := root.sqlQuery(ctx)
	aggregation := make([]string, 0, len(_s.fns))
	for _, fn := range _s.fns {
		aggregation = append(aggregation, fn(selector))
	}
	// Aggregations either replace the selection (no fields chosen) or are
	// appended after the chosen fields.
	switch n := len(*_s.selector.flds); {
	case n == 0 && len(aggregation) > 0:
		selector.Select(aggregation...)
	case n != 0 && len(aggregation) > 0:
		selector.AppendSelect(aggregation...)
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _s.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||
639
backend/ent/channelmonitorrequesttemplate_update.go
Normal file
639
backend/ent/channelmonitorrequesttemplate_update.go
Normal file
@@ -0,0 +1,639 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorrequesttemplate"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// ChannelMonitorRequestTemplateUpdate is the builder for updating ChannelMonitorRequestTemplate entities.
type ChannelMonitorRequestTemplateUpdate struct {
	config
	hooks    []Hook
	mutation *ChannelMonitorRequestTemplateMutation
}

// Where appends a list of predicates to the ChannelMonitorRequestTemplateUpdate builder.
func (_u *ChannelMonitorRequestTemplateUpdate) Where(ps ...predicate.ChannelMonitorRequestTemplate) *ChannelMonitorRequestTemplateUpdate {
	_u.mutation.Where(ps...)
	return _u
}
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
func (_u *ChannelMonitorRequestTemplateUpdate) SetUpdatedAt(v time.Time) *ChannelMonitorRequestTemplateUpdate {
	_u.mutation.SetUpdatedAt(v)
	return _u
}

// SetName sets the "name" field.
func (_u *ChannelMonitorRequestTemplateUpdate) SetName(v string) *ChannelMonitorRequestTemplateUpdate {
	_u.mutation.SetName(v)
	return _u
}

// SetNillableName sets the "name" field if the given value is not nil.
func (_u *ChannelMonitorRequestTemplateUpdate) SetNillableName(v *string) *ChannelMonitorRequestTemplateUpdate {
	if v != nil {
		_u.SetName(*v)
	}
	return _u
}

// SetProvider sets the "provider" field.
func (_u *ChannelMonitorRequestTemplateUpdate) SetProvider(v channelmonitorrequesttemplate.Provider) *ChannelMonitorRequestTemplateUpdate {
	_u.mutation.SetProvider(v)
	return _u
}

// SetNillableProvider sets the "provider" field if the given value is not nil.
func (_u *ChannelMonitorRequestTemplateUpdate) SetNillableProvider(v *channelmonitorrequesttemplate.Provider) *ChannelMonitorRequestTemplateUpdate {
	if v != nil {
		_u.SetProvider(*v)
	}
	return _u
}

// SetDescription sets the "description" field.
func (_u *ChannelMonitorRequestTemplateUpdate) SetDescription(v string) *ChannelMonitorRequestTemplateUpdate {
	_u.mutation.SetDescription(v)
	return _u
}

// SetNillableDescription sets the "description" field if the given value is not nil.
func (_u *ChannelMonitorRequestTemplateUpdate) SetNillableDescription(v *string) *ChannelMonitorRequestTemplateUpdate {
	if v != nil {
		_u.SetDescription(*v)
	}
	return _u
}

// ClearDescription clears the value of the "description" field.
func (_u *ChannelMonitorRequestTemplateUpdate) ClearDescription() *ChannelMonitorRequestTemplateUpdate {
	_u.mutation.ClearDescription()
	return _u
}

// SetExtraHeaders sets the "extra_headers" field.
func (_u *ChannelMonitorRequestTemplateUpdate) SetExtraHeaders(v map[string]string) *ChannelMonitorRequestTemplateUpdate {
	_u.mutation.SetExtraHeaders(v)
	return _u
}

// SetBodyOverrideMode sets the "body_override_mode" field.
func (_u *ChannelMonitorRequestTemplateUpdate) SetBodyOverrideMode(v string) *ChannelMonitorRequestTemplateUpdate {
	_u.mutation.SetBodyOverrideMode(v)
	return _u
}

// SetNillableBodyOverrideMode sets the "body_override_mode" field if the given value is not nil.
func (_u *ChannelMonitorRequestTemplateUpdate) SetNillableBodyOverrideMode(v *string) *ChannelMonitorRequestTemplateUpdate {
	if v != nil {
		_u.SetBodyOverrideMode(*v)
	}
	return _u
}

// SetBodyOverride sets the "body_override" field.
func (_u *ChannelMonitorRequestTemplateUpdate) SetBodyOverride(v map[string]interface{}) *ChannelMonitorRequestTemplateUpdate {
	_u.mutation.SetBodyOverride(v)
	return _u
}

// ClearBodyOverride clears the value of the "body_override" field.
func (_u *ChannelMonitorRequestTemplateUpdate) ClearBodyOverride() *ChannelMonitorRequestTemplateUpdate {
	_u.mutation.ClearBodyOverride()
	return _u
}

// AddMonitorIDs adds the "monitors" edge to the ChannelMonitor entity by IDs.
func (_u *ChannelMonitorRequestTemplateUpdate) AddMonitorIDs(ids ...int64) *ChannelMonitorRequestTemplateUpdate {
	_u.mutation.AddMonitorIDs(ids...)
	return _u
}

// AddMonitors adds the "monitors" edges to the ChannelMonitor entity.
func (_u *ChannelMonitorRequestTemplateUpdate) AddMonitors(v ...*ChannelMonitor) *ChannelMonitorRequestTemplateUpdate {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.AddMonitorIDs(ids...)
}

// Mutation returns the ChannelMonitorRequestTemplateMutation object of the builder.
func (_u *ChannelMonitorRequestTemplateUpdate) Mutation() *ChannelMonitorRequestTemplateMutation {
	return _u.mutation
}

// ClearMonitors clears all "monitors" edges to the ChannelMonitor entity.
func (_u *ChannelMonitorRequestTemplateUpdate) ClearMonitors() *ChannelMonitorRequestTemplateUpdate {
	_u.mutation.ClearMonitors()
	return _u
}

// RemoveMonitorIDs removes the "monitors" edge to ChannelMonitor entities by IDs.
func (_u *ChannelMonitorRequestTemplateUpdate) RemoveMonitorIDs(ids ...int64) *ChannelMonitorRequestTemplateUpdate {
	_u.mutation.RemoveMonitorIDs(ids...)
	return _u
}

// RemoveMonitors removes "monitors" edges to ChannelMonitor entities.
func (_u *ChannelMonitorRequestTemplateUpdate) RemoveMonitors(v ...*ChannelMonitor) *ChannelMonitorRequestTemplateUpdate {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.RemoveMonitorIDs(ids...)
}
|
||||
|
||||
// Save executes the query and returns the number of nodes affected by the update operation.
func (_u *ChannelMonitorRequestTemplateUpdate) Save(ctx context.Context) (int, error) {
	// Apply builder defaults (e.g. updated_at) before running hooks and the SQL save.
	_u.defaults()
	return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (_u *ChannelMonitorRequestTemplateUpdate) SaveX(ctx context.Context) int {
	affected, err := _u.Save(ctx)
	if err != nil {
		panic(err)
	}
	return affected
}

// Exec executes the query.
func (_u *ChannelMonitorRequestTemplateUpdate) Exec(ctx context.Context) error {
	_, err := _u.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (_u *ChannelMonitorRequestTemplateUpdate) ExecX(ctx context.Context) {
	if err := _u.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
func (_u *ChannelMonitorRequestTemplateUpdate) defaults() {
	// Stamp updated_at automatically unless the caller set it explicitly.
	if _, ok := _u.mutation.UpdatedAt(); !ok {
		v := channelmonitorrequesttemplate.UpdateDefaultUpdatedAt()
		_u.mutation.SetUpdatedAt(v)
	}
}

// check runs all checks and user-defined validators on the builder.
func (_u *ChannelMonitorRequestTemplateUpdate) check() error {
	if v, ok := _u.mutation.Name(); ok {
		if err := channelmonitorrequesttemplate.NameValidator(v); err != nil {
			return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorRequestTemplate.name": %w`, err)}
		}
	}
	if v, ok := _u.mutation.Provider(); ok {
		if err := channelmonitorrequesttemplate.ProviderValidator(v); err != nil {
			return &ValidationError{Name: "provider", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorRequestTemplate.provider": %w`, err)}
		}
	}
	if v, ok := _u.mutation.Description(); ok {
		if err := channelmonitorrequesttemplate.DescriptionValidator(v); err != nil {
			return &ValidationError{Name: "description", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorRequestTemplate.description": %w`, err)}
		}
	}
	if v, ok := _u.mutation.BodyOverrideMode(); ok {
		if err := channelmonitorrequesttemplate.BodyOverrideModeValidator(v); err != nil {
			return &ValidationError{Name: "body_override_mode", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorRequestTemplate.body_override_mode": %w`, err)}
		}
	}
	return nil
}
|
||||
|
||||
// sqlSave validates the mutation, builds the sqlgraph update spec (fields and
// monitor-edge changes), and executes it, returning the number of affected rows.
func (_u *ChannelMonitorRequestTemplateUpdate) sqlSave(ctx context.Context) (_node int, err error) {
	if err := _u.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(channelmonitorrequesttemplate.Table, channelmonitorrequesttemplate.Columns, sqlgraph.NewFieldSpec(channelmonitorrequesttemplate.FieldID, field.TypeInt64))
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if value, ok := _u.mutation.UpdatedAt(); ok {
		_spec.SetField(channelmonitorrequesttemplate.FieldUpdatedAt, field.TypeTime, value)
	}
	if value, ok := _u.mutation.Name(); ok {
		_spec.SetField(channelmonitorrequesttemplate.FieldName, field.TypeString, value)
	}
	if value, ok := _u.mutation.Provider(); ok {
		_spec.SetField(channelmonitorrequesttemplate.FieldProvider, field.TypeEnum, value)
	}
	if value, ok := _u.mutation.Description(); ok {
		_spec.SetField(channelmonitorrequesttemplate.FieldDescription, field.TypeString, value)
	}
	if _u.mutation.DescriptionCleared() {
		_spec.ClearField(channelmonitorrequesttemplate.FieldDescription, field.TypeString)
	}
	if value, ok := _u.mutation.ExtraHeaders(); ok {
		_spec.SetField(channelmonitorrequesttemplate.FieldExtraHeaders, field.TypeJSON, value)
	}
	if value, ok := _u.mutation.BodyOverrideMode(); ok {
		_spec.SetField(channelmonitorrequesttemplate.FieldBodyOverrideMode, field.TypeString, value)
	}
	if value, ok := _u.mutation.BodyOverride(); ok {
		_spec.SetField(channelmonitorrequesttemplate.FieldBodyOverride, field.TypeJSON, value)
	}
	if _u.mutation.BodyOverrideCleared() {
		_spec.ClearField(channelmonitorrequesttemplate.FieldBodyOverride, field.TypeJSON)
	}
	// Edge handling: clear-all, then explicit removals (skipped when clearing
	// all), then additions — mirroring the mutation's recorded edge state.
	if _u.mutation.MonitorsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: true,
			Table:   channelmonitorrequesttemplate.MonitorsTable,
			Columns: []string{channelmonitorrequesttemplate.MonitorsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.RemovedMonitorsIDs(); len(nodes) > 0 && !_u.mutation.MonitorsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: true,
			Table:   channelmonitorrequesttemplate.MonitorsTable,
			Columns: []string{channelmonitorrequesttemplate.MonitorsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.MonitorsIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: true,
			Table:   channelmonitorrequesttemplate.MonitorsTable,
			Columns: []string{channelmonitorrequesttemplate.MonitorsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{channelmonitorrequesttemplate.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return 0, err
	}
	_u.mutation.done = true
	return _node, nil
}
|
||||
|
||||
// ChannelMonitorRequestTemplateUpdateOne is the builder for updating a single ChannelMonitorRequestTemplate entity.
type ChannelMonitorRequestTemplateUpdateOne struct {
	config
	// fields holds an optional column selection for the returned entity.
	fields   []string
	hooks    []Hook
	mutation *ChannelMonitorRequestTemplateMutation
}
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
func (_u *ChannelMonitorRequestTemplateUpdateOne) SetUpdatedAt(v time.Time) *ChannelMonitorRequestTemplateUpdateOne {
	_u.mutation.SetUpdatedAt(v)
	return _u
}

// SetName sets the "name" field.
func (_u *ChannelMonitorRequestTemplateUpdateOne) SetName(v string) *ChannelMonitorRequestTemplateUpdateOne {
	_u.mutation.SetName(v)
	return _u
}

// SetNillableName sets the "name" field if the given value is not nil.
func (_u *ChannelMonitorRequestTemplateUpdateOne) SetNillableName(v *string) *ChannelMonitorRequestTemplateUpdateOne {
	if v != nil {
		_u.SetName(*v)
	}
	return _u
}

// SetProvider sets the "provider" field.
func (_u *ChannelMonitorRequestTemplateUpdateOne) SetProvider(v channelmonitorrequesttemplate.Provider) *ChannelMonitorRequestTemplateUpdateOne {
	_u.mutation.SetProvider(v)
	return _u
}

// SetNillableProvider sets the "provider" field if the given value is not nil.
func (_u *ChannelMonitorRequestTemplateUpdateOne) SetNillableProvider(v *channelmonitorrequesttemplate.Provider) *ChannelMonitorRequestTemplateUpdateOne {
	if v != nil {
		_u.SetProvider(*v)
	}
	return _u
}

// SetDescription sets the "description" field.
func (_u *ChannelMonitorRequestTemplateUpdateOne) SetDescription(v string) *ChannelMonitorRequestTemplateUpdateOne {
	_u.mutation.SetDescription(v)
	return _u
}

// SetNillableDescription sets the "description" field if the given value is not nil.
func (_u *ChannelMonitorRequestTemplateUpdateOne) SetNillableDescription(v *string) *ChannelMonitorRequestTemplateUpdateOne {
	if v != nil {
		_u.SetDescription(*v)
	}
	return _u
}

// ClearDescription clears the value of the "description" field.
func (_u *ChannelMonitorRequestTemplateUpdateOne) ClearDescription() *ChannelMonitorRequestTemplateUpdateOne {
	_u.mutation.ClearDescription()
	return _u
}

// SetExtraHeaders sets the "extra_headers" field.
func (_u *ChannelMonitorRequestTemplateUpdateOne) SetExtraHeaders(v map[string]string) *ChannelMonitorRequestTemplateUpdateOne {
	_u.mutation.SetExtraHeaders(v)
	return _u
}

// SetBodyOverrideMode sets the "body_override_mode" field.
func (_u *ChannelMonitorRequestTemplateUpdateOne) SetBodyOverrideMode(v string) *ChannelMonitorRequestTemplateUpdateOne {
	_u.mutation.SetBodyOverrideMode(v)
	return _u
}

// SetNillableBodyOverrideMode sets the "body_override_mode" field if the given value is not nil.
func (_u *ChannelMonitorRequestTemplateUpdateOne) SetNillableBodyOverrideMode(v *string) *ChannelMonitorRequestTemplateUpdateOne {
	if v != nil {
		_u.SetBodyOverrideMode(*v)
	}
	return _u
}

// SetBodyOverride sets the "body_override" field.
func (_u *ChannelMonitorRequestTemplateUpdateOne) SetBodyOverride(v map[string]interface{}) *ChannelMonitorRequestTemplateUpdateOne {
	_u.mutation.SetBodyOverride(v)
	return _u
}

// ClearBodyOverride clears the value of the "body_override" field.
func (_u *ChannelMonitorRequestTemplateUpdateOne) ClearBodyOverride() *ChannelMonitorRequestTemplateUpdateOne {
	_u.mutation.ClearBodyOverride()
	return _u
}

// AddMonitorIDs adds the "monitors" edge to the ChannelMonitor entity by IDs.
func (_u *ChannelMonitorRequestTemplateUpdateOne) AddMonitorIDs(ids ...int64) *ChannelMonitorRequestTemplateUpdateOne {
	_u.mutation.AddMonitorIDs(ids...)
	return _u
}

// AddMonitors adds the "monitors" edges to the ChannelMonitor entity.
func (_u *ChannelMonitorRequestTemplateUpdateOne) AddMonitors(v ...*ChannelMonitor) *ChannelMonitorRequestTemplateUpdateOne {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.AddMonitorIDs(ids...)
}

// Mutation returns the ChannelMonitorRequestTemplateMutation object of the builder.
func (_u *ChannelMonitorRequestTemplateUpdateOne) Mutation() *ChannelMonitorRequestTemplateMutation {
	return _u.mutation
}

// ClearMonitors clears all "monitors" edges to the ChannelMonitor entity.
func (_u *ChannelMonitorRequestTemplateUpdateOne) ClearMonitors() *ChannelMonitorRequestTemplateUpdateOne {
	_u.mutation.ClearMonitors()
	return _u
}

// RemoveMonitorIDs removes the "monitors" edge to ChannelMonitor entities by IDs.
func (_u *ChannelMonitorRequestTemplateUpdateOne) RemoveMonitorIDs(ids ...int64) *ChannelMonitorRequestTemplateUpdateOne {
	_u.mutation.RemoveMonitorIDs(ids...)
	return _u
}

// RemoveMonitors removes "monitors" edges to ChannelMonitor entities.
func (_u *ChannelMonitorRequestTemplateUpdateOne) RemoveMonitors(v ...*ChannelMonitor) *ChannelMonitorRequestTemplateUpdateOne {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.RemoveMonitorIDs(ids...)
}

// Where appends a list of predicates to the ChannelMonitorRequestTemplateUpdateOne builder.
func (_u *ChannelMonitorRequestTemplateUpdateOne) Where(ps ...predicate.ChannelMonitorRequestTemplate) *ChannelMonitorRequestTemplateUpdateOne {
	_u.mutation.Where(ps...)
	return _u
}
|
||||
|
||||
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (_u *ChannelMonitorRequestTemplateUpdateOne) Select(field string, fields ...string) *ChannelMonitorRequestTemplateUpdateOne {
	_u.fields = append([]string{field}, fields...)
	return _u
}

// Save executes the query and returns the updated ChannelMonitorRequestTemplate entity.
func (_u *ChannelMonitorRequestTemplateUpdateOne) Save(ctx context.Context) (*ChannelMonitorRequestTemplate, error) {
	// Apply builder defaults (e.g. updated_at) before running hooks and the SQL save.
	_u.defaults()
	return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (_u *ChannelMonitorRequestTemplateUpdateOne) SaveX(ctx context.Context) *ChannelMonitorRequestTemplate {
	node, err := _u.Save(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// Exec executes the query on the entity.
func (_u *ChannelMonitorRequestTemplateUpdateOne) Exec(ctx context.Context) error {
	_, err := _u.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (_u *ChannelMonitorRequestTemplateUpdateOne) ExecX(ctx context.Context) {
	if err := _u.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
|
||||
func (_u *ChannelMonitorRequestTemplateUpdateOne) defaults() {
|
||||
if _, ok := _u.mutation.UpdatedAt(); !ok {
|
||||
v := channelmonitorrequesttemplate.UpdateDefaultUpdatedAt()
|
||||
_u.mutation.SetUpdatedAt(v)
|
||||
}
|
||||
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
|
||||
func (_u *ChannelMonitorRequestTemplateUpdateOne) check() error {
|
||||
if v, ok := _u.mutation.Name(); ok {
|
||||
if err := channelmonitorrequesttemplate.NameValidator(v); err != nil {
|
||||
return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorRequestTemplate.name": %w`, err)}
|
||||
}
|
||||
}
|
||||
if v, ok := _u.mutation.Provider(); ok {
|
||||
if err := channelmonitorrequesttemplate.ProviderValidator(v); err != nil {
|
||||
return &ValidationError{Name: "provider", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorRequestTemplate.provider": %w`, err)}
|
||||
}
|
||||
}
|
||||
if v, ok := _u.mutation.Description(); ok {
|
||||
if err := channelmonitorrequesttemplate.DescriptionValidator(v); err != nil {
|
||||
return &ValidationError{Name: "description", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorRequestTemplate.description": %w`, err)}
|
||||
}
|
||||
}
|
||||
if v, ok := _u.mutation.BodyOverrideMode(); ok {
|
||||
if err := channelmonitorrequesttemplate.BodyOverrideModeValidator(v); err != nil {
|
||||
return &ValidationError{Name: "body_override_mode", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorRequestTemplate.body_override_mode": %w`, err)}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// sqlSave validates the mutation, translates it into a sqlgraph update spec
// (fields, predicates, and monitor-edge changes), executes it, and returns
// the updated entity scanned back from the database. It is the persistence
// step invoked by Save through the hook chain.
func (_u *ChannelMonitorRequestTemplateUpdateOne) sqlSave(ctx context.Context) (_node *ChannelMonitorRequestTemplate, err error) {
	// Run field validators before touching the database.
	if err := _u.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(channelmonitorrequesttemplate.Table, channelmonitorrequesttemplate.Columns, sqlgraph.NewFieldSpec(channelmonitorrequesttemplate.FieldID, field.TypeInt64))
	// UpdateOne requires a concrete entity ID on the mutation.
	id, ok := _u.mutation.ID()
	if !ok {
		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "ChannelMonitorRequestTemplate.id" for update`)}
	}
	_spec.Node.ID.Value = id
	// Honor Select(): restrict the returned columns, always keeping the ID.
	if fields := _u.fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, channelmonitorrequesttemplate.FieldID)
		for _, f := range fields {
			if !channelmonitorrequesttemplate.ValidColumn(f) {
				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
			}
			if f != channelmonitorrequesttemplate.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, f)
			}
		}
	}
	// Apply any Where() predicates accumulated on the mutation.
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Scalar field changes: set values that were recorded on the mutation,
	// and clear the optional fields flagged for clearing.
	if value, ok := _u.mutation.UpdatedAt(); ok {
		_spec.SetField(channelmonitorrequesttemplate.FieldUpdatedAt, field.TypeTime, value)
	}
	if value, ok := _u.mutation.Name(); ok {
		_spec.SetField(channelmonitorrequesttemplate.FieldName, field.TypeString, value)
	}
	if value, ok := _u.mutation.Provider(); ok {
		_spec.SetField(channelmonitorrequesttemplate.FieldProvider, field.TypeEnum, value)
	}
	if value, ok := _u.mutation.Description(); ok {
		_spec.SetField(channelmonitorrequesttemplate.FieldDescription, field.TypeString, value)
	}
	if _u.mutation.DescriptionCleared() {
		_spec.ClearField(channelmonitorrequesttemplate.FieldDescription, field.TypeString)
	}
	if value, ok := _u.mutation.ExtraHeaders(); ok {
		_spec.SetField(channelmonitorrequesttemplate.FieldExtraHeaders, field.TypeJSON, value)
	}
	if value, ok := _u.mutation.BodyOverrideMode(); ok {
		_spec.SetField(channelmonitorrequesttemplate.FieldBodyOverrideMode, field.TypeString, value)
	}
	if value, ok := _u.mutation.BodyOverride(); ok {
		_spec.SetField(channelmonitorrequesttemplate.FieldBodyOverride, field.TypeJSON, value)
	}
	if _u.mutation.BodyOverrideCleared() {
		_spec.ClearField(channelmonitorrequesttemplate.FieldBodyOverride, field.TypeJSON)
	}
	// Edge changes for "monitors": a full clear detaches every monitor.
	if _u.mutation.MonitorsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: true,
			Table:   channelmonitorrequesttemplate.MonitorsTable,
			Columns: []string{channelmonitorrequesttemplate.MonitorsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	// Targeted removals only apply when the edge is not being fully cleared.
	if nodes := _u.mutation.RemovedMonitorsIDs(); len(nodes) > 0 && !_u.mutation.MonitorsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: true,
			Table:   channelmonitorrequesttemplate.MonitorsTable,
			Columns: []string{channelmonitorrequesttemplate.MonitorsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	// Newly added monitor IDs are attached after clears/removals.
	if nodes := _u.mutation.MonitorsIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: true,
			Table:   channelmonitorrequesttemplate.MonitorsTable,
			Columns: []string{channelmonitorrequesttemplate.MonitorsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// Wire up scanning so the driver can populate the returned entity.
	_node = &ChannelMonitorRequestTemplate{config: _u.config}
	_spec.Assign = _node.assignValues
	_spec.ScanValues = _node.scanValues
	if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil {
		// Translate driver-level errors into ent's typed errors.
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{channelmonitorrequesttemplate.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	// Mark the mutation consumed so it cannot be reused accidentally.
	_u.mutation.done = true
	return _node, nil
}
|
||||
@@ -22,6 +22,10 @@ import (
|
||||
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentity"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentitychannel"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorrequesttemplate"
|
||||
"github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule"
|
||||
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||
"github.com/Wei-Shaw/sub2api/ent/idempotencyrecord"
|
||||
@@ -68,6 +72,14 @@ type Client struct {
|
||||
AuthIdentity *AuthIdentityClient
|
||||
// AuthIdentityChannel is the client for interacting with the AuthIdentityChannel builders.
|
||||
AuthIdentityChannel *AuthIdentityChannelClient
|
||||
// ChannelMonitor is the client for interacting with the ChannelMonitor builders.
|
||||
ChannelMonitor *ChannelMonitorClient
|
||||
// ChannelMonitorDailyRollup is the client for interacting with the ChannelMonitorDailyRollup builders.
|
||||
ChannelMonitorDailyRollup *ChannelMonitorDailyRollupClient
|
||||
// ChannelMonitorHistory is the client for interacting with the ChannelMonitorHistory builders.
|
||||
ChannelMonitorHistory *ChannelMonitorHistoryClient
|
||||
// ChannelMonitorRequestTemplate is the client for interacting with the ChannelMonitorRequestTemplate builders.
|
||||
ChannelMonitorRequestTemplate *ChannelMonitorRequestTemplateClient
|
||||
// ErrorPassthroughRule is the client for interacting with the ErrorPassthroughRule builders.
|
||||
ErrorPassthroughRule *ErrorPassthroughRuleClient
|
||||
// Group is the client for interacting with the Group builders.
|
||||
@@ -132,6 +144,10 @@ func (c *Client) init() {
|
||||
c.AnnouncementRead = NewAnnouncementReadClient(c.config)
|
||||
c.AuthIdentity = NewAuthIdentityClient(c.config)
|
||||
c.AuthIdentityChannel = NewAuthIdentityChannelClient(c.config)
|
||||
c.ChannelMonitor = NewChannelMonitorClient(c.config)
|
||||
c.ChannelMonitorDailyRollup = NewChannelMonitorDailyRollupClient(c.config)
|
||||
c.ChannelMonitorHistory = NewChannelMonitorHistoryClient(c.config)
|
||||
c.ChannelMonitorRequestTemplate = NewChannelMonitorRequestTemplateClient(c.config)
|
||||
c.ErrorPassthroughRule = NewErrorPassthroughRuleClient(c.config)
|
||||
c.Group = NewGroupClient(c.config)
|
||||
c.IdempotencyRecord = NewIdempotencyRecordClient(c.config)
|
||||
@@ -245,38 +261,42 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) {
|
||||
cfg := c.config
|
||||
cfg.driver = tx
|
||||
return &Tx{
|
||||
ctx: ctx,
|
||||
config: cfg,
|
||||
APIKey: NewAPIKeyClient(cfg),
|
||||
Account: NewAccountClient(cfg),
|
||||
AccountGroup: NewAccountGroupClient(cfg),
|
||||
Announcement: NewAnnouncementClient(cfg),
|
||||
AnnouncementRead: NewAnnouncementReadClient(cfg),
|
||||
AuthIdentity: NewAuthIdentityClient(cfg),
|
||||
AuthIdentityChannel: NewAuthIdentityChannelClient(cfg),
|
||||
ErrorPassthroughRule: NewErrorPassthroughRuleClient(cfg),
|
||||
Group: NewGroupClient(cfg),
|
||||
IdempotencyRecord: NewIdempotencyRecordClient(cfg),
|
||||
IdentityAdoptionDecision: NewIdentityAdoptionDecisionClient(cfg),
|
||||
PaymentAuditLog: NewPaymentAuditLogClient(cfg),
|
||||
PaymentOrder: NewPaymentOrderClient(cfg),
|
||||
PaymentProviderInstance: NewPaymentProviderInstanceClient(cfg),
|
||||
PendingAuthSession: NewPendingAuthSessionClient(cfg),
|
||||
PromoCode: NewPromoCodeClient(cfg),
|
||||
PromoCodeUsage: NewPromoCodeUsageClient(cfg),
|
||||
Proxy: NewProxyClient(cfg),
|
||||
RedeemCode: NewRedeemCodeClient(cfg),
|
||||
SecuritySecret: NewSecuritySecretClient(cfg),
|
||||
Setting: NewSettingClient(cfg),
|
||||
SubscriptionPlan: NewSubscriptionPlanClient(cfg),
|
||||
TLSFingerprintProfile: NewTLSFingerprintProfileClient(cfg),
|
||||
UsageCleanupTask: NewUsageCleanupTaskClient(cfg),
|
||||
UsageLog: NewUsageLogClient(cfg),
|
||||
User: NewUserClient(cfg),
|
||||
UserAllowedGroup: NewUserAllowedGroupClient(cfg),
|
||||
UserAttributeDefinition: NewUserAttributeDefinitionClient(cfg),
|
||||
UserAttributeValue: NewUserAttributeValueClient(cfg),
|
||||
UserSubscription: NewUserSubscriptionClient(cfg),
|
||||
ctx: ctx,
|
||||
config: cfg,
|
||||
APIKey: NewAPIKeyClient(cfg),
|
||||
Account: NewAccountClient(cfg),
|
||||
AccountGroup: NewAccountGroupClient(cfg),
|
||||
Announcement: NewAnnouncementClient(cfg),
|
||||
AnnouncementRead: NewAnnouncementReadClient(cfg),
|
||||
AuthIdentity: NewAuthIdentityClient(cfg),
|
||||
AuthIdentityChannel: NewAuthIdentityChannelClient(cfg),
|
||||
ChannelMonitor: NewChannelMonitorClient(cfg),
|
||||
ChannelMonitorDailyRollup: NewChannelMonitorDailyRollupClient(cfg),
|
||||
ChannelMonitorHistory: NewChannelMonitorHistoryClient(cfg),
|
||||
ChannelMonitorRequestTemplate: NewChannelMonitorRequestTemplateClient(cfg),
|
||||
ErrorPassthroughRule: NewErrorPassthroughRuleClient(cfg),
|
||||
Group: NewGroupClient(cfg),
|
||||
IdempotencyRecord: NewIdempotencyRecordClient(cfg),
|
||||
IdentityAdoptionDecision: NewIdentityAdoptionDecisionClient(cfg),
|
||||
PaymentAuditLog: NewPaymentAuditLogClient(cfg),
|
||||
PaymentOrder: NewPaymentOrderClient(cfg),
|
||||
PaymentProviderInstance: NewPaymentProviderInstanceClient(cfg),
|
||||
PendingAuthSession: NewPendingAuthSessionClient(cfg),
|
||||
PromoCode: NewPromoCodeClient(cfg),
|
||||
PromoCodeUsage: NewPromoCodeUsageClient(cfg),
|
||||
Proxy: NewProxyClient(cfg),
|
||||
RedeemCode: NewRedeemCodeClient(cfg),
|
||||
SecuritySecret: NewSecuritySecretClient(cfg),
|
||||
Setting: NewSettingClient(cfg),
|
||||
SubscriptionPlan: NewSubscriptionPlanClient(cfg),
|
||||
TLSFingerprintProfile: NewTLSFingerprintProfileClient(cfg),
|
||||
UsageCleanupTask: NewUsageCleanupTaskClient(cfg),
|
||||
UsageLog: NewUsageLogClient(cfg),
|
||||
User: NewUserClient(cfg),
|
||||
UserAllowedGroup: NewUserAllowedGroupClient(cfg),
|
||||
UserAttributeDefinition: NewUserAttributeDefinitionClient(cfg),
|
||||
UserAttributeValue: NewUserAttributeValueClient(cfg),
|
||||
UserSubscription: NewUserSubscriptionClient(cfg),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -294,38 +314,42 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error)
|
||||
cfg := c.config
|
||||
cfg.driver = &txDriver{tx: tx, drv: c.driver}
|
||||
return &Tx{
|
||||
ctx: ctx,
|
||||
config: cfg,
|
||||
APIKey: NewAPIKeyClient(cfg),
|
||||
Account: NewAccountClient(cfg),
|
||||
AccountGroup: NewAccountGroupClient(cfg),
|
||||
Announcement: NewAnnouncementClient(cfg),
|
||||
AnnouncementRead: NewAnnouncementReadClient(cfg),
|
||||
AuthIdentity: NewAuthIdentityClient(cfg),
|
||||
AuthIdentityChannel: NewAuthIdentityChannelClient(cfg),
|
||||
ErrorPassthroughRule: NewErrorPassthroughRuleClient(cfg),
|
||||
Group: NewGroupClient(cfg),
|
||||
IdempotencyRecord: NewIdempotencyRecordClient(cfg),
|
||||
IdentityAdoptionDecision: NewIdentityAdoptionDecisionClient(cfg),
|
||||
PaymentAuditLog: NewPaymentAuditLogClient(cfg),
|
||||
PaymentOrder: NewPaymentOrderClient(cfg),
|
||||
PaymentProviderInstance: NewPaymentProviderInstanceClient(cfg),
|
||||
PendingAuthSession: NewPendingAuthSessionClient(cfg),
|
||||
PromoCode: NewPromoCodeClient(cfg),
|
||||
PromoCodeUsage: NewPromoCodeUsageClient(cfg),
|
||||
Proxy: NewProxyClient(cfg),
|
||||
RedeemCode: NewRedeemCodeClient(cfg),
|
||||
SecuritySecret: NewSecuritySecretClient(cfg),
|
||||
Setting: NewSettingClient(cfg),
|
||||
SubscriptionPlan: NewSubscriptionPlanClient(cfg),
|
||||
TLSFingerprintProfile: NewTLSFingerprintProfileClient(cfg),
|
||||
UsageCleanupTask: NewUsageCleanupTaskClient(cfg),
|
||||
UsageLog: NewUsageLogClient(cfg),
|
||||
User: NewUserClient(cfg),
|
||||
UserAllowedGroup: NewUserAllowedGroupClient(cfg),
|
||||
UserAttributeDefinition: NewUserAttributeDefinitionClient(cfg),
|
||||
UserAttributeValue: NewUserAttributeValueClient(cfg),
|
||||
UserSubscription: NewUserSubscriptionClient(cfg),
|
||||
ctx: ctx,
|
||||
config: cfg,
|
||||
APIKey: NewAPIKeyClient(cfg),
|
||||
Account: NewAccountClient(cfg),
|
||||
AccountGroup: NewAccountGroupClient(cfg),
|
||||
Announcement: NewAnnouncementClient(cfg),
|
||||
AnnouncementRead: NewAnnouncementReadClient(cfg),
|
||||
AuthIdentity: NewAuthIdentityClient(cfg),
|
||||
AuthIdentityChannel: NewAuthIdentityChannelClient(cfg),
|
||||
ChannelMonitor: NewChannelMonitorClient(cfg),
|
||||
ChannelMonitorDailyRollup: NewChannelMonitorDailyRollupClient(cfg),
|
||||
ChannelMonitorHistory: NewChannelMonitorHistoryClient(cfg),
|
||||
ChannelMonitorRequestTemplate: NewChannelMonitorRequestTemplateClient(cfg),
|
||||
ErrorPassthroughRule: NewErrorPassthroughRuleClient(cfg),
|
||||
Group: NewGroupClient(cfg),
|
||||
IdempotencyRecord: NewIdempotencyRecordClient(cfg),
|
||||
IdentityAdoptionDecision: NewIdentityAdoptionDecisionClient(cfg),
|
||||
PaymentAuditLog: NewPaymentAuditLogClient(cfg),
|
||||
PaymentOrder: NewPaymentOrderClient(cfg),
|
||||
PaymentProviderInstance: NewPaymentProviderInstanceClient(cfg),
|
||||
PendingAuthSession: NewPendingAuthSessionClient(cfg),
|
||||
PromoCode: NewPromoCodeClient(cfg),
|
||||
PromoCodeUsage: NewPromoCodeUsageClient(cfg),
|
||||
Proxy: NewProxyClient(cfg),
|
||||
RedeemCode: NewRedeemCodeClient(cfg),
|
||||
SecuritySecret: NewSecuritySecretClient(cfg),
|
||||
Setting: NewSettingClient(cfg),
|
||||
SubscriptionPlan: NewSubscriptionPlanClient(cfg),
|
||||
TLSFingerprintProfile: NewTLSFingerprintProfileClient(cfg),
|
||||
UsageCleanupTask: NewUsageCleanupTaskClient(cfg),
|
||||
UsageLog: NewUsageLogClient(cfg),
|
||||
User: NewUserClient(cfg),
|
||||
UserAllowedGroup: NewUserAllowedGroupClient(cfg),
|
||||
UserAttributeDefinition: NewUserAttributeDefinitionClient(cfg),
|
||||
UserAttributeValue: NewUserAttributeValueClient(cfg),
|
||||
UserSubscription: NewUserSubscriptionClient(cfg),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -356,7 +380,9 @@ func (c *Client) Close() error {
|
||||
func (c *Client) Use(hooks ...Hook) {
|
||||
for _, n := range []interface{ Use(...Hook) }{
|
||||
c.APIKey, c.Account, c.AccountGroup, c.Announcement, c.AnnouncementRead,
|
||||
c.AuthIdentity, c.AuthIdentityChannel, c.ErrorPassthroughRule, c.Group,
|
||||
c.AuthIdentity, c.AuthIdentityChannel, c.ChannelMonitor,
|
||||
c.ChannelMonitorDailyRollup, c.ChannelMonitorHistory,
|
||||
c.ChannelMonitorRequestTemplate, c.ErrorPassthroughRule, c.Group,
|
||||
c.IdempotencyRecord, c.IdentityAdoptionDecision, c.PaymentAuditLog,
|
||||
c.PaymentOrder, c.PaymentProviderInstance, c.PendingAuthSession, c.PromoCode,
|
||||
c.PromoCodeUsage, c.Proxy, c.RedeemCode, c.SecuritySecret, c.Setting,
|
||||
@@ -373,7 +399,9 @@ func (c *Client) Use(hooks ...Hook) {
|
||||
func (c *Client) Intercept(interceptors ...Interceptor) {
|
||||
for _, n := range []interface{ Intercept(...Interceptor) }{
|
||||
c.APIKey, c.Account, c.AccountGroup, c.Announcement, c.AnnouncementRead,
|
||||
c.AuthIdentity, c.AuthIdentityChannel, c.ErrorPassthroughRule, c.Group,
|
||||
c.AuthIdentity, c.AuthIdentityChannel, c.ChannelMonitor,
|
||||
c.ChannelMonitorDailyRollup, c.ChannelMonitorHistory,
|
||||
c.ChannelMonitorRequestTemplate, c.ErrorPassthroughRule, c.Group,
|
||||
c.IdempotencyRecord, c.IdentityAdoptionDecision, c.PaymentAuditLog,
|
||||
c.PaymentOrder, c.PaymentProviderInstance, c.PendingAuthSession, c.PromoCode,
|
||||
c.PromoCodeUsage, c.Proxy, c.RedeemCode, c.SecuritySecret, c.Setting,
|
||||
@@ -402,6 +430,14 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) {
|
||||
return c.AuthIdentity.mutate(ctx, m)
|
||||
case *AuthIdentityChannelMutation:
|
||||
return c.AuthIdentityChannel.mutate(ctx, m)
|
||||
case *ChannelMonitorMutation:
|
||||
return c.ChannelMonitor.mutate(ctx, m)
|
||||
case *ChannelMonitorDailyRollupMutation:
|
||||
return c.ChannelMonitorDailyRollup.mutate(ctx, m)
|
||||
case *ChannelMonitorHistoryMutation:
|
||||
return c.ChannelMonitorHistory.mutate(ctx, m)
|
||||
case *ChannelMonitorRequestTemplateMutation:
|
||||
return c.ChannelMonitorRequestTemplate.mutate(ctx, m)
|
||||
case *ErrorPassthroughRuleMutation:
|
||||
return c.ErrorPassthroughRule.mutate(ctx, m)
|
||||
case *GroupMutation:
|
||||
@@ -1595,6 +1631,634 @@ func (c *AuthIdentityChannelClient) mutate(ctx context.Context, m *AuthIdentityC
|
||||
}
|
||||
}
|
||||
|
||||
// ChannelMonitorClient is a client for the ChannelMonitor schema.
|
||||
type ChannelMonitorClient struct {
|
||||
config
|
||||
}
|
||||
|
||||
// NewChannelMonitorClient returns a client for the ChannelMonitor from the given config.
|
||||
func NewChannelMonitorClient(c config) *ChannelMonitorClient {
|
||||
return &ChannelMonitorClient{config: c}
|
||||
}
|
||||
|
||||
// Use adds a list of mutation hooks to the hooks stack.
|
||||
// A call to `Use(f, g, h)` equals to `channelmonitor.Hooks(f(g(h())))`.
|
||||
func (c *ChannelMonitorClient) Use(hooks ...Hook) {
|
||||
c.hooks.ChannelMonitor = append(c.hooks.ChannelMonitor, hooks...)
|
||||
}
|
||||
|
||||
// Intercept adds a list of query interceptors to the interceptors stack.
|
||||
// A call to `Intercept(f, g, h)` equals to `channelmonitor.Intercept(f(g(h())))`.
|
||||
func (c *ChannelMonitorClient) Intercept(interceptors ...Interceptor) {
|
||||
c.inters.ChannelMonitor = append(c.inters.ChannelMonitor, interceptors...)
|
||||
}
|
||||
|
||||
// Create returns a builder for creating a ChannelMonitor entity.
|
||||
func (c *ChannelMonitorClient) Create() *ChannelMonitorCreate {
|
||||
mutation := newChannelMonitorMutation(c.config, OpCreate)
|
||||
return &ChannelMonitorCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// CreateBulk returns a builder for creating a bulk of ChannelMonitor entities.
|
||||
func (c *ChannelMonitorClient) CreateBulk(builders ...*ChannelMonitorCreate) *ChannelMonitorCreateBulk {
|
||||
return &ChannelMonitorCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
|
||||
// a builder and applies setFunc on it.
|
||||
func (c *ChannelMonitorClient) MapCreateBulk(slice any, setFunc func(*ChannelMonitorCreate, int)) *ChannelMonitorCreateBulk {
|
||||
rv := reflect.ValueOf(slice)
|
||||
if rv.Kind() != reflect.Slice {
|
||||
return &ChannelMonitorCreateBulk{err: fmt.Errorf("calling to ChannelMonitorClient.MapCreateBulk with wrong type %T, need slice", slice)}
|
||||
}
|
||||
builders := make([]*ChannelMonitorCreate, rv.Len())
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
builders[i] = c.Create()
|
||||
setFunc(builders[i], i)
|
||||
}
|
||||
return &ChannelMonitorCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// Update returns an update builder for ChannelMonitor.
|
||||
func (c *ChannelMonitorClient) Update() *ChannelMonitorUpdate {
|
||||
mutation := newChannelMonitorMutation(c.config, OpUpdate)
|
||||
return &ChannelMonitorUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// UpdateOne returns an update builder for the given entity.
|
||||
func (c *ChannelMonitorClient) UpdateOne(_m *ChannelMonitor) *ChannelMonitorUpdateOne {
|
||||
mutation := newChannelMonitorMutation(c.config, OpUpdateOne, withChannelMonitor(_m))
|
||||
return &ChannelMonitorUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// UpdateOneID returns an update builder for the given id.
|
||||
func (c *ChannelMonitorClient) UpdateOneID(id int64) *ChannelMonitorUpdateOne {
|
||||
mutation := newChannelMonitorMutation(c.config, OpUpdateOne, withChannelMonitorID(id))
|
||||
return &ChannelMonitorUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// Delete returns a delete builder for ChannelMonitor.
|
||||
func (c *ChannelMonitorClient) Delete() *ChannelMonitorDelete {
|
||||
mutation := newChannelMonitorMutation(c.config, OpDelete)
|
||||
return &ChannelMonitorDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// DeleteOne returns a builder for deleting the given entity.
|
||||
func (c *ChannelMonitorClient) DeleteOne(_m *ChannelMonitor) *ChannelMonitorDeleteOne {
|
||||
return c.DeleteOneID(_m.ID)
|
||||
}
|
||||
|
||||
// DeleteOneID returns a builder for deleting the given entity by its id.
|
||||
func (c *ChannelMonitorClient) DeleteOneID(id int64) *ChannelMonitorDeleteOne {
|
||||
builder := c.Delete().Where(channelmonitor.ID(id))
|
||||
builder.mutation.id = &id
|
||||
builder.mutation.op = OpDeleteOne
|
||||
return &ChannelMonitorDeleteOne{builder}
|
||||
}
|
||||
|
||||
// Query returns a query builder for ChannelMonitor.
|
||||
func (c *ChannelMonitorClient) Query() *ChannelMonitorQuery {
|
||||
return &ChannelMonitorQuery{
|
||||
config: c.config,
|
||||
ctx: &QueryContext{Type: TypeChannelMonitor},
|
||||
inters: c.Interceptors(),
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns a ChannelMonitor entity by its id.
|
||||
func (c *ChannelMonitorClient) Get(ctx context.Context, id int64) (*ChannelMonitor, error) {
|
||||
return c.Query().Where(channelmonitor.ID(id)).Only(ctx)
|
||||
}
|
||||
|
||||
// GetX is like Get, but panics if an error occurs.
|
||||
func (c *ChannelMonitorClient) GetX(ctx context.Context, id int64) *ChannelMonitor {
|
||||
obj, err := c.Get(ctx, id)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return obj
|
||||
}
|
||||
|
||||
// QueryHistory queries the history edge of a ChannelMonitor.
|
||||
func (c *ChannelMonitorClient) QueryHistory(_m *ChannelMonitor) *ChannelMonitorHistoryQuery {
|
||||
query := (&ChannelMonitorHistoryClient{config: c.config}).Query()
|
||||
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
|
||||
id := _m.ID
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(channelmonitor.Table, channelmonitor.FieldID, id),
|
||||
sqlgraph.To(channelmonitorhistory.Table, channelmonitorhistory.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, channelmonitor.HistoryTable, channelmonitor.HistoryColumn),
|
||||
)
|
||||
fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step)
|
||||
return fromV, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// QueryDailyRollups queries the daily_rollups edge of a ChannelMonitor.
|
||||
func (c *ChannelMonitorClient) QueryDailyRollups(_m *ChannelMonitor) *ChannelMonitorDailyRollupQuery {
|
||||
query := (&ChannelMonitorDailyRollupClient{config: c.config}).Query()
|
||||
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
|
||||
id := _m.ID
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(channelmonitor.Table, channelmonitor.FieldID, id),
|
||||
sqlgraph.To(channelmonitordailyrollup.Table, channelmonitordailyrollup.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, channelmonitor.DailyRollupsTable, channelmonitor.DailyRollupsColumn),
|
||||
)
|
||||
fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step)
|
||||
return fromV, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// QueryRequestTemplate queries the request_template edge of a ChannelMonitor.
|
||||
func (c *ChannelMonitorClient) QueryRequestTemplate(_m *ChannelMonitor) *ChannelMonitorRequestTemplateQuery {
|
||||
query := (&ChannelMonitorRequestTemplateClient{config: c.config}).Query()
|
||||
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
|
||||
id := _m.ID
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(channelmonitor.Table, channelmonitor.FieldID, id),
|
||||
sqlgraph.To(channelmonitorrequesttemplate.Table, channelmonitorrequesttemplate.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, false, channelmonitor.RequestTemplateTable, channelmonitor.RequestTemplateColumn),
|
||||
)
|
||||
fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step)
|
||||
return fromV, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// Hooks returns the client hooks.
|
||||
func (c *ChannelMonitorClient) Hooks() []Hook {
|
||||
return c.hooks.ChannelMonitor
|
||||
}
|
||||
|
||||
// Interceptors returns the client interceptors.
|
||||
func (c *ChannelMonitorClient) Interceptors() []Interceptor {
|
||||
return c.inters.ChannelMonitor
|
||||
}
|
||||
|
||||
func (c *ChannelMonitorClient) mutate(ctx context.Context, m *ChannelMonitorMutation) (Value, error) {
|
||||
switch m.Op() {
|
||||
case OpCreate:
|
||||
return (&ChannelMonitorCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||
case OpUpdate:
|
||||
return (&ChannelMonitorUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||
case OpUpdateOne:
|
||||
return (&ChannelMonitorUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||
case OpDelete, OpDeleteOne:
|
||||
return (&ChannelMonitorDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
|
||||
default:
|
||||
return nil, fmt.Errorf("ent: unknown ChannelMonitor mutation op: %q", m.Op())
|
||||
}
|
||||
}
|
||||
|
||||
// ChannelMonitorDailyRollupClient is a client for the ChannelMonitorDailyRollup schema.
|
||||
type ChannelMonitorDailyRollupClient struct {
|
||||
config
|
||||
}
|
||||
|
||||
// NewChannelMonitorDailyRollupClient returns a client for the ChannelMonitorDailyRollup from the given config.
|
||||
func NewChannelMonitorDailyRollupClient(c config) *ChannelMonitorDailyRollupClient {
|
||||
return &ChannelMonitorDailyRollupClient{config: c}
|
||||
}
|
||||
|
||||
// Use adds a list of mutation hooks to the hooks stack.
|
||||
// A call to `Use(f, g, h)` equals to `channelmonitordailyrollup.Hooks(f(g(h())))`.
|
||||
func (c *ChannelMonitorDailyRollupClient) Use(hooks ...Hook) {
|
||||
c.hooks.ChannelMonitorDailyRollup = append(c.hooks.ChannelMonitorDailyRollup, hooks...)
|
||||
}
|
||||
|
||||
// Intercept adds a list of query interceptors to the interceptors stack.
|
||||
// A call to `Intercept(f, g, h)` equals to `channelmonitordailyrollup.Intercept(f(g(h())))`.
|
||||
func (c *ChannelMonitorDailyRollupClient) Intercept(interceptors ...Interceptor) {
|
||||
c.inters.ChannelMonitorDailyRollup = append(c.inters.ChannelMonitorDailyRollup, interceptors...)
|
||||
}
|
||||
|
||||
// Create returns a builder for creating a ChannelMonitorDailyRollup entity.
|
||||
func (c *ChannelMonitorDailyRollupClient) Create() *ChannelMonitorDailyRollupCreate {
|
||||
mutation := newChannelMonitorDailyRollupMutation(c.config, OpCreate)
|
||||
return &ChannelMonitorDailyRollupCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// CreateBulk returns a builder for creating a bulk of ChannelMonitorDailyRollup entities.
|
||||
func (c *ChannelMonitorDailyRollupClient) CreateBulk(builders ...*ChannelMonitorDailyRollupCreate) *ChannelMonitorDailyRollupCreateBulk {
|
||||
return &ChannelMonitorDailyRollupCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
|
||||
// a builder and applies setFunc on it.
|
||||
func (c *ChannelMonitorDailyRollupClient) MapCreateBulk(slice any, setFunc func(*ChannelMonitorDailyRollupCreate, int)) *ChannelMonitorDailyRollupCreateBulk {
|
||||
rv := reflect.ValueOf(slice)
|
||||
if rv.Kind() != reflect.Slice {
|
||||
return &ChannelMonitorDailyRollupCreateBulk{err: fmt.Errorf("calling to ChannelMonitorDailyRollupClient.MapCreateBulk with wrong type %T, need slice", slice)}
|
||||
}
|
||||
builders := make([]*ChannelMonitorDailyRollupCreate, rv.Len())
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
builders[i] = c.Create()
|
||||
setFunc(builders[i], i)
|
||||
}
|
||||
return &ChannelMonitorDailyRollupCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// Update returns an update builder for ChannelMonitorDailyRollup.
|
||||
func (c *ChannelMonitorDailyRollupClient) Update() *ChannelMonitorDailyRollupUpdate {
|
||||
mutation := newChannelMonitorDailyRollupMutation(c.config, OpUpdate)
|
||||
return &ChannelMonitorDailyRollupUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// UpdateOne returns an update builder for the given entity.
|
||||
func (c *ChannelMonitorDailyRollupClient) UpdateOne(_m *ChannelMonitorDailyRollup) *ChannelMonitorDailyRollupUpdateOne {
|
||||
mutation := newChannelMonitorDailyRollupMutation(c.config, OpUpdateOne, withChannelMonitorDailyRollup(_m))
|
||||
return &ChannelMonitorDailyRollupUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// UpdateOneID returns an update builder for the given id.
|
||||
func (c *ChannelMonitorDailyRollupClient) UpdateOneID(id int64) *ChannelMonitorDailyRollupUpdateOne {
|
||||
mutation := newChannelMonitorDailyRollupMutation(c.config, OpUpdateOne, withChannelMonitorDailyRollupID(id))
|
||||
return &ChannelMonitorDailyRollupUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// Delete returns a delete builder for ChannelMonitorDailyRollup.
|
||||
func (c *ChannelMonitorDailyRollupClient) Delete() *ChannelMonitorDailyRollupDelete {
|
||||
mutation := newChannelMonitorDailyRollupMutation(c.config, OpDelete)
|
||||
return &ChannelMonitorDailyRollupDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// DeleteOne returns a builder for deleting the given entity.
|
||||
func (c *ChannelMonitorDailyRollupClient) DeleteOne(_m *ChannelMonitorDailyRollup) *ChannelMonitorDailyRollupDeleteOne {
|
||||
return c.DeleteOneID(_m.ID)
|
||||
}
|
||||
|
||||
// DeleteOneID returns a builder for deleting the given entity by its id.
|
||||
func (c *ChannelMonitorDailyRollupClient) DeleteOneID(id int64) *ChannelMonitorDailyRollupDeleteOne {
|
||||
builder := c.Delete().Where(channelmonitordailyrollup.ID(id))
|
||||
builder.mutation.id = &id
|
||||
builder.mutation.op = OpDeleteOne
|
||||
return &ChannelMonitorDailyRollupDeleteOne{builder}
|
||||
}
|
||||
|
||||
// Query returns a query builder for ChannelMonitorDailyRollup.
|
||||
func (c *ChannelMonitorDailyRollupClient) Query() *ChannelMonitorDailyRollupQuery {
|
||||
return &ChannelMonitorDailyRollupQuery{
|
||||
config: c.config,
|
||||
ctx: &QueryContext{Type: TypeChannelMonitorDailyRollup},
|
||||
inters: c.Interceptors(),
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns a ChannelMonitorDailyRollup entity by its id.
|
||||
func (c *ChannelMonitorDailyRollupClient) Get(ctx context.Context, id int64) (*ChannelMonitorDailyRollup, error) {
|
||||
return c.Query().Where(channelmonitordailyrollup.ID(id)).Only(ctx)
|
||||
}
|
||||
|
||||
// GetX is like Get, but panics if an error occurs.
|
||||
func (c *ChannelMonitorDailyRollupClient) GetX(ctx context.Context, id int64) *ChannelMonitorDailyRollup {
|
||||
obj, err := c.Get(ctx, id)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return obj
|
||||
}
|
||||
|
||||
// QueryMonitor queries the monitor edge of a ChannelMonitorDailyRollup.
|
||||
func (c *ChannelMonitorDailyRollupClient) QueryMonitor(_m *ChannelMonitorDailyRollup) *ChannelMonitorQuery {
|
||||
query := (&ChannelMonitorClient{config: c.config}).Query()
|
||||
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
|
||||
id := _m.ID
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(channelmonitordailyrollup.Table, channelmonitordailyrollup.FieldID, id),
|
||||
sqlgraph.To(channelmonitor.Table, channelmonitor.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, channelmonitordailyrollup.MonitorTable, channelmonitordailyrollup.MonitorColumn),
|
||||
)
|
||||
fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step)
|
||||
return fromV, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// Hooks returns the client hooks.
|
||||
func (c *ChannelMonitorDailyRollupClient) Hooks() []Hook {
|
||||
return c.hooks.ChannelMonitorDailyRollup
|
||||
}
|
||||
|
||||
// Interceptors returns the client interceptors.
|
||||
func (c *ChannelMonitorDailyRollupClient) Interceptors() []Interceptor {
|
||||
return c.inters.ChannelMonitorDailyRollup
|
||||
}
|
||||
|
||||
func (c *ChannelMonitorDailyRollupClient) mutate(ctx context.Context, m *ChannelMonitorDailyRollupMutation) (Value, error) {
|
||||
switch m.Op() {
|
||||
case OpCreate:
|
||||
return (&ChannelMonitorDailyRollupCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||
case OpUpdate:
|
||||
return (&ChannelMonitorDailyRollupUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||
case OpUpdateOne:
|
||||
return (&ChannelMonitorDailyRollupUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||
case OpDelete, OpDeleteOne:
|
||||
return (&ChannelMonitorDailyRollupDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
|
||||
default:
|
||||
return nil, fmt.Errorf("ent: unknown ChannelMonitorDailyRollup mutation op: %q", m.Op())
|
||||
}
|
||||
}
|
||||
|
||||
// ChannelMonitorHistoryClient is a client for the ChannelMonitorHistory schema.
|
||||
type ChannelMonitorHistoryClient struct {
|
||||
config
|
||||
}
|
||||
|
||||
// NewChannelMonitorHistoryClient returns a client for the ChannelMonitorHistory from the given config.
|
||||
func NewChannelMonitorHistoryClient(c config) *ChannelMonitorHistoryClient {
|
||||
return &ChannelMonitorHistoryClient{config: c}
|
||||
}
|
||||
|
||||
// Use adds a list of mutation hooks to the hooks stack.
|
||||
// A call to `Use(f, g, h)` equals to `channelmonitorhistory.Hooks(f(g(h())))`.
|
||||
func (c *ChannelMonitorHistoryClient) Use(hooks ...Hook) {
|
||||
c.hooks.ChannelMonitorHistory = append(c.hooks.ChannelMonitorHistory, hooks...)
|
||||
}
|
||||
|
||||
// Intercept adds a list of query interceptors to the interceptors stack.
|
||||
// A call to `Intercept(f, g, h)` equals to `channelmonitorhistory.Intercept(f(g(h())))`.
|
||||
func (c *ChannelMonitorHistoryClient) Intercept(interceptors ...Interceptor) {
|
||||
c.inters.ChannelMonitorHistory = append(c.inters.ChannelMonitorHistory, interceptors...)
|
||||
}
|
||||
|
||||
// Create returns a builder for creating a ChannelMonitorHistory entity.
|
||||
func (c *ChannelMonitorHistoryClient) Create() *ChannelMonitorHistoryCreate {
|
||||
mutation := newChannelMonitorHistoryMutation(c.config, OpCreate)
|
||||
return &ChannelMonitorHistoryCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// CreateBulk returns a builder for creating a bulk of ChannelMonitorHistory entities.
|
||||
func (c *ChannelMonitorHistoryClient) CreateBulk(builders ...*ChannelMonitorHistoryCreate) *ChannelMonitorHistoryCreateBulk {
|
||||
return &ChannelMonitorHistoryCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
|
||||
// a builder and applies setFunc on it.
|
||||
func (c *ChannelMonitorHistoryClient) MapCreateBulk(slice any, setFunc func(*ChannelMonitorHistoryCreate, int)) *ChannelMonitorHistoryCreateBulk {
|
||||
rv := reflect.ValueOf(slice)
|
||||
if rv.Kind() != reflect.Slice {
|
||||
return &ChannelMonitorHistoryCreateBulk{err: fmt.Errorf("calling to ChannelMonitorHistoryClient.MapCreateBulk with wrong type %T, need slice", slice)}
|
||||
}
|
||||
builders := make([]*ChannelMonitorHistoryCreate, rv.Len())
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
builders[i] = c.Create()
|
||||
setFunc(builders[i], i)
|
||||
}
|
||||
return &ChannelMonitorHistoryCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// Update returns an update builder for ChannelMonitorHistory.
|
||||
func (c *ChannelMonitorHistoryClient) Update() *ChannelMonitorHistoryUpdate {
|
||||
mutation := newChannelMonitorHistoryMutation(c.config, OpUpdate)
|
||||
return &ChannelMonitorHistoryUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// UpdateOne returns an update builder for the given entity.
|
||||
func (c *ChannelMonitorHistoryClient) UpdateOne(_m *ChannelMonitorHistory) *ChannelMonitorHistoryUpdateOne {
|
||||
mutation := newChannelMonitorHistoryMutation(c.config, OpUpdateOne, withChannelMonitorHistory(_m))
|
||||
return &ChannelMonitorHistoryUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// UpdateOneID returns an update builder for the given id.
|
||||
func (c *ChannelMonitorHistoryClient) UpdateOneID(id int64) *ChannelMonitorHistoryUpdateOne {
|
||||
mutation := newChannelMonitorHistoryMutation(c.config, OpUpdateOne, withChannelMonitorHistoryID(id))
|
||||
return &ChannelMonitorHistoryUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// Delete returns a delete builder for ChannelMonitorHistory.
|
||||
func (c *ChannelMonitorHistoryClient) Delete() *ChannelMonitorHistoryDelete {
|
||||
mutation := newChannelMonitorHistoryMutation(c.config, OpDelete)
|
||||
return &ChannelMonitorHistoryDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// DeleteOne returns a builder for deleting the given entity.
|
||||
func (c *ChannelMonitorHistoryClient) DeleteOne(_m *ChannelMonitorHistory) *ChannelMonitorHistoryDeleteOne {
|
||||
return c.DeleteOneID(_m.ID)
|
||||
}
|
||||
|
||||
// DeleteOneID returns a builder for deleting the given entity by its id.
|
||||
func (c *ChannelMonitorHistoryClient) DeleteOneID(id int64) *ChannelMonitorHistoryDeleteOne {
|
||||
builder := c.Delete().Where(channelmonitorhistory.ID(id))
|
||||
builder.mutation.id = &id
|
||||
builder.mutation.op = OpDeleteOne
|
||||
return &ChannelMonitorHistoryDeleteOne{builder}
|
||||
}
|
||||
|
||||
// Query returns a query builder for ChannelMonitorHistory.
|
||||
func (c *ChannelMonitorHistoryClient) Query() *ChannelMonitorHistoryQuery {
|
||||
return &ChannelMonitorHistoryQuery{
|
||||
config: c.config,
|
||||
ctx: &QueryContext{Type: TypeChannelMonitorHistory},
|
||||
inters: c.Interceptors(),
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns a ChannelMonitorHistory entity by its id.
|
||||
func (c *ChannelMonitorHistoryClient) Get(ctx context.Context, id int64) (*ChannelMonitorHistory, error) {
|
||||
return c.Query().Where(channelmonitorhistory.ID(id)).Only(ctx)
|
||||
}
|
||||
|
||||
// GetX is like Get, but panics if an error occurs.
|
||||
func (c *ChannelMonitorHistoryClient) GetX(ctx context.Context, id int64) *ChannelMonitorHistory {
|
||||
obj, err := c.Get(ctx, id)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return obj
|
||||
}
|
||||
|
||||
// QueryMonitor queries the monitor edge of a ChannelMonitorHistory.
|
||||
func (c *ChannelMonitorHistoryClient) QueryMonitor(_m *ChannelMonitorHistory) *ChannelMonitorQuery {
|
||||
query := (&ChannelMonitorClient{config: c.config}).Query()
|
||||
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
|
||||
id := _m.ID
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(channelmonitorhistory.Table, channelmonitorhistory.FieldID, id),
|
||||
sqlgraph.To(channelmonitor.Table, channelmonitor.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, channelmonitorhistory.MonitorTable, channelmonitorhistory.MonitorColumn),
|
||||
)
|
||||
fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step)
|
||||
return fromV, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// Hooks returns the client hooks.
|
||||
func (c *ChannelMonitorHistoryClient) Hooks() []Hook {
|
||||
return c.hooks.ChannelMonitorHistory
|
||||
}
|
||||
|
||||
// Interceptors returns the client interceptors.
|
||||
func (c *ChannelMonitorHistoryClient) Interceptors() []Interceptor {
|
||||
return c.inters.ChannelMonitorHistory
|
||||
}
|
||||
|
||||
func (c *ChannelMonitorHistoryClient) mutate(ctx context.Context, m *ChannelMonitorHistoryMutation) (Value, error) {
|
||||
switch m.Op() {
|
||||
case OpCreate:
|
||||
return (&ChannelMonitorHistoryCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||
case OpUpdate:
|
||||
return (&ChannelMonitorHistoryUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||
case OpUpdateOne:
|
||||
return (&ChannelMonitorHistoryUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||
case OpDelete, OpDeleteOne:
|
||||
return (&ChannelMonitorHistoryDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
|
||||
default:
|
||||
return nil, fmt.Errorf("ent: unknown ChannelMonitorHistory mutation op: %q", m.Op())
|
||||
}
|
||||
}
|
||||
|
||||
// ChannelMonitorRequestTemplateClient is a client for the ChannelMonitorRequestTemplate schema.
|
||||
type ChannelMonitorRequestTemplateClient struct {
|
||||
config
|
||||
}
|
||||
|
||||
// NewChannelMonitorRequestTemplateClient returns a client for the ChannelMonitorRequestTemplate from the given config.
|
||||
func NewChannelMonitorRequestTemplateClient(c config) *ChannelMonitorRequestTemplateClient {
|
||||
return &ChannelMonitorRequestTemplateClient{config: c}
|
||||
}
|
||||
|
||||
// Use adds a list of mutation hooks to the hooks stack.
|
||||
// A call to `Use(f, g, h)` equals to `channelmonitorrequesttemplate.Hooks(f(g(h())))`.
|
||||
func (c *ChannelMonitorRequestTemplateClient) Use(hooks ...Hook) {
|
||||
c.hooks.ChannelMonitorRequestTemplate = append(c.hooks.ChannelMonitorRequestTemplate, hooks...)
|
||||
}
|
||||
|
||||
// Intercept adds a list of query interceptors to the interceptors stack.
|
||||
// A call to `Intercept(f, g, h)` equals to `channelmonitorrequesttemplate.Intercept(f(g(h())))`.
|
||||
func (c *ChannelMonitorRequestTemplateClient) Intercept(interceptors ...Interceptor) {
|
||||
c.inters.ChannelMonitorRequestTemplate = append(c.inters.ChannelMonitorRequestTemplate, interceptors...)
|
||||
}
|
||||
|
||||
// Create returns a builder for creating a ChannelMonitorRequestTemplate entity.
|
||||
func (c *ChannelMonitorRequestTemplateClient) Create() *ChannelMonitorRequestTemplateCreate {
|
||||
mutation := newChannelMonitorRequestTemplateMutation(c.config, OpCreate)
|
||||
return &ChannelMonitorRequestTemplateCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// CreateBulk returns a builder for creating a bulk of ChannelMonitorRequestTemplate entities.
|
||||
func (c *ChannelMonitorRequestTemplateClient) CreateBulk(builders ...*ChannelMonitorRequestTemplateCreate) *ChannelMonitorRequestTemplateCreateBulk {
|
||||
return &ChannelMonitorRequestTemplateCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
|
||||
// a builder and applies setFunc on it.
|
||||
func (c *ChannelMonitorRequestTemplateClient) MapCreateBulk(slice any, setFunc func(*ChannelMonitorRequestTemplateCreate, int)) *ChannelMonitorRequestTemplateCreateBulk {
|
||||
rv := reflect.ValueOf(slice)
|
||||
if rv.Kind() != reflect.Slice {
|
||||
return &ChannelMonitorRequestTemplateCreateBulk{err: fmt.Errorf("calling to ChannelMonitorRequestTemplateClient.MapCreateBulk with wrong type %T, need slice", slice)}
|
||||
}
|
||||
builders := make([]*ChannelMonitorRequestTemplateCreate, rv.Len())
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
builders[i] = c.Create()
|
||||
setFunc(builders[i], i)
|
||||
}
|
||||
return &ChannelMonitorRequestTemplateCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// Update returns an update builder for ChannelMonitorRequestTemplate.
|
||||
func (c *ChannelMonitorRequestTemplateClient) Update() *ChannelMonitorRequestTemplateUpdate {
|
||||
mutation := newChannelMonitorRequestTemplateMutation(c.config, OpUpdate)
|
||||
return &ChannelMonitorRequestTemplateUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// UpdateOne returns an update builder for the given entity.
|
||||
func (c *ChannelMonitorRequestTemplateClient) UpdateOne(_m *ChannelMonitorRequestTemplate) *ChannelMonitorRequestTemplateUpdateOne {
|
||||
mutation := newChannelMonitorRequestTemplateMutation(c.config, OpUpdateOne, withChannelMonitorRequestTemplate(_m))
|
||||
return &ChannelMonitorRequestTemplateUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// UpdateOneID returns an update builder for the given id.
|
||||
func (c *ChannelMonitorRequestTemplateClient) UpdateOneID(id int64) *ChannelMonitorRequestTemplateUpdateOne {
|
||||
mutation := newChannelMonitorRequestTemplateMutation(c.config, OpUpdateOne, withChannelMonitorRequestTemplateID(id))
|
||||
return &ChannelMonitorRequestTemplateUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// Delete returns a delete builder for ChannelMonitorRequestTemplate.
|
||||
func (c *ChannelMonitorRequestTemplateClient) Delete() *ChannelMonitorRequestTemplateDelete {
|
||||
mutation := newChannelMonitorRequestTemplateMutation(c.config, OpDelete)
|
||||
return &ChannelMonitorRequestTemplateDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// DeleteOne returns a builder for deleting the given entity.
|
||||
func (c *ChannelMonitorRequestTemplateClient) DeleteOne(_m *ChannelMonitorRequestTemplate) *ChannelMonitorRequestTemplateDeleteOne {
|
||||
return c.DeleteOneID(_m.ID)
|
||||
}
|
||||
|
||||
// DeleteOneID returns a builder for deleting the given entity by its id.
|
||||
func (c *ChannelMonitorRequestTemplateClient) DeleteOneID(id int64) *ChannelMonitorRequestTemplateDeleteOne {
|
||||
builder := c.Delete().Where(channelmonitorrequesttemplate.ID(id))
|
||||
builder.mutation.id = &id
|
||||
builder.mutation.op = OpDeleteOne
|
||||
return &ChannelMonitorRequestTemplateDeleteOne{builder}
|
||||
}
|
||||
|
||||
// Query returns a query builder for ChannelMonitorRequestTemplate.
|
||||
func (c *ChannelMonitorRequestTemplateClient) Query() *ChannelMonitorRequestTemplateQuery {
|
||||
return &ChannelMonitorRequestTemplateQuery{
|
||||
config: c.config,
|
||||
ctx: &QueryContext{Type: TypeChannelMonitorRequestTemplate},
|
||||
inters: c.Interceptors(),
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns a ChannelMonitorRequestTemplate entity by its id.
|
||||
func (c *ChannelMonitorRequestTemplateClient) Get(ctx context.Context, id int64) (*ChannelMonitorRequestTemplate, error) {
|
||||
return c.Query().Where(channelmonitorrequesttemplate.ID(id)).Only(ctx)
|
||||
}
|
||||
|
||||
// GetX is like Get, but panics if an error occurs.
|
||||
func (c *ChannelMonitorRequestTemplateClient) GetX(ctx context.Context, id int64) *ChannelMonitorRequestTemplate {
|
||||
obj, err := c.Get(ctx, id)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return obj
|
||||
}
|
||||
|
||||
// QueryMonitors queries the monitors edge of a ChannelMonitorRequestTemplate.
|
||||
func (c *ChannelMonitorRequestTemplateClient) QueryMonitors(_m *ChannelMonitorRequestTemplate) *ChannelMonitorQuery {
|
||||
query := (&ChannelMonitorClient{config: c.config}).Query()
|
||||
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
|
||||
id := _m.ID
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(channelmonitorrequesttemplate.Table, channelmonitorrequesttemplate.FieldID, id),
|
||||
sqlgraph.To(channelmonitor.Table, channelmonitor.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, true, channelmonitorrequesttemplate.MonitorsTable, channelmonitorrequesttemplate.MonitorsColumn),
|
||||
)
|
||||
fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step)
|
||||
return fromV, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// Hooks returns the client hooks.
|
||||
func (c *ChannelMonitorRequestTemplateClient) Hooks() []Hook {
|
||||
return c.hooks.ChannelMonitorRequestTemplate
|
||||
}
|
||||
|
||||
// Interceptors returns the client interceptors.
|
||||
func (c *ChannelMonitorRequestTemplateClient) Interceptors() []Interceptor {
|
||||
return c.inters.ChannelMonitorRequestTemplate
|
||||
}
|
||||
|
||||
func (c *ChannelMonitorRequestTemplateClient) mutate(ctx context.Context, m *ChannelMonitorRequestTemplateMutation) (Value, error) {
|
||||
switch m.Op() {
|
||||
case OpCreate:
|
||||
return (&ChannelMonitorRequestTemplateCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||
case OpUpdate:
|
||||
return (&ChannelMonitorRequestTemplateUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||
case OpUpdateOne:
|
||||
return (&ChannelMonitorRequestTemplateUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||
case OpDelete, OpDeleteOne:
|
||||
return (&ChannelMonitorRequestTemplateDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
|
||||
default:
|
||||
return nil, fmt.Errorf("ent: unknown ChannelMonitorRequestTemplate mutation op: %q", m.Op())
|
||||
}
|
||||
}
|
||||
|
||||
// ErrorPassthroughRuleClient is a client for the ErrorPassthroughRule schema.
|
||||
type ErrorPassthroughRuleClient struct {
|
||||
config
|
||||
@@ -5355,21 +6019,23 @@ func (c *UserSubscriptionClient) mutate(ctx context.Context, m *UserSubscription
|
||||
type (
|
||||
hooks struct {
|
||||
APIKey, Account, AccountGroup, Announcement, AnnouncementRead, AuthIdentity,
|
||||
AuthIdentityChannel, ErrorPassthroughRule, Group, IdempotencyRecord,
|
||||
IdentityAdoptionDecision, PaymentAuditLog, PaymentOrder,
|
||||
PaymentProviderInstance, PendingAuthSession, PromoCode, PromoCodeUsage, Proxy,
|
||||
RedeemCode, SecuritySecret, Setting, SubscriptionPlan, TLSFingerprintProfile,
|
||||
UsageCleanupTask, UsageLog, User, UserAllowedGroup, UserAttributeDefinition,
|
||||
UserAttributeValue, UserSubscription []ent.Hook
|
||||
AuthIdentityChannel, ChannelMonitor, ChannelMonitorDailyRollup,
|
||||
ChannelMonitorHistory, ChannelMonitorRequestTemplate, ErrorPassthroughRule,
|
||||
Group, IdempotencyRecord, IdentityAdoptionDecision, PaymentAuditLog,
|
||||
PaymentOrder, PaymentProviderInstance, PendingAuthSession, PromoCode,
|
||||
PromoCodeUsage, Proxy, RedeemCode, SecuritySecret, Setting, SubscriptionPlan,
|
||||
TLSFingerprintProfile, UsageCleanupTask, UsageLog, User, UserAllowedGroup,
|
||||
UserAttributeDefinition, UserAttributeValue, UserSubscription []ent.Hook
|
||||
}
|
||||
inters struct {
|
||||
APIKey, Account, AccountGroup, Announcement, AnnouncementRead, AuthIdentity,
|
||||
AuthIdentityChannel, ErrorPassthroughRule, Group, IdempotencyRecord,
|
||||
IdentityAdoptionDecision, PaymentAuditLog, PaymentOrder,
|
||||
PaymentProviderInstance, PendingAuthSession, PromoCode, PromoCodeUsage, Proxy,
|
||||
RedeemCode, SecuritySecret, Setting, SubscriptionPlan, TLSFingerprintProfile,
|
||||
UsageCleanupTask, UsageLog, User, UserAllowedGroup, UserAttributeDefinition,
|
||||
UserAttributeValue, UserSubscription []ent.Interceptor
|
||||
AuthIdentityChannel, ChannelMonitor, ChannelMonitorDailyRollup,
|
||||
ChannelMonitorHistory, ChannelMonitorRequestTemplate, ErrorPassthroughRule,
|
||||
Group, IdempotencyRecord, IdentityAdoptionDecision, PaymentAuditLog,
|
||||
PaymentOrder, PaymentProviderInstance, PendingAuthSession, PromoCode,
|
||||
PromoCodeUsage, Proxy, RedeemCode, SecuritySecret, Setting, SubscriptionPlan,
|
||||
TLSFingerprintProfile, UsageCleanupTask, UsageLog, User, UserAllowedGroup,
|
||||
UserAttributeDefinition, UserAttributeValue, UserSubscription []ent.Interceptor
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
@@ -19,6 +19,10 @@ import (
|
||||
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentity"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentitychannel"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorrequesttemplate"
|
||||
"github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule"
|
||||
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||
"github.com/Wei-Shaw/sub2api/ent/idempotencyrecord"
|
||||
@@ -102,36 +106,40 @@ var (
|
||||
func checkColumn(t, c string) error {
|
||||
initCheck.Do(func() {
|
||||
columnCheck = sql.NewColumnCheck(map[string]func(string) bool{
|
||||
apikey.Table: apikey.ValidColumn,
|
||||
account.Table: account.ValidColumn,
|
||||
accountgroup.Table: accountgroup.ValidColumn,
|
||||
announcement.Table: announcement.ValidColumn,
|
||||
announcementread.Table: announcementread.ValidColumn,
|
||||
authidentity.Table: authidentity.ValidColumn,
|
||||
authidentitychannel.Table: authidentitychannel.ValidColumn,
|
||||
errorpassthroughrule.Table: errorpassthroughrule.ValidColumn,
|
||||
group.Table: group.ValidColumn,
|
||||
idempotencyrecord.Table: idempotencyrecord.ValidColumn,
|
||||
identityadoptiondecision.Table: identityadoptiondecision.ValidColumn,
|
||||
paymentauditlog.Table: paymentauditlog.ValidColumn,
|
||||
paymentorder.Table: paymentorder.ValidColumn,
|
||||
paymentproviderinstance.Table: paymentproviderinstance.ValidColumn,
|
||||
pendingauthsession.Table: pendingauthsession.ValidColumn,
|
||||
promocode.Table: promocode.ValidColumn,
|
||||
promocodeusage.Table: promocodeusage.ValidColumn,
|
||||
proxy.Table: proxy.ValidColumn,
|
||||
redeemcode.Table: redeemcode.ValidColumn,
|
||||
securitysecret.Table: securitysecret.ValidColumn,
|
||||
setting.Table: setting.ValidColumn,
|
||||
subscriptionplan.Table: subscriptionplan.ValidColumn,
|
||||
tlsfingerprintprofile.Table: tlsfingerprintprofile.ValidColumn,
|
||||
usagecleanuptask.Table: usagecleanuptask.ValidColumn,
|
||||
usagelog.Table: usagelog.ValidColumn,
|
||||
user.Table: user.ValidColumn,
|
||||
userallowedgroup.Table: userallowedgroup.ValidColumn,
|
||||
userattributedefinition.Table: userattributedefinition.ValidColumn,
|
||||
userattributevalue.Table: userattributevalue.ValidColumn,
|
||||
usersubscription.Table: usersubscription.ValidColumn,
|
||||
apikey.Table: apikey.ValidColumn,
|
||||
account.Table: account.ValidColumn,
|
||||
accountgroup.Table: accountgroup.ValidColumn,
|
||||
announcement.Table: announcement.ValidColumn,
|
||||
announcementread.Table: announcementread.ValidColumn,
|
||||
authidentity.Table: authidentity.ValidColumn,
|
||||
authidentitychannel.Table: authidentitychannel.ValidColumn,
|
||||
channelmonitor.Table: channelmonitor.ValidColumn,
|
||||
channelmonitordailyrollup.Table: channelmonitordailyrollup.ValidColumn,
|
||||
channelmonitorhistory.Table: channelmonitorhistory.ValidColumn,
|
||||
channelmonitorrequesttemplate.Table: channelmonitorrequesttemplate.ValidColumn,
|
||||
errorpassthroughrule.Table: errorpassthroughrule.ValidColumn,
|
||||
group.Table: group.ValidColumn,
|
||||
idempotencyrecord.Table: idempotencyrecord.ValidColumn,
|
||||
identityadoptiondecision.Table: identityadoptiondecision.ValidColumn,
|
||||
paymentauditlog.Table: paymentauditlog.ValidColumn,
|
||||
paymentorder.Table: paymentorder.ValidColumn,
|
||||
paymentproviderinstance.Table: paymentproviderinstance.ValidColumn,
|
||||
pendingauthsession.Table: pendingauthsession.ValidColumn,
|
||||
promocode.Table: promocode.ValidColumn,
|
||||
promocodeusage.Table: promocodeusage.ValidColumn,
|
||||
proxy.Table: proxy.ValidColumn,
|
||||
redeemcode.Table: redeemcode.ValidColumn,
|
||||
securitysecret.Table: securitysecret.ValidColumn,
|
||||
setting.Table: setting.ValidColumn,
|
||||
subscriptionplan.Table: subscriptionplan.ValidColumn,
|
||||
tlsfingerprintprofile.Table: tlsfingerprintprofile.ValidColumn,
|
||||
usagecleanuptask.Table: usagecleanuptask.ValidColumn,
|
||||
usagelog.Table: usagelog.ValidColumn,
|
||||
user.Table: user.ValidColumn,
|
||||
userallowedgroup.Table: userallowedgroup.ValidColumn,
|
||||
userattributedefinition.Table: userattributedefinition.ValidColumn,
|
||||
userattributevalue.Table: userattributevalue.ValidColumn,
|
||||
usersubscription.Table: usersubscription.ValidColumn,
|
||||
})
|
||||
})
|
||||
return columnCheck(t, c)
|
||||
|
||||
@@ -93,6 +93,54 @@ func (f AuthIdentityChannelFunc) Mutate(ctx context.Context, m ent.Mutation) (en
|
||||
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AuthIdentityChannelMutation", m)
|
||||
}
|
||||
|
||||
// The ChannelMonitorFunc type is an adapter to allow the use of ordinary
|
||||
// function as ChannelMonitor mutator.
|
||||
type ChannelMonitorFunc func(context.Context, *ent.ChannelMonitorMutation) (ent.Value, error)
|
||||
|
||||
// Mutate calls f(ctx, m).
|
||||
func (f ChannelMonitorFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
|
||||
if mv, ok := m.(*ent.ChannelMonitorMutation); ok {
|
||||
return f(ctx, mv)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ChannelMonitorMutation", m)
|
||||
}
|
||||
|
||||
// The ChannelMonitorDailyRollupFunc type is an adapter to allow the use of ordinary
|
||||
// function as ChannelMonitorDailyRollup mutator.
|
||||
type ChannelMonitorDailyRollupFunc func(context.Context, *ent.ChannelMonitorDailyRollupMutation) (ent.Value, error)
|
||||
|
||||
// Mutate calls f(ctx, m).
|
||||
func (f ChannelMonitorDailyRollupFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
|
||||
if mv, ok := m.(*ent.ChannelMonitorDailyRollupMutation); ok {
|
||||
return f(ctx, mv)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ChannelMonitorDailyRollupMutation", m)
|
||||
}
|
||||
|
||||
// The ChannelMonitorHistoryFunc type is an adapter to allow the use of ordinary
|
||||
// function as ChannelMonitorHistory mutator.
|
||||
type ChannelMonitorHistoryFunc func(context.Context, *ent.ChannelMonitorHistoryMutation) (ent.Value, error)
|
||||
|
||||
// Mutate calls f(ctx, m).
|
||||
func (f ChannelMonitorHistoryFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
|
||||
if mv, ok := m.(*ent.ChannelMonitorHistoryMutation); ok {
|
||||
return f(ctx, mv)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ChannelMonitorHistoryMutation", m)
|
||||
}
|
||||
|
||||
// The ChannelMonitorRequestTemplateFunc type is an adapter to allow the use of ordinary
|
||||
// function as ChannelMonitorRequestTemplate mutator.
|
||||
type ChannelMonitorRequestTemplateFunc func(context.Context, *ent.ChannelMonitorRequestTemplateMutation) (ent.Value, error)
|
||||
|
||||
// Mutate calls f(ctx, m).
|
||||
func (f ChannelMonitorRequestTemplateFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
|
||||
if mv, ok := m.(*ent.ChannelMonitorRequestTemplateMutation); ok {
|
||||
return f(ctx, mv)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ChannelMonitorRequestTemplateMutation", m)
|
||||
}
|
||||
|
||||
// The ErrorPassthroughRuleFunc type is an adapter to allow the use of ordinary
|
||||
// function as ErrorPassthroughRule mutator.
|
||||
type ErrorPassthroughRuleFunc func(context.Context, *ent.ErrorPassthroughRuleMutation) (ent.Value, error)
|
||||
|
||||
@@ -15,6 +15,10 @@ import (
|
||||
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentity"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentitychannel"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorrequesttemplate"
|
||||
"github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule"
|
||||
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||
"github.com/Wei-Shaw/sub2api/ent/idempotencyrecord"
|
||||
@@ -286,6 +290,114 @@ func (f TraverseAuthIdentityChannel) Traverse(ctx context.Context, q ent.Query)
|
||||
return fmt.Errorf("unexpected query type %T. expect *ent.AuthIdentityChannelQuery", q)
|
||||
}
|
||||
|
||||
// The ChannelMonitorFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||
type ChannelMonitorFunc func(context.Context, *ent.ChannelMonitorQuery) (ent.Value, error)
|
||||
|
||||
// Query calls f(ctx, q).
|
||||
func (f ChannelMonitorFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||
if q, ok := q.(*ent.ChannelMonitorQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected query type %T. expect *ent.ChannelMonitorQuery", q)
|
||||
}
|
||||
|
||||
// The TraverseChannelMonitor type is an adapter to allow the use of ordinary function as Traverser.
|
||||
type TraverseChannelMonitor func(context.Context, *ent.ChannelMonitorQuery) error
|
||||
|
||||
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||
func (f TraverseChannelMonitor) Intercept(next ent.Querier) ent.Querier {
|
||||
return next
|
||||
}
|
||||
|
||||
// Traverse calls f(ctx, q).
|
||||
func (f TraverseChannelMonitor) Traverse(ctx context.Context, q ent.Query) error {
|
||||
if q, ok := q.(*ent.ChannelMonitorQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return fmt.Errorf("unexpected query type %T. expect *ent.ChannelMonitorQuery", q)
|
||||
}
|
||||
|
||||
// The ChannelMonitorDailyRollupFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||
type ChannelMonitorDailyRollupFunc func(context.Context, *ent.ChannelMonitorDailyRollupQuery) (ent.Value, error)
|
||||
|
||||
// Query calls f(ctx, q).
|
||||
func (f ChannelMonitorDailyRollupFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||
if q, ok := q.(*ent.ChannelMonitorDailyRollupQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected query type %T. expect *ent.ChannelMonitorDailyRollupQuery", q)
|
||||
}
|
||||
|
||||
// The TraverseChannelMonitorDailyRollup type is an adapter to allow the use of ordinary function as Traverser.
|
||||
type TraverseChannelMonitorDailyRollup func(context.Context, *ent.ChannelMonitorDailyRollupQuery) error
|
||||
|
||||
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||
func (f TraverseChannelMonitorDailyRollup) Intercept(next ent.Querier) ent.Querier {
|
||||
return next
|
||||
}
|
||||
|
||||
// Traverse calls f(ctx, q).
|
||||
func (f TraverseChannelMonitorDailyRollup) Traverse(ctx context.Context, q ent.Query) error {
|
||||
if q, ok := q.(*ent.ChannelMonitorDailyRollupQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return fmt.Errorf("unexpected query type %T. expect *ent.ChannelMonitorDailyRollupQuery", q)
|
||||
}
|
||||
|
||||
// The ChannelMonitorHistoryFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||
type ChannelMonitorHistoryFunc func(context.Context, *ent.ChannelMonitorHistoryQuery) (ent.Value, error)
|
||||
|
||||
// Query calls f(ctx, q).
|
||||
func (f ChannelMonitorHistoryFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||
if q, ok := q.(*ent.ChannelMonitorHistoryQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected query type %T. expect *ent.ChannelMonitorHistoryQuery", q)
|
||||
}
|
||||
|
||||
// The TraverseChannelMonitorHistory type is an adapter to allow the use of ordinary function as Traverser.
|
||||
type TraverseChannelMonitorHistory func(context.Context, *ent.ChannelMonitorHistoryQuery) error
|
||||
|
||||
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||
func (f TraverseChannelMonitorHistory) Intercept(next ent.Querier) ent.Querier {
|
||||
return next
|
||||
}
|
||||
|
||||
// Traverse calls f(ctx, q).
|
||||
func (f TraverseChannelMonitorHistory) Traverse(ctx context.Context, q ent.Query) error {
|
||||
if q, ok := q.(*ent.ChannelMonitorHistoryQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return fmt.Errorf("unexpected query type %T. expect *ent.ChannelMonitorHistoryQuery", q)
|
||||
}
|
||||
|
||||
// The ChannelMonitorRequestTemplateFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||
type ChannelMonitorRequestTemplateFunc func(context.Context, *ent.ChannelMonitorRequestTemplateQuery) (ent.Value, error)
|
||||
|
||||
// Query calls f(ctx, q).
|
||||
func (f ChannelMonitorRequestTemplateFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||
if q, ok := q.(*ent.ChannelMonitorRequestTemplateQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected query type %T. expect *ent.ChannelMonitorRequestTemplateQuery", q)
|
||||
}
|
||||
|
||||
// The TraverseChannelMonitorRequestTemplate type is an adapter to allow the use of ordinary function as Traverser.
|
||||
type TraverseChannelMonitorRequestTemplate func(context.Context, *ent.ChannelMonitorRequestTemplateQuery) error
|
||||
|
||||
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||
func (f TraverseChannelMonitorRequestTemplate) Intercept(next ent.Querier) ent.Querier {
|
||||
return next
|
||||
}
|
||||
|
||||
// Traverse calls f(ctx, q).
|
||||
func (f TraverseChannelMonitorRequestTemplate) Traverse(ctx context.Context, q ent.Query) error {
|
||||
if q, ok := q.(*ent.ChannelMonitorRequestTemplateQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return fmt.Errorf("unexpected query type %T. expect *ent.ChannelMonitorRequestTemplateQuery", q)
|
||||
}
|
||||
|
||||
// The ErrorPassthroughRuleFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||
type ErrorPassthroughRuleFunc func(context.Context, *ent.ErrorPassthroughRuleQuery) (ent.Value, error)
|
||||
|
||||
@@ -924,6 +1036,14 @@ func NewQuery(q ent.Query) (Query, error) {
|
||||
return &query[*ent.AuthIdentityQuery, predicate.AuthIdentity, authidentity.OrderOption]{typ: ent.TypeAuthIdentity, tq: q}, nil
|
||||
case *ent.AuthIdentityChannelQuery:
|
||||
return &query[*ent.AuthIdentityChannelQuery, predicate.AuthIdentityChannel, authidentitychannel.OrderOption]{typ: ent.TypeAuthIdentityChannel, tq: q}, nil
|
||||
case *ent.ChannelMonitorQuery:
|
||||
return &query[*ent.ChannelMonitorQuery, predicate.ChannelMonitor, channelmonitor.OrderOption]{typ: ent.TypeChannelMonitor, tq: q}, nil
|
||||
case *ent.ChannelMonitorDailyRollupQuery:
|
||||
return &query[*ent.ChannelMonitorDailyRollupQuery, predicate.ChannelMonitorDailyRollup, channelmonitordailyrollup.OrderOption]{typ: ent.TypeChannelMonitorDailyRollup, tq: q}, nil
|
||||
case *ent.ChannelMonitorHistoryQuery:
|
||||
return &query[*ent.ChannelMonitorHistoryQuery, predicate.ChannelMonitorHistory, channelmonitorhistory.OrderOption]{typ: ent.TypeChannelMonitorHistory, tq: q}, nil
|
||||
case *ent.ChannelMonitorRequestTemplateQuery:
|
||||
return &query[*ent.ChannelMonitorRequestTemplateQuery, predicate.ChannelMonitorRequestTemplate, channelmonitorrequesttemplate.OrderOption]{typ: ent.TypeChannelMonitorRequestTemplate, tq: q}, nil
|
||||
case *ent.ErrorPassthroughRuleQuery:
|
||||
return &query[*ent.ErrorPassthroughRuleQuery, predicate.ErrorPassthroughRule, errorpassthroughrule.OrderOption]{typ: ent.TypeErrorPassthroughRule, tq: q}, nil
|
||||
case *ent.GroupQuery:
|
||||
|
||||
@@ -421,6 +421,169 @@ var (
|
||||
},
|
||||
},
|
||||
}
|
||||
// ChannelMonitorsColumns holds the columns for the "channel_monitors" table.
|
||||
ChannelMonitorsColumns = []*schema.Column{
|
||||
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "name", Type: field.TypeString, Size: 100},
|
||||
{Name: "provider", Type: field.TypeEnum, Enums: []string{"openai", "anthropic", "gemini"}},
|
||||
{Name: "endpoint", Type: field.TypeString, Size: 500},
|
||||
{Name: "api_key_encrypted", Type: field.TypeString},
|
||||
{Name: "primary_model", Type: field.TypeString, Size: 200},
|
||||
{Name: "extra_models", Type: field.TypeJSON},
|
||||
{Name: "group_name", Type: field.TypeString, Nullable: true, Size: 100, Default: ""},
|
||||
{Name: "enabled", Type: field.TypeBool, Default: true},
|
||||
{Name: "interval_seconds", Type: field.TypeInt},
|
||||
{Name: "last_checked_at", Type: field.TypeTime, Nullable: true},
|
||||
{Name: "created_by", Type: field.TypeInt64},
|
||||
{Name: "extra_headers", Type: field.TypeJSON},
|
||||
{Name: "body_override_mode", Type: field.TypeString, Size: 10, Default: "off"},
|
||||
{Name: "body_override", Type: field.TypeJSON, Nullable: true},
|
||||
{Name: "template_id", Type: field.TypeInt64, Nullable: true},
|
||||
}
|
||||
// ChannelMonitorsTable holds the schema information for the "channel_monitors" table.
|
||||
ChannelMonitorsTable = &schema.Table{
|
||||
Name: "channel_monitors",
|
||||
Columns: ChannelMonitorsColumns,
|
||||
PrimaryKey: []*schema.Column{ChannelMonitorsColumns[0]},
|
||||
ForeignKeys: []*schema.ForeignKey{
|
||||
{
|
||||
Symbol: "channel_monitors_channel_monitor_request_templates_request_template",
|
||||
Columns: []*schema.Column{ChannelMonitorsColumns[17]},
|
||||
RefColumns: []*schema.Column{ChannelMonitorRequestTemplatesColumns[0]},
|
||||
OnDelete: schema.SetNull,
|
||||
},
|
||||
},
|
||||
Indexes: []*schema.Index{
|
||||
{
|
||||
Name: "channelmonitor_enabled_last_checked_at",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{ChannelMonitorsColumns[10], ChannelMonitorsColumns[12]},
|
||||
},
|
||||
{
|
||||
Name: "channelmonitor_provider",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{ChannelMonitorsColumns[4]},
|
||||
},
|
||||
{
|
||||
Name: "channelmonitor_group_name",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{ChannelMonitorsColumns[9]},
|
||||
},
|
||||
{
|
||||
Name: "channelmonitor_template_id",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{ChannelMonitorsColumns[17]},
|
||||
},
|
||||
},
|
||||
}
|
||||
// ChannelMonitorDailyRollupsColumns holds the columns for the "channel_monitor_daily_rollups" table.
|
||||
ChannelMonitorDailyRollupsColumns = []*schema.Column{
|
||||
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||
{Name: "model", Type: field.TypeString, Size: 200},
|
||||
{Name: "bucket_date", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "date"}},
|
||||
{Name: "total_checks", Type: field.TypeInt, Default: 0},
|
||||
{Name: "ok_count", Type: field.TypeInt, Default: 0},
|
||||
{Name: "operational_count", Type: field.TypeInt, Default: 0},
|
||||
{Name: "degraded_count", Type: field.TypeInt, Default: 0},
|
||||
{Name: "failed_count", Type: field.TypeInt, Default: 0},
|
||||
{Name: "error_count", Type: field.TypeInt, Default: 0},
|
||||
{Name: "sum_latency_ms", Type: field.TypeInt64, Default: 0},
|
||||
{Name: "count_latency", Type: field.TypeInt, Default: 0},
|
||||
{Name: "sum_ping_latency_ms", Type: field.TypeInt64, Default: 0},
|
||||
{Name: "count_ping_latency", Type: field.TypeInt, Default: 0},
|
||||
{Name: "computed_at", Type: field.TypeTime},
|
||||
{Name: "monitor_id", Type: field.TypeInt64},
|
||||
}
|
||||
// ChannelMonitorDailyRollupsTable holds the schema information for the "channel_monitor_daily_rollups" table.
|
||||
ChannelMonitorDailyRollupsTable = &schema.Table{
|
||||
Name: "channel_monitor_daily_rollups",
|
||||
Columns: ChannelMonitorDailyRollupsColumns,
|
||||
PrimaryKey: []*schema.Column{ChannelMonitorDailyRollupsColumns[0]},
|
||||
ForeignKeys: []*schema.ForeignKey{
|
||||
{
|
||||
Symbol: "channel_monitor_daily_rollups_channel_monitors_daily_rollups",
|
||||
Columns: []*schema.Column{ChannelMonitorDailyRollupsColumns[14]},
|
||||
RefColumns: []*schema.Column{ChannelMonitorsColumns[0]},
|
||||
OnDelete: schema.Cascade,
|
||||
},
|
||||
},
|
||||
Indexes: []*schema.Index{
|
||||
{
|
||||
Name: "channelmonitordailyrollup_monitor_id_model_bucket_date",
|
||||
Unique: true,
|
||||
Columns: []*schema.Column{ChannelMonitorDailyRollupsColumns[14], ChannelMonitorDailyRollupsColumns[1], ChannelMonitorDailyRollupsColumns[2]},
|
||||
},
|
||||
{
|
||||
Name: "channelmonitordailyrollup_bucket_date",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{ChannelMonitorDailyRollupsColumns[2]},
|
||||
},
|
||||
},
|
||||
}
|
||||
// ChannelMonitorHistoriesColumns holds the columns for the "channel_monitor_histories" table.
|
||||
ChannelMonitorHistoriesColumns = []*schema.Column{
|
||||
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||
{Name: "model", Type: field.TypeString, Size: 200},
|
||||
{Name: "status", Type: field.TypeEnum, Enums: []string{"operational", "degraded", "failed", "error"}},
|
||||
{Name: "latency_ms", Type: field.TypeInt, Nullable: true},
|
||||
{Name: "ping_latency_ms", Type: field.TypeInt, Nullable: true},
|
||||
{Name: "message", Type: field.TypeString, Nullable: true, Size: 500, Default: ""},
|
||||
{Name: "checked_at", Type: field.TypeTime},
|
||||
{Name: "monitor_id", Type: field.TypeInt64},
|
||||
}
|
||||
// ChannelMonitorHistoriesTable holds the schema information for the "channel_monitor_histories" table.
|
||||
ChannelMonitorHistoriesTable = &schema.Table{
|
||||
Name: "channel_monitor_histories",
|
||||
Columns: ChannelMonitorHistoriesColumns,
|
||||
PrimaryKey: []*schema.Column{ChannelMonitorHistoriesColumns[0]},
|
||||
ForeignKeys: []*schema.ForeignKey{
|
||||
{
|
||||
Symbol: "channel_monitor_histories_channel_monitors_history",
|
||||
Columns: []*schema.Column{ChannelMonitorHistoriesColumns[7]},
|
||||
RefColumns: []*schema.Column{ChannelMonitorsColumns[0]},
|
||||
OnDelete: schema.Cascade,
|
||||
},
|
||||
},
|
||||
Indexes: []*schema.Index{
|
||||
{
|
||||
Name: "channelmonitorhistory_monitor_id_model_checked_at",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{ChannelMonitorHistoriesColumns[7], ChannelMonitorHistoriesColumns[1], ChannelMonitorHistoriesColumns[6]},
|
||||
},
|
||||
{
|
||||
Name: "channelmonitorhistory_checked_at",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{ChannelMonitorHistoriesColumns[6]},
|
||||
},
|
||||
},
|
||||
}
|
||||
// ChannelMonitorRequestTemplatesColumns holds the columns for the "channel_monitor_request_templates" table.
|
||||
ChannelMonitorRequestTemplatesColumns = []*schema.Column{
|
||||
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "name", Type: field.TypeString, Size: 100},
|
||||
{Name: "provider", Type: field.TypeEnum, Enums: []string{"openai", "anthropic", "gemini"}},
|
||||
{Name: "description", Type: field.TypeString, Nullable: true, Size: 500, Default: ""},
|
||||
{Name: "extra_headers", Type: field.TypeJSON},
|
||||
{Name: "body_override_mode", Type: field.TypeString, Size: 10, Default: "off"},
|
||||
{Name: "body_override", Type: field.TypeJSON, Nullable: true},
|
||||
}
|
||||
// ChannelMonitorRequestTemplatesTable holds the schema information for the "channel_monitor_request_templates" table.
|
||||
ChannelMonitorRequestTemplatesTable = &schema.Table{
|
||||
Name: "channel_monitor_request_templates",
|
||||
Columns: ChannelMonitorRequestTemplatesColumns,
|
||||
PrimaryKey: []*schema.Column{ChannelMonitorRequestTemplatesColumns[0]},
|
||||
Indexes: []*schema.Index{
|
||||
{
|
||||
Name: "channelmonitorrequesttemplate_provider_name",
|
||||
Unique: true,
|
||||
Columns: []*schema.Column{ChannelMonitorRequestTemplatesColumns[4], ChannelMonitorRequestTemplatesColumns[3]},
|
||||
},
|
||||
},
|
||||
}
|
||||
// ErrorPassthroughRulesColumns holds the columns for the "error_passthrough_rules" table.
|
||||
ErrorPassthroughRulesColumns = []*schema.Column{
|
||||
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||
@@ -1522,6 +1685,10 @@ var (
|
||||
AnnouncementReadsTable,
|
||||
AuthIdentitiesTable,
|
||||
AuthIdentityChannelsTable,
|
||||
ChannelMonitorsTable,
|
||||
ChannelMonitorDailyRollupsTable,
|
||||
ChannelMonitorHistoriesTable,
|
||||
ChannelMonitorRequestTemplatesTable,
|
||||
ErrorPassthroughRulesTable,
|
||||
GroupsTable,
|
||||
IdempotencyRecordsTable,
|
||||
@@ -1579,6 +1746,21 @@ func init() {
|
||||
AuthIdentityChannelsTable.Annotation = &entsql.Annotation{
|
||||
Table: "auth_identity_channels",
|
||||
}
|
||||
ChannelMonitorsTable.ForeignKeys[0].RefTable = ChannelMonitorRequestTemplatesTable
|
||||
ChannelMonitorsTable.Annotation = &entsql.Annotation{
|
||||
Table: "channel_monitors",
|
||||
}
|
||||
ChannelMonitorDailyRollupsTable.ForeignKeys[0].RefTable = ChannelMonitorsTable
|
||||
ChannelMonitorDailyRollupsTable.Annotation = &entsql.Annotation{
|
||||
Table: "channel_monitor_daily_rollups",
|
||||
}
|
||||
ChannelMonitorHistoriesTable.ForeignKeys[0].RefTable = ChannelMonitorsTable
|
||||
ChannelMonitorHistoriesTable.Annotation = &entsql.Annotation{
|
||||
Table: "channel_monitor_histories",
|
||||
}
|
||||
ChannelMonitorRequestTemplatesTable.Annotation = &entsql.Annotation{
|
||||
Table: "channel_monitor_request_templates",
|
||||
}
|
||||
ErrorPassthroughRulesTable.Annotation = &entsql.Annotation{
|
||||
Table: "error_passthrough_rules",
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -27,6 +27,18 @@ type AuthIdentity func(*sql.Selector)
|
||||
// AuthIdentityChannel is the predicate function for authidentitychannel builders.
|
||||
type AuthIdentityChannel func(*sql.Selector)
|
||||
|
||||
// ChannelMonitor is the predicate function for channelmonitor builders.
|
||||
type ChannelMonitor func(*sql.Selector)
|
||||
|
||||
// ChannelMonitorDailyRollup is the predicate function for channelmonitordailyrollup builders.
|
||||
type ChannelMonitorDailyRollup func(*sql.Selector)
|
||||
|
||||
// ChannelMonitorHistory is the predicate function for channelmonitorhistory builders.
|
||||
type ChannelMonitorHistory func(*sql.Selector)
|
||||
|
||||
// ChannelMonitorRequestTemplate is the predicate function for channelmonitorrequesttemplate builders.
|
||||
type ChannelMonitorRequestTemplate func(*sql.Selector)
|
||||
|
||||
// ErrorPassthroughRule is the predicate function for errorpassthroughrule builders.
|
||||
type ErrorPassthroughRule func(*sql.Selector)
|
||||
|
||||
|
||||
@@ -12,6 +12,10 @@ import (
|
||||
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentity"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentitychannel"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorrequesttemplate"
|
||||
"github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule"
|
||||
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||
"github.com/Wei-Shaw/sub2api/ent/idempotencyrecord"
|
||||
@@ -427,6 +431,252 @@ func init() {
|
||||
authidentitychannelDescMetadata := authidentitychannelFields[6].Descriptor()
|
||||
// authidentitychannel.DefaultMetadata holds the default value on creation for the metadata field.
|
||||
authidentitychannel.DefaultMetadata = authidentitychannelDescMetadata.Default.(func() map[string]interface{})
|
||||
channelmonitorMixin := schema.ChannelMonitor{}.Mixin()
|
||||
channelmonitorMixinFields0 := channelmonitorMixin[0].Fields()
|
||||
_ = channelmonitorMixinFields0
|
||||
channelmonitorFields := schema.ChannelMonitor{}.Fields()
|
||||
_ = channelmonitorFields
|
||||
// channelmonitorDescCreatedAt is the schema descriptor for created_at field.
|
||||
channelmonitorDescCreatedAt := channelmonitorMixinFields0[0].Descriptor()
|
||||
// channelmonitor.DefaultCreatedAt holds the default value on creation for the created_at field.
|
||||
channelmonitor.DefaultCreatedAt = channelmonitorDescCreatedAt.Default.(func() time.Time)
|
||||
// channelmonitorDescUpdatedAt is the schema descriptor for updated_at field.
|
||||
channelmonitorDescUpdatedAt := channelmonitorMixinFields0[1].Descriptor()
|
||||
// channelmonitor.DefaultUpdatedAt holds the default value on creation for the updated_at field.
|
||||
channelmonitor.DefaultUpdatedAt = channelmonitorDescUpdatedAt.Default.(func() time.Time)
|
||||
// channelmonitor.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
|
||||
channelmonitor.UpdateDefaultUpdatedAt = channelmonitorDescUpdatedAt.UpdateDefault.(func() time.Time)
|
||||
// channelmonitorDescName is the schema descriptor for name field.
|
||||
channelmonitorDescName := channelmonitorFields[0].Descriptor()
|
||||
// channelmonitor.NameValidator is a validator for the "name" field. It is called by the builders before save.
|
||||
channelmonitor.NameValidator = func() func(string) error {
|
||||
validators := channelmonitorDescName.Validators
|
||||
fns := [...]func(string) error{
|
||||
validators[0].(func(string) error),
|
||||
validators[1].(func(string) error),
|
||||
}
|
||||
return func(name string) error {
|
||||
for _, fn := range fns {
|
||||
if err := fn(name); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}()
|
||||
// channelmonitorDescEndpoint is the schema descriptor for endpoint field.
|
||||
channelmonitorDescEndpoint := channelmonitorFields[2].Descriptor()
|
||||
// channelmonitor.EndpointValidator is a validator for the "endpoint" field. It is called by the builders before save.
|
||||
channelmonitor.EndpointValidator = func() func(string) error {
|
||||
validators := channelmonitorDescEndpoint.Validators
|
||||
fns := [...]func(string) error{
|
||||
validators[0].(func(string) error),
|
||||
validators[1].(func(string) error),
|
||||
}
|
||||
return func(endpoint string) error {
|
||||
for _, fn := range fns {
|
||||
if err := fn(endpoint); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}()
|
||||
// channelmonitorDescAPIKeyEncrypted is the schema descriptor for api_key_encrypted field.
|
||||
channelmonitorDescAPIKeyEncrypted := channelmonitorFields[3].Descriptor()
|
||||
// channelmonitor.APIKeyEncryptedValidator is a validator for the "api_key_encrypted" field. It is called by the builders before save.
|
||||
channelmonitor.APIKeyEncryptedValidator = channelmonitorDescAPIKeyEncrypted.Validators[0].(func(string) error)
|
||||
// channelmonitorDescPrimaryModel is the schema descriptor for primary_model field.
|
||||
channelmonitorDescPrimaryModel := channelmonitorFields[4].Descriptor()
|
||||
// channelmonitor.PrimaryModelValidator is a validator for the "primary_model" field. It is called by the builders before save.
|
||||
channelmonitor.PrimaryModelValidator = func() func(string) error {
|
||||
validators := channelmonitorDescPrimaryModel.Validators
|
||||
fns := [...]func(string) error{
|
||||
validators[0].(func(string) error),
|
||||
validators[1].(func(string) error),
|
||||
}
|
||||
return func(primary_model string) error {
|
||||
for _, fn := range fns {
|
||||
if err := fn(primary_model); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}()
|
||||
// channelmonitorDescExtraModels is the schema descriptor for extra_models field.
|
||||
channelmonitorDescExtraModels := channelmonitorFields[5].Descriptor()
|
||||
// channelmonitor.DefaultExtraModels holds the default value on creation for the extra_models field.
|
||||
channelmonitor.DefaultExtraModels = channelmonitorDescExtraModels.Default.([]string)
|
||||
// channelmonitorDescGroupName is the schema descriptor for group_name field.
|
||||
channelmonitorDescGroupName := channelmonitorFields[6].Descriptor()
|
||||
// channelmonitor.DefaultGroupName holds the default value on creation for the group_name field.
|
||||
channelmonitor.DefaultGroupName = channelmonitorDescGroupName.Default.(string)
|
||||
// channelmonitor.GroupNameValidator is a validator for the "group_name" field. It is called by the builders before save.
|
||||
channelmonitor.GroupNameValidator = channelmonitorDescGroupName.Validators[0].(func(string) error)
|
||||
// channelmonitorDescEnabled is the schema descriptor for enabled field.
|
||||
channelmonitorDescEnabled := channelmonitorFields[7].Descriptor()
|
||||
// channelmonitor.DefaultEnabled holds the default value on creation for the enabled field.
|
||||
channelmonitor.DefaultEnabled = channelmonitorDescEnabled.Default.(bool)
|
||||
// channelmonitorDescIntervalSeconds is the schema descriptor for interval_seconds field.
|
||||
channelmonitorDescIntervalSeconds := channelmonitorFields[8].Descriptor()
|
||||
// channelmonitor.IntervalSecondsValidator is a validator for the "interval_seconds" field. It is called by the builders before save.
|
||||
channelmonitor.IntervalSecondsValidator = channelmonitorDescIntervalSeconds.Validators[0].(func(int) error)
|
||||
// channelmonitorDescExtraHeaders is the schema descriptor for extra_headers field.
|
||||
channelmonitorDescExtraHeaders := channelmonitorFields[12].Descriptor()
|
||||
// channelmonitor.DefaultExtraHeaders holds the default value on creation for the extra_headers field.
|
||||
channelmonitor.DefaultExtraHeaders = channelmonitorDescExtraHeaders.Default.(map[string]string)
|
||||
// channelmonitorDescBodyOverrideMode is the schema descriptor for body_override_mode field.
|
||||
channelmonitorDescBodyOverrideMode := channelmonitorFields[13].Descriptor()
|
||||
// channelmonitor.DefaultBodyOverrideMode holds the default value on creation for the body_override_mode field.
|
||||
channelmonitor.DefaultBodyOverrideMode = channelmonitorDescBodyOverrideMode.Default.(string)
|
||||
// channelmonitor.BodyOverrideModeValidator is a validator for the "body_override_mode" field. It is called by the builders before save.
|
||||
channelmonitor.BodyOverrideModeValidator = channelmonitorDescBodyOverrideMode.Validators[0].(func(string) error)
|
||||
channelmonitordailyrollupFields := schema.ChannelMonitorDailyRollup{}.Fields()
|
||||
_ = channelmonitordailyrollupFields
|
||||
// channelmonitordailyrollupDescModel is the schema descriptor for model field.
|
||||
channelmonitordailyrollupDescModel := channelmonitordailyrollupFields[1].Descriptor()
|
||||
// channelmonitordailyrollup.ModelValidator is a validator for the "model" field. It is called by the builders before save.
|
||||
channelmonitordailyrollup.ModelValidator = func() func(string) error {
|
||||
validators := channelmonitordailyrollupDescModel.Validators
|
||||
fns := [...]func(string) error{
|
||||
validators[0].(func(string) error),
|
||||
validators[1].(func(string) error),
|
||||
}
|
||||
return func(model string) error {
|
||||
for _, fn := range fns {
|
||||
if err := fn(model); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}()
|
||||
// channelmonitordailyrollupDescTotalChecks is the schema descriptor for total_checks field.
|
||||
channelmonitordailyrollupDescTotalChecks := channelmonitordailyrollupFields[3].Descriptor()
|
||||
// channelmonitordailyrollup.DefaultTotalChecks holds the default value on creation for the total_checks field.
|
||||
channelmonitordailyrollup.DefaultTotalChecks = channelmonitordailyrollupDescTotalChecks.Default.(int)
|
||||
// channelmonitordailyrollupDescOkCount is the schema descriptor for ok_count field.
|
||||
channelmonitordailyrollupDescOkCount := channelmonitordailyrollupFields[4].Descriptor()
|
||||
// channelmonitordailyrollup.DefaultOkCount holds the default value on creation for the ok_count field.
|
||||
channelmonitordailyrollup.DefaultOkCount = channelmonitordailyrollupDescOkCount.Default.(int)
|
||||
// channelmonitordailyrollupDescOperationalCount is the schema descriptor for operational_count field.
|
||||
channelmonitordailyrollupDescOperationalCount := channelmonitordailyrollupFields[5].Descriptor()
|
||||
// channelmonitordailyrollup.DefaultOperationalCount holds the default value on creation for the operational_count field.
|
||||
channelmonitordailyrollup.DefaultOperationalCount = channelmonitordailyrollupDescOperationalCount.Default.(int)
|
||||
// channelmonitordailyrollupDescDegradedCount is the schema descriptor for degraded_count field.
|
||||
channelmonitordailyrollupDescDegradedCount := channelmonitordailyrollupFields[6].Descriptor()
|
||||
// channelmonitordailyrollup.DefaultDegradedCount holds the default value on creation for the degraded_count field.
|
||||
channelmonitordailyrollup.DefaultDegradedCount = channelmonitordailyrollupDescDegradedCount.Default.(int)
|
||||
// channelmonitordailyrollupDescFailedCount is the schema descriptor for failed_count field.
|
||||
channelmonitordailyrollupDescFailedCount := channelmonitordailyrollupFields[7].Descriptor()
|
||||
// channelmonitordailyrollup.DefaultFailedCount holds the default value on creation for the failed_count field.
|
||||
channelmonitordailyrollup.DefaultFailedCount = channelmonitordailyrollupDescFailedCount.Default.(int)
|
||||
// channelmonitordailyrollupDescErrorCount is the schema descriptor for error_count field.
|
||||
channelmonitordailyrollupDescErrorCount := channelmonitordailyrollupFields[8].Descriptor()
|
||||
// channelmonitordailyrollup.DefaultErrorCount holds the default value on creation for the error_count field.
|
||||
channelmonitordailyrollup.DefaultErrorCount = channelmonitordailyrollupDescErrorCount.Default.(int)
|
||||
// channelmonitordailyrollupDescSumLatencyMs is the schema descriptor for sum_latency_ms field.
|
||||
channelmonitordailyrollupDescSumLatencyMs := channelmonitordailyrollupFields[9].Descriptor()
|
||||
// channelmonitordailyrollup.DefaultSumLatencyMs holds the default value on creation for the sum_latency_ms field.
|
||||
channelmonitordailyrollup.DefaultSumLatencyMs = channelmonitordailyrollupDescSumLatencyMs.Default.(int64)
|
||||
// channelmonitordailyrollupDescCountLatency is the schema descriptor for count_latency field.
|
||||
channelmonitordailyrollupDescCountLatency := channelmonitordailyrollupFields[10].Descriptor()
|
||||
// channelmonitordailyrollup.DefaultCountLatency holds the default value on creation for the count_latency field.
|
||||
channelmonitordailyrollup.DefaultCountLatency = channelmonitordailyrollupDescCountLatency.Default.(int)
|
||||
// channelmonitordailyrollupDescSumPingLatencyMs is the schema descriptor for sum_ping_latency_ms field.
|
||||
channelmonitordailyrollupDescSumPingLatencyMs := channelmonitordailyrollupFields[11].Descriptor()
|
||||
// channelmonitordailyrollup.DefaultSumPingLatencyMs holds the default value on creation for the sum_ping_latency_ms field.
|
||||
channelmonitordailyrollup.DefaultSumPingLatencyMs = channelmonitordailyrollupDescSumPingLatencyMs.Default.(int64)
|
||||
// channelmonitordailyrollupDescCountPingLatency is the schema descriptor for count_ping_latency field.
|
||||
channelmonitordailyrollupDescCountPingLatency := channelmonitordailyrollupFields[12].Descriptor()
|
||||
// channelmonitordailyrollup.DefaultCountPingLatency holds the default value on creation for the count_ping_latency field.
|
||||
channelmonitordailyrollup.DefaultCountPingLatency = channelmonitordailyrollupDescCountPingLatency.Default.(int)
|
||||
// channelmonitordailyrollupDescComputedAt is the schema descriptor for computed_at field.
|
||||
channelmonitordailyrollupDescComputedAt := channelmonitordailyrollupFields[13].Descriptor()
|
||||
// channelmonitordailyrollup.DefaultComputedAt holds the default value on creation for the computed_at field.
|
||||
channelmonitordailyrollup.DefaultComputedAt = channelmonitordailyrollupDescComputedAt.Default.(func() time.Time)
|
||||
// channelmonitordailyrollup.UpdateDefaultComputedAt holds the default value on update for the computed_at field.
|
||||
channelmonitordailyrollup.UpdateDefaultComputedAt = channelmonitordailyrollupDescComputedAt.UpdateDefault.(func() time.Time)
|
||||
channelmonitorhistoryFields := schema.ChannelMonitorHistory{}.Fields()
|
||||
_ = channelmonitorhistoryFields
|
||||
// channelmonitorhistoryDescModel is the schema descriptor for model field.
|
||||
channelmonitorhistoryDescModel := channelmonitorhistoryFields[1].Descriptor()
|
||||
// channelmonitorhistory.ModelValidator is a validator for the "model" field. It is called by the builders before save.
|
||||
channelmonitorhistory.ModelValidator = func() func(string) error {
|
||||
validators := channelmonitorhistoryDescModel.Validators
|
||||
fns := [...]func(string) error{
|
||||
validators[0].(func(string) error),
|
||||
validators[1].(func(string) error),
|
||||
}
|
||||
return func(model string) error {
|
||||
for _, fn := range fns {
|
||||
if err := fn(model); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}()
|
||||
// channelmonitorhistoryDescMessage is the schema descriptor for message field.
|
||||
channelmonitorhistoryDescMessage := channelmonitorhistoryFields[5].Descriptor()
|
||||
// channelmonitorhistory.DefaultMessage holds the default value on creation for the message field.
|
||||
channelmonitorhistory.DefaultMessage = channelmonitorhistoryDescMessage.Default.(string)
|
||||
// channelmonitorhistory.MessageValidator is a validator for the "message" field. It is called by the builders before save.
|
||||
channelmonitorhistory.MessageValidator = channelmonitorhistoryDescMessage.Validators[0].(func(string) error)
|
||||
// channelmonitorhistoryDescCheckedAt is the schema descriptor for checked_at field.
|
||||
channelmonitorhistoryDescCheckedAt := channelmonitorhistoryFields[6].Descriptor()
|
||||
// channelmonitorhistory.DefaultCheckedAt holds the default value on creation for the checked_at field.
|
||||
channelmonitorhistory.DefaultCheckedAt = channelmonitorhistoryDescCheckedAt.Default.(func() time.Time)
|
||||
channelmonitorrequesttemplateMixin := schema.ChannelMonitorRequestTemplate{}.Mixin()
|
||||
channelmonitorrequesttemplateMixinFields0 := channelmonitorrequesttemplateMixin[0].Fields()
|
||||
_ = channelmonitorrequesttemplateMixinFields0
|
||||
channelmonitorrequesttemplateFields := schema.ChannelMonitorRequestTemplate{}.Fields()
|
||||
_ = channelmonitorrequesttemplateFields
|
||||
// channelmonitorrequesttemplateDescCreatedAt is the schema descriptor for created_at field.
|
||||
channelmonitorrequesttemplateDescCreatedAt := channelmonitorrequesttemplateMixinFields0[0].Descriptor()
|
||||
// channelmonitorrequesttemplate.DefaultCreatedAt holds the default value on creation for the created_at field.
|
||||
channelmonitorrequesttemplate.DefaultCreatedAt = channelmonitorrequesttemplateDescCreatedAt.Default.(func() time.Time)
|
||||
// channelmonitorrequesttemplateDescUpdatedAt is the schema descriptor for updated_at field.
|
||||
channelmonitorrequesttemplateDescUpdatedAt := channelmonitorrequesttemplateMixinFields0[1].Descriptor()
|
||||
// channelmonitorrequesttemplate.DefaultUpdatedAt holds the default value on creation for the updated_at field.
|
||||
channelmonitorrequesttemplate.DefaultUpdatedAt = channelmonitorrequesttemplateDescUpdatedAt.Default.(func() time.Time)
|
||||
// channelmonitorrequesttemplate.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
|
||||
channelmonitorrequesttemplate.UpdateDefaultUpdatedAt = channelmonitorrequesttemplateDescUpdatedAt.UpdateDefault.(func() time.Time)
|
||||
// channelmonitorrequesttemplateDescName is the schema descriptor for name field.
|
||||
channelmonitorrequesttemplateDescName := channelmonitorrequesttemplateFields[0].Descriptor()
|
||||
// channelmonitorrequesttemplate.NameValidator is a validator for the "name" field. It is called by the builders before save.
|
||||
channelmonitorrequesttemplate.NameValidator = func() func(string) error {
|
||||
validators := channelmonitorrequesttemplateDescName.Validators
|
||||
fns := [...]func(string) error{
|
||||
validators[0].(func(string) error),
|
||||
validators[1].(func(string) error),
|
||||
}
|
||||
return func(name string) error {
|
||||
for _, fn := range fns {
|
||||
if err := fn(name); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}()
|
||||
// channelmonitorrequesttemplateDescDescription is the schema descriptor for description field.
|
||||
channelmonitorrequesttemplateDescDescription := channelmonitorrequesttemplateFields[2].Descriptor()
|
||||
// channelmonitorrequesttemplate.DefaultDescription holds the default value on creation for the description field.
|
||||
channelmonitorrequesttemplate.DefaultDescription = channelmonitorrequesttemplateDescDescription.Default.(string)
|
||||
// channelmonitorrequesttemplate.DescriptionValidator is a validator for the "description" field. It is called by the builders before save.
|
||||
channelmonitorrequesttemplate.DescriptionValidator = channelmonitorrequesttemplateDescDescription.Validators[0].(func(string) error)
|
||||
// channelmonitorrequesttemplateDescExtraHeaders is the schema descriptor for extra_headers field.
|
||||
channelmonitorrequesttemplateDescExtraHeaders := channelmonitorrequesttemplateFields[3].Descriptor()
|
||||
// channelmonitorrequesttemplate.DefaultExtraHeaders holds the default value on creation for the extra_headers field.
|
||||
channelmonitorrequesttemplate.DefaultExtraHeaders = channelmonitorrequesttemplateDescExtraHeaders.Default.(map[string]string)
|
||||
// channelmonitorrequesttemplateDescBodyOverrideMode is the schema descriptor for body_override_mode field.
|
||||
channelmonitorrequesttemplateDescBodyOverrideMode := channelmonitorrequesttemplateFields[4].Descriptor()
|
||||
// channelmonitorrequesttemplate.DefaultBodyOverrideMode holds the default value on creation for the body_override_mode field.
|
||||
channelmonitorrequesttemplate.DefaultBodyOverrideMode = channelmonitorrequesttemplateDescBodyOverrideMode.Default.(string)
|
||||
// channelmonitorrequesttemplate.BodyOverrideModeValidator is a validator for the "body_override_mode" field. It is called by the builders before save.
|
||||
channelmonitorrequesttemplate.BodyOverrideModeValidator = channelmonitorrequesttemplateDescBodyOverrideMode.Validators[0].(func(string) error)
|
||||
errorpassthroughruleMixin := schema.ErrorPassthroughRule{}.Mixin()
|
||||
errorpassthroughruleMixinFields0 := errorpassthroughruleMixin[0].Fields()
|
||||
_ = errorpassthroughruleMixinFields0
|
||||
|
||||
110
backend/ent/schema/channel_monitor.go
Normal file
110
backend/ent/schema/channel_monitor.go
Normal file
@@ -0,0 +1,110 @@
|
||||
package schema
|
||||
|
||||
import (
|
||||
"github.com/Wei-Shaw/sub2api/ent/schema/mixins"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/entsql"
|
||||
"entgo.io/ent/schema"
|
||||
"entgo.io/ent/schema/edge"
|
||||
"entgo.io/ent/schema/field"
|
||||
"entgo.io/ent/schema/index"
|
||||
)
|
||||
|
||||
// ChannelMonitor holds the schema definition for the ChannelMonitor entity.
|
||||
// 渠道监控配置:定期对指定 provider/endpoint/api_key 下的模型做心跳测试。
|
||||
type ChannelMonitor struct {
|
||||
ent.Schema
|
||||
}
|
||||
|
||||
func (ChannelMonitor) Annotations() []schema.Annotation {
|
||||
return []schema.Annotation{
|
||||
entsql.Annotation{Table: "channel_monitors"},
|
||||
}
|
||||
}
|
||||
|
||||
func (ChannelMonitor) Mixin() []ent.Mixin {
|
||||
return []ent.Mixin{
|
||||
mixins.TimeMixin{},
|
||||
}
|
||||
}
|
||||
|
||||
func (ChannelMonitor) Fields() []ent.Field {
|
||||
return []ent.Field{
|
||||
field.String("name").
|
||||
NotEmpty().
|
||||
MaxLen(100),
|
||||
field.Enum("provider").
|
||||
Values("openai", "anthropic", "gemini"),
|
||||
field.String("endpoint").
|
||||
NotEmpty().
|
||||
MaxLen(500).
|
||||
Comment("Provider base origin, e.g. https://api.openai.com"),
|
||||
field.String("api_key_encrypted").
|
||||
NotEmpty().
|
||||
Sensitive().
|
||||
Comment("AES-256-GCM encrypted API key"),
|
||||
field.String("primary_model").
|
||||
NotEmpty().
|
||||
MaxLen(200),
|
||||
field.JSON("extra_models", []string{}).
|
||||
Default([]string{}).
|
||||
Comment("Additional model names to test alongside primary_model"),
|
||||
field.String("group_name").
|
||||
Optional().
|
||||
Default("").
|
||||
MaxLen(100),
|
||||
field.Bool("enabled").
|
||||
Default(true),
|
||||
field.Int("interval_seconds").
|
||||
Range(15, 3600),
|
||||
field.Time("last_checked_at").
|
||||
Optional().
|
||||
Nillable(),
|
||||
field.Int64("created_by"),
|
||||
|
||||
// ---- 自定义请求快照字段(来自模板 / 手动编辑) ----
|
||||
|
||||
// template_id: 关联的请求模板 ID(仅用于 UI 分组 + 一键应用)。
|
||||
// 实际运行时 checker 只读下面 3 个快照字段,**不再回查模板表**。
|
||||
// 模板被删除时此字段会被 SET NULL(见 Edges 的 OnDelete 注解)。
|
||||
field.Int64("template_id").
|
||||
Optional().
|
||||
Nillable(),
|
||||
// extra_headers: 自定义 HTTP 头快照(来自模板 or 用户手填)。
|
||||
// 运行时 merge 进 adapter 默认 headers。
|
||||
field.JSON("extra_headers", map[string]string{}).
|
||||
Default(map[string]string{}),
|
||||
// body_override_mode: 同 ChannelMonitorRequestTemplate.body_override_mode
|
||||
field.String("body_override_mode").
|
||||
Default("off").
|
||||
MaxLen(10),
|
||||
// body_override: 同 ChannelMonitorRequestTemplate.body_override
|
||||
field.JSON("body_override", map[string]any{}).
|
||||
Optional(),
|
||||
}
|
||||
}
|
||||
|
||||
func (ChannelMonitor) Edges() []ent.Edge {
|
||||
return []ent.Edge{
|
||||
edge.To("history", ChannelMonitorHistory.Type).
|
||||
Annotations(entsql.OnDelete(entsql.Cascade)),
|
||||
edge.To("daily_rollups", ChannelMonitorDailyRollup.Type).
|
||||
Annotations(entsql.OnDelete(entsql.Cascade)),
|
||||
// 关联请求模板:模板被删除时 template_id 自动置空,
|
||||
// 监控本身保留(继续用快照字段跑)。
|
||||
edge.To("request_template", ChannelMonitorRequestTemplate.Type).
|
||||
Field("template_id").
|
||||
Unique().
|
||||
Annotations(entsql.OnDelete(entsql.SetNull)),
|
||||
}
|
||||
}
|
||||
|
||||
func (ChannelMonitor) Indexes() []ent.Index {
|
||||
return []ent.Index{
|
||||
index.Fields("enabled", "last_checked_at"),
|
||||
index.Fields("provider"),
|
||||
index.Fields("group_name"),
|
||||
index.Fields("template_id"),
|
||||
}
|
||||
}
|
||||
66
backend/ent/schema/channel_monitor_daily_rollup.go
Normal file
66
backend/ent/schema/channel_monitor_daily_rollup.go
Normal file
@@ -0,0 +1,66 @@
|
||||
package schema
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect"
|
||||
"entgo.io/ent/dialect/entsql"
|
||||
"entgo.io/ent/schema"
|
||||
"entgo.io/ent/schema/edge"
|
||||
"entgo.io/ent/schema/field"
|
||||
"entgo.io/ent/schema/index"
|
||||
)
|
||||
|
||||
// ChannelMonitorDailyRollup 按 (monitor_id, model, bucket_date) 维度聚合的渠道监控日统计。
|
||||
// 每天的明细被收敛为一行(保留 status 分布 + 延迟和),用于 7d/15d/30d 窗口的可用率
|
||||
// 加权计算(avg_latency = sum_latency_ms / count_latency;availability = ok_count / total_checks)。
|
||||
// 超过保留期由每日维护任务分批物理删(不用软删除,理由同 channel_monitor_history)。
|
||||
type ChannelMonitorDailyRollup struct {
|
||||
ent.Schema
|
||||
}
|
||||
|
||||
func (ChannelMonitorDailyRollup) Annotations() []schema.Annotation {
|
||||
return []schema.Annotation{
|
||||
entsql.Annotation{Table: "channel_monitor_daily_rollups"},
|
||||
}
|
||||
}
|
||||
|
||||
func (ChannelMonitorDailyRollup) Fields() []ent.Field {
|
||||
return []ent.Field{
|
||||
field.Int64("monitor_id"),
|
||||
field.String("model").
|
||||
NotEmpty().
|
||||
MaxLen(200),
|
||||
field.Time("bucket_date").
|
||||
SchemaType(map[string]string{dialect.Postgres: "date"}),
|
||||
field.Int("total_checks").Default(0),
|
||||
field.Int("ok_count").Default(0),
|
||||
field.Int("operational_count").Default(0),
|
||||
field.Int("degraded_count").Default(0),
|
||||
field.Int("failed_count").Default(0),
|
||||
field.Int("error_count").Default(0),
|
||||
field.Int64("sum_latency_ms").Default(0),
|
||||
field.Int("count_latency").Default(0),
|
||||
field.Int64("sum_ping_latency_ms").Default(0),
|
||||
field.Int("count_ping_latency").Default(0),
|
||||
field.Time("computed_at").Default(time.Now).UpdateDefault(time.Now),
|
||||
}
|
||||
}
|
||||
|
||||
func (ChannelMonitorDailyRollup) Edges() []ent.Edge {
|
||||
return []ent.Edge{
|
||||
edge.From("monitor", ChannelMonitor.Type).
|
||||
Ref("daily_rollups").
|
||||
Field("monitor_id").
|
||||
Unique().
|
||||
Required(),
|
||||
}
|
||||
}
|
||||
|
||||
func (ChannelMonitorDailyRollup) Indexes() []ent.Index {
|
||||
return []ent.Index{
|
||||
index.Fields("monitor_id", "model", "bucket_date").Unique(),
|
||||
index.Fields("bucket_date"),
|
||||
}
|
||||
}
|
||||
66
backend/ent/schema/channel_monitor_history.go
Normal file
66
backend/ent/schema/channel_monitor_history.go
Normal file
@@ -0,0 +1,66 @@
|
||||
package schema
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/entsql"
|
||||
"entgo.io/ent/schema"
|
||||
"entgo.io/ent/schema/edge"
|
||||
"entgo.io/ent/schema/field"
|
||||
"entgo.io/ent/schema/index"
|
||||
)
|
||||
|
||||
// ChannelMonitorHistory holds the schema definition for the ChannelMonitorHistory entity.
|
||||
// 渠道监控历史:每次检测每个模型一行记录。明细只保留 1 天,超过 1 天由每日维护任务
|
||||
// 先聚合到 channel_monitor_daily_rollups,再分批物理删(不用软删除:日志类表无恢复
|
||||
// 需求,软删会让行和索引只增不减,徒增磁盘和查询开销)。
|
||||
type ChannelMonitorHistory struct {
|
||||
ent.Schema
|
||||
}
|
||||
|
||||
func (ChannelMonitorHistory) Annotations() []schema.Annotation {
|
||||
return []schema.Annotation{
|
||||
entsql.Annotation{Table: "channel_monitor_histories"},
|
||||
}
|
||||
}
|
||||
|
||||
func (ChannelMonitorHistory) Fields() []ent.Field {
|
||||
return []ent.Field{
|
||||
field.Int64("monitor_id"),
|
||||
field.String("model").
|
||||
NotEmpty().
|
||||
MaxLen(200),
|
||||
field.Enum("status").
|
||||
Values("operational", "degraded", "failed", "error"),
|
||||
field.Int("latency_ms").
|
||||
Optional().
|
||||
Nillable(),
|
||||
field.Int("ping_latency_ms").
|
||||
Optional().
|
||||
Nillable(),
|
||||
field.String("message").
|
||||
Optional().
|
||||
Default("").
|
||||
MaxLen(500),
|
||||
field.Time("checked_at").
|
||||
Default(time.Now),
|
||||
}
|
||||
}
|
||||
|
||||
func (ChannelMonitorHistory) Edges() []ent.Edge {
|
||||
return []ent.Edge{
|
||||
edge.From("monitor", ChannelMonitor.Type).
|
||||
Ref("history").
|
||||
Field("monitor_id").
|
||||
Unique().
|
||||
Required(),
|
||||
}
|
||||
}
|
||||
|
||||
func (ChannelMonitorHistory) Indexes() []ent.Index {
|
||||
return []ent.Index{
|
||||
index.Fields("monitor_id", "model", "checked_at"),
|
||||
index.Fields("checked_at"),
|
||||
}
|
||||
}
|
||||
80
backend/ent/schema/channel_monitor_request_template.go
Normal file
80
backend/ent/schema/channel_monitor_request_template.go
Normal file
@@ -0,0 +1,80 @@
|
||||
package schema
|
||||
|
||||
import (
|
||||
"github.com/Wei-Shaw/sub2api/ent/schema/mixins"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/entsql"
|
||||
"entgo.io/ent/schema"
|
||||
"entgo.io/ent/schema/edge"
|
||||
"entgo.io/ent/schema/field"
|
||||
"entgo.io/ent/schema/index"
|
||||
)
|
||||
|
||||
// ChannelMonitorRequestTemplate 请求模板:一组可复用的 headers + 可选 body 覆盖配置。
|
||||
//
|
||||
// 语义为快照:模板被"应用"到监控时,extra_headers / body_override_mode / body_override
|
||||
// 会被**拷贝**到 channel_monitors 同名字段;后续模板变动不会自动影响已应用的监控——
|
||||
// 必须用户主动在模板编辑 Dialog 里点「应用到关联监控」才会覆盖快照。
|
||||
// 这样模板改错不会瞬间打挂所有已经跑起来的监控。
|
||||
type ChannelMonitorRequestTemplate struct {
|
||||
ent.Schema
|
||||
}
|
||||
|
||||
func (ChannelMonitorRequestTemplate) Annotations() []schema.Annotation {
|
||||
return []schema.Annotation{
|
||||
entsql.Annotation{Table: "channel_monitor_request_templates"},
|
||||
}
|
||||
}
|
||||
|
||||
func (ChannelMonitorRequestTemplate) Mixin() []ent.Mixin {
|
||||
return []ent.Mixin{
|
||||
mixins.TimeMixin{},
|
||||
}
|
||||
}
|
||||
|
||||
func (ChannelMonitorRequestTemplate) Fields() []ent.Field {
|
||||
return []ent.Field{
|
||||
field.String("name").
|
||||
NotEmpty().
|
||||
MaxLen(100),
|
||||
field.Enum("provider").
|
||||
Values("openai", "anthropic", "gemini"),
|
||||
field.String("description").
|
||||
Optional().
|
||||
Default("").
|
||||
MaxLen(500),
|
||||
// extra_headers: 用户自定义 HTTP 头(如 User-Agent 伪装)。
|
||||
// 运行时 merge 进 adapter 默认 headers,用户值优先;
|
||||
// hop-by-hop 黑名单(Host/Content-Length/...)由 checker 过滤。
|
||||
field.JSON("extra_headers", map[string]string{}).
|
||||
Default(map[string]string{}),
|
||||
// body_override_mode: 'off' | 'merge' | 'replace'
|
||||
// off - 用 adapter 默认 body(忽略 body_override)
|
||||
// merge - adapter 默认 body 与 body_override 浅合并(body_override 优先,
|
||||
// model/messages/contents 等关键字段在 checker 里走黑名单跳过)
|
||||
// replace - 直接用 body_override 作为完整 body;此时跳过 challenge 校验,
|
||||
// 改为 HTTP 2xx + 响应文本非空即视为可用
|
||||
field.String("body_override_mode").
|
||||
Default("off").
|
||||
MaxLen(10),
|
||||
// body_override: JSON 对象,根据 body_override_mode 使用。
|
||||
// 用 map[string]any 以便前端传任意结构(含嵌套)。
|
||||
field.JSON("body_override", map[string]any{}).
|
||||
Optional(),
|
||||
}
|
||||
}
|
||||
|
||||
func (ChannelMonitorRequestTemplate) Edges() []ent.Edge {
|
||||
return []ent.Edge{
|
||||
edge.From("monitors", ChannelMonitor.Type).
|
||||
Ref("request_template"),
|
||||
}
|
||||
}
|
||||
|
||||
func (ChannelMonitorRequestTemplate) Indexes() []ent.Index {
|
||||
return []ent.Index{
|
||||
// 同一 provider 内 name 唯一:允许 Anthropic + OpenAI 重名 "伪装官方客户端"。
|
||||
index.Fields("provider", "name").Unique(),
|
||||
}
|
||||
}
|
||||
@@ -28,6 +28,14 @@ type Tx struct {
|
||||
AuthIdentity *AuthIdentityClient
|
||||
// AuthIdentityChannel is the client for interacting with the AuthIdentityChannel builders.
|
||||
AuthIdentityChannel *AuthIdentityChannelClient
|
||||
// ChannelMonitor is the client for interacting with the ChannelMonitor builders.
|
||||
ChannelMonitor *ChannelMonitorClient
|
||||
// ChannelMonitorDailyRollup is the client for interacting with the ChannelMonitorDailyRollup builders.
|
||||
ChannelMonitorDailyRollup *ChannelMonitorDailyRollupClient
|
||||
// ChannelMonitorHistory is the client for interacting with the ChannelMonitorHistory builders.
|
||||
ChannelMonitorHistory *ChannelMonitorHistoryClient
|
||||
// ChannelMonitorRequestTemplate is the client for interacting with the ChannelMonitorRequestTemplate builders.
|
||||
ChannelMonitorRequestTemplate *ChannelMonitorRequestTemplateClient
|
||||
// ErrorPassthroughRule is the client for interacting with the ErrorPassthroughRule builders.
|
||||
ErrorPassthroughRule *ErrorPassthroughRuleClient
|
||||
// Group is the client for interacting with the Group builders.
|
||||
@@ -212,6 +220,10 @@ func (tx *Tx) init() {
|
||||
tx.AnnouncementRead = NewAnnouncementReadClient(tx.config)
|
||||
tx.AuthIdentity = NewAuthIdentityClient(tx.config)
|
||||
tx.AuthIdentityChannel = NewAuthIdentityChannelClient(tx.config)
|
||||
tx.ChannelMonitor = NewChannelMonitorClient(tx.config)
|
||||
tx.ChannelMonitorDailyRollup = NewChannelMonitorDailyRollupClient(tx.config)
|
||||
tx.ChannelMonitorHistory = NewChannelMonitorHistoryClient(tx.config)
|
||||
tx.ChannelMonitorRequestTemplate = NewChannelMonitorRequestTemplateClient(tx.config)
|
||||
tx.ErrorPassthroughRule = NewErrorPassthroughRuleClient(tx.config)
|
||||
tx.Group = NewGroupClient(tx.config)
|
||||
tx.IdempotencyRecord = NewIdempotencyRecordClient(tx.config)
|
||||
|
||||
@@ -183,6 +183,8 @@ github.com/icholy/digest v1.1.0 h1:HfGg9Irj7i+IX1o1QAmPfIBNu/Q5A5Tu3n/MED9k9H4=
|
||||
github.com/icholy/digest v1.1.0/go.mod h1:QNrsSGQ5v7v9cReDI0+eyjsXGUoRSUZQHeQ5C4XLa0Y=
|
||||
github.com/imroc/req/v3 v3.57.0 h1:LMTUjNRUybUkTPn8oJDq8Kg3JRBOBTcnDhKu7mzupKI=
|
||||
github.com/imroc/req/v3 v3.57.0/go.mod h1:JL62ey1nvSLq81HORNcosvlf7SxZStONNqOprg0Pz00=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
|
||||
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
|
||||
|
||||
@@ -652,6 +652,7 @@ func (h *AccountHandler) Delete(c *gin.Context) {
|
||||
type TestAccountRequest struct {
|
||||
ModelID string `json:"model_id"`
|
||||
Prompt string `json:"prompt"`
|
||||
Mode string `json:"mode"`
|
||||
}
|
||||
|
||||
type SyncFromCRSRequest struct {
|
||||
@@ -682,7 +683,7 @@ func (h *AccountHandler) Test(c *gin.Context) {
|
||||
_ = c.ShouldBindJSON(&req)
|
||||
|
||||
// Use AccountTestService to test the account with SSE streaming
|
||||
if err := h.accountTestService.TestAccountConnection(c, accountID, req.ModelID, req.Prompt); err != nil {
|
||||
if err := h.accountTestService.TestAccountConnection(c, accountID, req.ModelID, req.Prompt, req.Mode); err != nil {
|
||||
// Error already sent via SSE, just log
|
||||
return
|
||||
}
|
||||
|
||||
183
backend/internal/handler/admin/affiliate_handler.go
Normal file
183
backend/internal/handler/admin/affiliate_handler.go
Normal file
@@ -0,0 +1,183 @@
|
||||
package admin
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/response"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// AffiliateHandler handles admin affiliate (邀请返利) management:
|
||||
// listing users with custom settings, updating per-user invite codes
|
||||
// and exclusive rebate rates, and batch operations.
|
||||
type AffiliateHandler struct {
|
||||
affiliateService *service.AffiliateService
|
||||
adminService service.AdminService
|
||||
}
|
||||
|
||||
// NewAffiliateHandler creates a new admin affiliate handler.
|
||||
func NewAffiliateHandler(affiliateService *service.AffiliateService, adminService service.AdminService) *AffiliateHandler {
|
||||
return &AffiliateHandler{
|
||||
affiliateService: affiliateService,
|
||||
adminService: adminService,
|
||||
}
|
||||
}
|
||||
|
||||
// ListUsers returns paginated users with custom affiliate settings.
|
||||
// GET /api/v1/admin/affiliates/users
|
||||
func (h *AffiliateHandler) ListUsers(c *gin.Context) {
|
||||
page, pageSize := response.ParsePagination(c)
|
||||
search := c.Query("search")
|
||||
|
||||
entries, total, err := h.affiliateService.AdminListCustomUsers(c.Request.Context(), service.AffiliateAdminFilter{
|
||||
Search: search,
|
||||
Page: page,
|
||||
PageSize: pageSize,
|
||||
})
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
response.Paginated(c, entries, total, page, pageSize)
|
||||
}
|
||||
|
||||
// UpdateUserSettings updates a user's affiliate settings.
|
||||
// PUT /api/v1/admin/affiliates/users/:user_id
|
||||
//
|
||||
// Both fields are optional and applied independently.
|
||||
type UpdateAffiliateUserRequest struct {
|
||||
AffCode *string `json:"aff_code"`
|
||||
AffRebateRatePercent *float64 `json:"aff_rebate_rate_percent"`
|
||||
// ClearRebateRate explicitly clears the per-user rate (sets it to NULL).
|
||||
// Used to disambiguate from "field not provided".
|
||||
ClearRebateRate bool `json:"clear_rebate_rate"`
|
||||
}
|
||||
|
||||
func (h *AffiliateHandler) UpdateUserSettings(c *gin.Context) {
|
||||
userID, err := strconv.ParseInt(c.Param("user_id"), 10, 64)
|
||||
if err != nil || userID <= 0 {
|
||||
response.BadRequest(c, "Invalid user_id")
|
||||
return
|
||||
}
|
||||
|
||||
var req UpdateAffiliateUserRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
response.BadRequest(c, "Invalid request: "+err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if req.AffCode != nil {
|
||||
if err := h.affiliateService.AdminUpdateUserAffCode(c.Request.Context(), userID, *req.AffCode); err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if req.ClearRebateRate {
|
||||
if err := h.affiliateService.AdminSetUserRebateRate(c.Request.Context(), userID, nil); err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
} else if req.AffRebateRatePercent != nil {
|
||||
if err := h.affiliateService.AdminSetUserRebateRate(c.Request.Context(), userID, req.AffRebateRatePercent); err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
response.Success(c, gin.H{"user_id": userID})
|
||||
}
|
||||
|
||||
// ClearUserSettings removes ALL of a user's custom affiliate settings — clears
|
||||
// the exclusive rebate rate AND regenerates the invite code as a new system
|
||||
// random one. Conceptually this "removes the user from the custom list".
|
||||
//
|
||||
// Both writes happen in this handler; failure of one leaves the other applied,
|
||||
// but the operation is idempotent so the admin can re-run it safely.
|
||||
// DELETE /api/v1/admin/affiliates/users/:user_id
|
||||
func (h *AffiliateHandler) ClearUserSettings(c *gin.Context) {
|
||||
userID, err := strconv.ParseInt(c.Param("user_id"), 10, 64)
|
||||
if err != nil || userID <= 0 {
|
||||
response.BadRequest(c, "Invalid user_id")
|
||||
return
|
||||
}
|
||||
if err := h.affiliateService.AdminSetUserRebateRate(c.Request.Context(), userID, nil); err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
if _, err := h.affiliateService.AdminResetUserAffCode(c.Request.Context(), userID); err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
response.Success(c, gin.H{"user_id": userID})
|
||||
}
|
||||
|
||||
// BatchSetRate applies the same rebate rate (or clears it) to multiple users.
|
||||
//
|
||||
// Protocol: pass `clear: true` to clear rates (aff_rebate_rate_percent is
|
||||
// ignored). Otherwise aff_rebate_rate_percent is required and applied to
|
||||
// every user_id. The explicit `clear` flag exists because Go's JSON unmarshal
|
||||
// can't distinguish a missing field from `null`, and a silent clear from a
|
||||
// frontend that forgot to include the rate would be a footgun.
|
||||
//
|
||||
// POST /api/v1/admin/affiliates/users/batch-rate
|
||||
type BatchSetRateRequest struct {
|
||||
UserIDs []int64 `json:"user_ids" binding:"required"`
|
||||
AffRebateRatePercent *float64 `json:"aff_rebate_rate_percent"`
|
||||
Clear bool `json:"clear"`
|
||||
}
|
||||
|
||||
func (h *AffiliateHandler) BatchSetRate(c *gin.Context) {
|
||||
var req BatchSetRateRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
response.BadRequest(c, "Invalid request: "+err.Error())
|
||||
return
|
||||
}
|
||||
if len(req.UserIDs) == 0 {
|
||||
response.BadRequest(c, "user_ids cannot be empty")
|
||||
return
|
||||
}
|
||||
if !req.Clear && req.AffRebateRatePercent == nil {
|
||||
response.BadRequest(c, "aff_rebate_rate_percent is required unless clear=true")
|
||||
return
|
||||
}
|
||||
rate := req.AffRebateRatePercent
|
||||
if req.Clear {
|
||||
rate = nil
|
||||
}
|
||||
if err := h.affiliateService.AdminBatchSetUserRebateRate(c.Request.Context(), req.UserIDs, rate); err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
response.Success(c, gin.H{"affected": len(req.UserIDs)})
|
||||
}
|
||||
|
||||
// AffiliateUserSummary is the minimal user shape returned by LookupUsers,
|
||||
// shared with the frontend's add-custom-user picker.
|
||||
type AffiliateUserSummary struct {
|
||||
ID int64 `json:"id"`
|
||||
Email string `json:"email"`
|
||||
Username string `json:"username"`
|
||||
}
|
||||
|
||||
// LookupUsers searches users by email/username for the "add custom user" modal.
|
||||
// GET /api/v1/admin/affiliates/users/lookup?q=
|
||||
func (h *AffiliateHandler) LookupUsers(c *gin.Context) {
|
||||
keyword := c.Query("q")
|
||||
if keyword == "" {
|
||||
response.Success(c, []AffiliateUserSummary{})
|
||||
return
|
||||
}
|
||||
users, _, err := h.adminService.ListUsers(c.Request.Context(), 1, 20, service.UserListFilters{Search: keyword}, "email", "asc")
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
result := make([]AffiliateUserSummary, len(users))
|
||||
for i, u := range users {
|
||||
result[i] = AffiliateUserSummary{ID: u.ID, Email: u.Email, Username: u.Username}
|
||||
}
|
||||
response.Success(c, result)
|
||||
}
|
||||
@@ -158,9 +158,6 @@ func channelToResponse(ch *service.Channel) *channelResponse {
|
||||
UpdatedAt: ch.UpdatedAt.Format("2006-01-02T15:04:05Z"),
|
||||
}
|
||||
resp.BillingModelSource = ch.BillingModelSource
|
||||
if resp.BillingModelSource == "" {
|
||||
resp.BillingModelSource = service.BillingModelSourceChannelMapped
|
||||
}
|
||||
if resp.GroupIDs == nil {
|
||||
resp.GroupIDs = []int64{}
|
||||
}
|
||||
|
||||
@@ -91,7 +91,7 @@ func TestChannelToResponse_EmptyDefaults(t *testing.T) {
|
||||
ch := &service.Channel{
|
||||
ID: 1,
|
||||
Name: "ch",
|
||||
BillingModelSource: "",
|
||||
BillingModelSource: service.BillingModelSourceChannelMapped,
|
||||
CreatedAt: now,
|
||||
UpdatedAt: now,
|
||||
GroupIDs: nil,
|
||||
@@ -105,6 +105,9 @@ func TestChannelToResponse_EmptyDefaults(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
// handler 层 channelToResponse 现在是纯透传:BillingModelSource 的空值兜底
|
||||
// 已下放到 service 层(Create/GetByID/List/Update/ListAvailable 出口统一处理),
|
||||
// 因此这里构造 fixture 时直接传入归一化后的值。
|
||||
resp := channelToResponse(ch)
|
||||
require.Equal(t, "channel_mapped", resp.BillingModelSource)
|
||||
require.NotNil(t, resp.GroupIDs)
|
||||
@@ -117,6 +120,19 @@ func TestChannelToResponse_EmptyDefaults(t *testing.T) {
|
||||
require.Equal(t, "token", resp.ModelPricing[0].BillingMode)
|
||||
}
|
||||
|
||||
func TestChannelToResponse_BillingModelSourcePassthrough(t *testing.T) {
|
||||
// handler 不再兜底 BillingModelSource:空值应原样透传(由 service 层负责默认回填)。
|
||||
ch := &service.Channel{
|
||||
ID: 1,
|
||||
Name: "ch",
|
||||
BillingModelSource: "",
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
}
|
||||
resp := channelToResponse(ch)
|
||||
require.Equal(t, "", resp.BillingModelSource, "handler 应纯透传,默认值由 service.normalizeBillingModelSource 负责")
|
||||
}
|
||||
|
||||
func TestChannelToResponse_NilModels(t *testing.T) {
|
||||
now := time.Now()
|
||||
ch := &service.Channel{
|
||||
|
||||
427
backend/internal/handler/admin/channel_monitor_handler.go
Normal file
427
backend/internal/handler/admin/channel_monitor_handler.go
Normal file
@@ -0,0 +1,427 @@
|
||||
package admin
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/handler/dto"
|
||||
infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/response"
|
||||
middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
const (
|
||||
// monitorMaxPageSize 列表分页上限。
|
||||
monitorMaxPageSize = 100
|
||||
// monitorAPIKeyMaskPrefix 脱敏时保留的明文前缀长度。
|
||||
monitorAPIKeyMaskPrefix = 4
|
||||
// monitorAPIKeyMaskSuffix 脱敏后追加的占位字符串。
|
||||
monitorAPIKeyMaskSuffix = "***"
|
||||
)
|
||||
|
||||
// ChannelMonitorHandler 渠道监控管理后台 handler。
|
||||
type ChannelMonitorHandler struct {
|
||||
monitorService *service.ChannelMonitorService
|
||||
}
|
||||
|
||||
// NewChannelMonitorHandler 创建 handler。
|
||||
func NewChannelMonitorHandler(monitorService *service.ChannelMonitorService) *ChannelMonitorHandler {
|
||||
return &ChannelMonitorHandler{monitorService: monitorService}
|
||||
}
|
||||
|
||||
// --- Request / Response ---
|
||||
|
||||
type channelMonitorCreateRequest struct {
|
||||
Name string `json:"name" binding:"required,max=100"`
|
||||
Provider string `json:"provider" binding:"required,oneof=openai anthropic gemini"`
|
||||
Endpoint string `json:"endpoint" binding:"required,max=500"`
|
||||
APIKey string `json:"api_key" binding:"required,max=2000"`
|
||||
PrimaryModel string `json:"primary_model" binding:"required,max=200"`
|
||||
ExtraModels []string `json:"extra_models"`
|
||||
GroupName string `json:"group_name" binding:"max=100"`
|
||||
Enabled *bool `json:"enabled"`
|
||||
IntervalSeconds int `json:"interval_seconds" binding:"required,min=15,max=3600"`
|
||||
TemplateID *int64 `json:"template_id"`
|
||||
ExtraHeaders map[string]string `json:"extra_headers"`
|
||||
BodyOverrideMode string `json:"body_override_mode" binding:"omitempty,oneof=off merge replace"`
|
||||
BodyOverride map[string]any `json:"body_override"`
|
||||
}
|
||||
|
||||
// channelMonitorUpdateRequest is the JSON payload for a partial update.
// All fields are pointers so an absent field means "leave unchanged".
type channelMonitorUpdateRequest struct {
	Name            *string   `json:"name" binding:"omitempty,max=100"`
	Provider        *string   `json:"provider" binding:"omitempty,oneof=openai anthropic gemini"`
	Endpoint        *string   `json:"endpoint" binding:"omitempty,max=500"`
	APIKey          *string   `json:"api_key" binding:"omitempty,max=2000"`
	PrimaryModel    *string   `json:"primary_model" binding:"omitempty,max=200"`
	ExtraModels     *[]string `json:"extra_models"`
	GroupName       *string   `json:"group_name" binding:"omitempty,max=100"`
	Enabled         *bool     `json:"enabled"`
	IntervalSeconds *int      `json:"interval_seconds" binding:"omitempty,min=15,max=3600"`
	TemplateID      *int64    `json:"template_id"`
	// ClearTemplate, when true, nulls out template_id and TemplateID is ignored.
	ClearTemplate    bool               `json:"clear_template"`
	ExtraHeaders     *map[string]string `json:"extra_headers"`
	BodyOverrideMode *string            `json:"body_override_mode" binding:"omitempty,oneof=off merge replace"`
	BodyOverride     *map[string]any    `json:"body_override"`
}
|
||||
|
||||
// channelMonitorResponse is the admin API representation of a monitor.
// The API key is never returned in plaintext; only a masked form plus a
// decrypt-failure flag are exposed.
type channelMonitorResponse struct {
	ID                  int64    `json:"id"`
	Name                string   `json:"name"`
	Provider            string   `json:"provider"`
	Endpoint            string   `json:"endpoint"`
	APIKeyMasked        string   `json:"api_key_masked"`
	APIKeyDecryptFailed bool     `json:"api_key_decrypt_failed"`
	PrimaryModel        string   `json:"primary_model"`
	ExtraModels         []string `json:"extra_models"`
	GroupName           string   `json:"group_name"`
	Enabled             bool     `json:"enabled"`
	IntervalSeconds     int      `json:"interval_seconds"`
	// LastCheckedAt is RFC3339 UTC; nil when the monitor has never run.
	LastCheckedAt *string `json:"last_checked_at"`
	CreatedBy     int64   `json:"created_by"`
	CreatedAt     string  `json:"created_at"`
	UpdatedAt     string  `json:"updated_at"`
	// Aggregated status fields, populated by the List handler from the
	// batched summary (zero values elsewhere, e.g. in Get/Create/Update).
	PrimaryStatus     string                               `json:"primary_status"`
	PrimaryLatencyMs  *int                                 `json:"primary_latency_ms"`
	Availability7d    float64                              `json:"availability_7d"`
	ExtraModelsStatus []dto.ChannelMonitorExtraModelStatus `json:"extra_models_status"`
	// Request customization snapshot: used by the frontend to edit/display
	// the "advanced settings" section.
	TemplateID       *int64            `json:"template_id"`
	ExtraHeaders     map[string]string `json:"extra_headers"`
	BodyOverrideMode string            `json:"body_override_mode"`
	BodyOverride     map[string]any    `json:"body_override"`
}
|
||||
|
||||
// channelMonitorCheckResultResponse is one per-model result of a manual run.
type channelMonitorCheckResultResponse struct {
	Model         string `json:"model"`
	Status        string `json:"status"`
	LatencyMs     *int   `json:"latency_ms"`
	PingLatencyMs *int   `json:"ping_latency_ms"`
	Message       string `json:"message"`
	// CheckedAt is RFC3339 UTC.
	CheckedAt string `json:"checked_at"`
}
|
||||
|
||||
// channelMonitorHistoryItemResponse is one stored history entry; identical to
// a check result plus its database ID.
type channelMonitorHistoryItemResponse struct {
	ID            int64  `json:"id"`
	Model         string `json:"model"`
	Status        string `json:"status"`
	LatencyMs     *int   `json:"latency_ms"`
	PingLatencyMs *int   `json:"ping_latency_ms"`
	Message       string `json:"message"`
	// CheckedAt is RFC3339 UTC.
	CheckedAt string `json:"checked_at"`
}
|
||||
|
||||
// maskAPIKey 对 API Key 明文做脱敏:前 4 字符 + "***",长度 ≤ 4 时只显示 "***"。
|
||||
func maskAPIKey(plain string) string {
|
||||
if len(plain) <= monitorAPIKeyMaskPrefix {
|
||||
return monitorAPIKeyMaskSuffix
|
||||
}
|
||||
return plain[:monitorAPIKeyMaskPrefix] + monitorAPIKeyMaskSuffix
|
||||
}
|
||||
|
||||
// channelMonitorToResponse maps a service-layer monitor onto the admin API
// response shape. A nil input yields nil. Nil ExtraModels / ExtraHeaders are
// normalized to empty collections so JSON encodes [] / {} instead of null.
func channelMonitorToResponse(m *service.ChannelMonitor) *channelMonitorResponse {
	if m == nil {
		return nil
	}
	extras := m.ExtraModels
	if extras == nil {
		extras = []string{}
	}
	headers := m.ExtraHeaders
	if headers == nil {
		headers = map[string]string{}
	}
	resp := &channelMonitorResponse{
		ID:                  m.ID,
		Name:                m.Name,
		Provider:            m.Provider,
		Endpoint:            m.Endpoint,
		APIKeyMasked:        maskAPIKey(m.APIKey),
		APIKeyDecryptFailed: m.APIKeyDecryptFailed,
		PrimaryModel:        m.PrimaryModel,
		ExtraModels:         extras,
		GroupName:           m.GroupName,
		Enabled:             m.Enabled,
		IntervalSeconds:     m.IntervalSeconds,
		CreatedBy:           m.CreatedBy,
		CreatedAt:           m.CreatedAt.UTC().Format(time.RFC3339),
		UpdatedAt:           m.UpdatedAt.UTC().Format(time.RFC3339),
		TemplateID:          m.TemplateID,
		ExtraHeaders:        headers,
		BodyOverrideMode:    m.BodyOverrideMode,
		BodyOverride:        m.BodyOverride,
		// PrimaryStatus / PrimaryLatencyMs / Availability7d are filled by the
		// List handler after the batched aggregation.
	}
	// LastCheckedAt is optional; format it only when the monitor has run.
	if m.LastCheckedAt != nil {
		s := m.LastCheckedAt.UTC().Format(time.RFC3339)
		resp.LastCheckedAt = &s
	}
	return resp
}
|
||||
|
||||
// checkResultToResponse maps one service check result onto its API shape,
// formatting the timestamp as RFC3339 UTC.
func checkResultToResponse(r *service.CheckResult) channelMonitorCheckResultResponse {
	return channelMonitorCheckResultResponse{
		Model:         r.Model,
		Status:        r.Status,
		LatencyMs:     r.LatencyMs,
		PingLatencyMs: r.PingLatencyMs,
		Message:       r.Message,
		CheckedAt:     r.CheckedAt.UTC().Format(time.RFC3339),
	}
}
|
||||
|
||||
// historyEntryToResponse maps one stored history entry onto its API shape,
// formatting the timestamp as RFC3339 UTC.
func historyEntryToResponse(e *service.ChannelMonitorHistoryEntry) channelMonitorHistoryItemResponse {
	return channelMonitorHistoryItemResponse{
		ID:            e.ID,
		Model:         e.Model,
		Status:        e.Status,
		LatencyMs:     e.LatencyMs,
		PingLatencyMs: e.PingLatencyMs,
		Message:       e.Message,
		CheckedAt:     e.CheckedAt.UTC().Format(time.RFC3339),
	}
}
|
||||
|
||||
// ParseChannelMonitorID extracts and validates the :id path parameter
// (shared by the admin and user handlers). On failure it has already
// written a 4xx response, so the caller only needs to return.
func ParseChannelMonitorID(c *gin.Context) (int64, bool) {
	id, err := strconv.ParseInt(c.Param("id"), 10, 64)
	// IDs must be positive integers; anything else is a bad request.
	if err != nil || id <= 0 {
		response.ErrorFrom(c, infraerrors.BadRequest("INVALID_MONITOR_ID", "invalid monitor id"))
		return 0, false
	}
	return id, true
}
|
||||
|
||||
// parseListEnabled interprets the "enabled" query parameter.
// "true"/"1"/"yes" yield &true, "false"/"0"/"no" yield &false
// (case-insensitive, surrounding whitespace ignored); anything else —
// including the empty string — yields nil, meaning "no filter".
func parseListEnabled(raw string) *bool {
	normalized := strings.ToLower(strings.TrimSpace(raw))
	var v bool
	switch normalized {
	case "true", "1", "yes":
		v = true
	case "false", "0", "no":
		v = false
	default:
		return nil
	}
	return &v
}
|
||||
|
||||
// --- Handlers ---
|
||||
|
||||
// List GET /api/v1/admin/channel-monitors
//
// Paginated listing with optional provider / enabled / search filters taken
// from the query string. Page size is capped at monitorMaxPageSize.
func (h *ChannelMonitorHandler) List(c *gin.Context) {
	page, pageSize := response.ParsePagination(c)
	if pageSize > monitorMaxPageSize {
		pageSize = monitorMaxPageSize
	}

	params := service.ChannelMonitorListParams{
		Page:     page,
		PageSize: pageSize,
		Provider: strings.TrimSpace(c.Query("provider")),
		Enabled:  parseListEnabled(c.Query("enabled")),
		Search:   strings.TrimSpace(c.Query("search")),
	}

	items, total, err := h.monitorService.List(c.Request.Context(), params)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}

	// One batched summary for the whole page avoids per-row queries (N+1).
	summaries := h.batchSummaryFor(c, items)
	out := make([]*channelMonitorResponse, 0, len(items))
	for _, m := range items {
		out = append(out, buildListItemResponse(m, summaries[m.ID]))
	}
	response.Paginated(c, out, total, page, pageSize)
}
|
||||
|
||||
// batchSummaryFor aggregates latest-status + 7-day availability for all
// monitors on the page in one service call, avoiding 2 SQL queries per row
// (the classic N+1 problem). The returned map is keyed by monitor ID.
func (h *ChannelMonitorHandler) batchSummaryFor(c *gin.Context, items []*service.ChannelMonitor) map[int64]service.MonitorStatusSummary {
	ids := make([]int64, 0, len(items))
	primaryByID := make(map[int64]string, len(items))
	extrasByID := make(map[int64][]string, len(items))
	for _, m := range items {
		ids = append(ids, m.ID)
		primaryByID[m.ID] = m.PrimaryModel
		extrasByID[m.ID] = m.ExtraModels
	}
	return h.monitorService.BatchMonitorStatusSummary(c.Request.Context(), ids, primaryByID, extrasByID)
}
|
||||
|
||||
// buildListItemResponse combines a monitor with its aggregated status summary
// into one admin list row, converting the extra-model statuses into DTOs.
func buildListItemResponse(m *service.ChannelMonitor, summary service.MonitorStatusSummary) *channelMonitorResponse {
	resp := channelMonitorToResponse(m)
	resp.PrimaryStatus = summary.PrimaryStatus
	resp.PrimaryLatencyMs = summary.PrimaryLatencyMs
	resp.Availability7d = summary.Availability7d
	// Always non-nil so JSON encodes [] instead of null.
	resp.ExtraModelsStatus = make([]dto.ChannelMonitorExtraModelStatus, 0, len(summary.ExtraModels))
	for _, e := range summary.ExtraModels {
		resp.ExtraModelsStatus = append(resp.ExtraModelsStatus, dto.ChannelMonitorExtraModelStatus{
			Model:     e.Model,
			Status:    e.Status,
			LatencyMs: e.LatencyMs,
		})
	}
	return resp
}
|
||||
|
||||
// Get GET /api/v1/admin/channel-monitors/:id
//
// Returns a single monitor. The aggregated status fields in the response are
// left at their zero values here (they are only populated by List).
func (h *ChannelMonitorHandler) Get(c *gin.Context) {
	id, ok := ParseChannelMonitorID(c)
	if !ok {
		return
	}
	m, err := h.monitorService.Get(c.Request.Context(), id)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Success(c, channelMonitorToResponse(m))
}
|
||||
|
||||
// Create POST /api/v1/admin/channel-monitors
//
// Validates the JSON body, records the authenticated admin as the creator,
// and defaults Enabled to true when the field is omitted.
func (h *ChannelMonitorHandler) Create(c *gin.Context) {
	var req channelMonitorCreateRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		response.ErrorFrom(c, infraerrors.BadRequest("VALIDATION_ERROR", err.Error()))
		return
	}

	// NOTE(review): the error from GetAuthSubjectFromContext is discarded;
	// presumably auth middleware guarantees a subject here — confirm.
	subject, _ := middleware2.GetAuthSubjectFromContext(c)

	// Omitted "enabled" means the monitor starts active.
	enabled := true
	if req.Enabled != nil {
		enabled = *req.Enabled
	}

	m, err := h.monitorService.Create(c.Request.Context(), service.ChannelMonitorCreateParams{
		Name:             req.Name,
		Provider:         req.Provider,
		Endpoint:         req.Endpoint,
		APIKey:           req.APIKey,
		PrimaryModel:     req.PrimaryModel,
		ExtraModels:      req.ExtraModels,
		GroupName:        req.GroupName,
		Enabled:          enabled,
		IntervalSeconds:  req.IntervalSeconds,
		CreatedBy:        subject.UserID,
		TemplateID:       req.TemplateID,
		ExtraHeaders:     req.ExtraHeaders,
		BodyOverrideMode: req.BodyOverrideMode,
		BodyOverride:     req.BodyOverride,
	})
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Created(c, channelMonitorToResponse(m))
}
|
||||
|
||||
// Update PUT /api/v1/admin/channel-monitors/:id
//
// Partial update: nil request fields are passed through as nil pointers so
// the service leaves them unchanged. ClearTemplate forces template_id to
// null regardless of TemplateID.
func (h *ChannelMonitorHandler) Update(c *gin.Context) {
	id, ok := ParseChannelMonitorID(c)
	if !ok {
		return
	}
	var req channelMonitorUpdateRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		response.ErrorFrom(c, infraerrors.BadRequest("VALIDATION_ERROR", err.Error()))
		return
	}

	m, err := h.monitorService.Update(c.Request.Context(), id, service.ChannelMonitorUpdateParams{
		Name:             req.Name,
		Provider:         req.Provider,
		Endpoint:         req.Endpoint,
		APIKey:           req.APIKey,
		PrimaryModel:     req.PrimaryModel,
		ExtraModels:      req.ExtraModels,
		GroupName:        req.GroupName,
		Enabled:          req.Enabled,
		IntervalSeconds:  req.IntervalSeconds,
		TemplateID:       req.TemplateID,
		ClearTemplate:    req.ClearTemplate,
		ExtraHeaders:     req.ExtraHeaders,
		BodyOverrideMode: req.BodyOverrideMode,
		BodyOverride:     req.BodyOverride,
	})
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Success(c, channelMonitorToResponse(m))
}
|
||||
|
||||
// Delete DELETE /api/v1/admin/channel-monitors/:id
//
// Deletes the monitor; a success response carries no payload.
func (h *ChannelMonitorHandler) Delete(c *gin.Context) {
	id, ok := ParseChannelMonitorID(c)
	if !ok {
		return
	}
	if err := h.monitorService.Delete(c.Request.Context(), id); err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Success(c, nil)
}
|
||||
|
||||
// Run POST /api/v1/admin/channel-monitors/:id/run
//
// Triggers an immediate check of the monitor and returns the per-model
// results under the "results" key. Runs synchronously within the request.
func (h *ChannelMonitorHandler) Run(c *gin.Context) {
	id, ok := ParseChannelMonitorID(c)
	if !ok {
		return
	}
	results, err := h.monitorService.RunCheck(c.Request.Context(), id)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	out := make([]channelMonitorCheckResultResponse, 0, len(results))
	for _, r := range results {
		out = append(out, checkResultToResponse(r))
	}
	response.Success(c, gin.H{"results": out})
}
|
||||
|
||||
// History GET /api/v1/admin/channel-monitors/:id/history
//
// Returns recent check history, optionally filtered by the "model" query
// parameter, limited by "limit" (see parseHistoryLimit for defaults/caps).
func (h *ChannelMonitorHandler) History(c *gin.Context) {
	id, ok := ParseChannelMonitorID(c)
	if !ok {
		return
	}
	limit := parseHistoryLimit(c.Query("limit"))
	model := strings.TrimSpace(c.Query("model"))

	entries, err := h.monitorService.ListHistory(c.Request.Context(), id, model, limit)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	out := make([]channelMonitorHistoryItemResponse, 0, len(entries))
	for _, e := range entries {
		out = append(out, historyEntryToResponse(e))
	}
	response.Success(c, gin.H{"items": out})
}
|
||||
|
||||
// parseHistoryLimit 解析 history 接口的 limit query。
|
||||
// 使用 service 包的统一上下限常量,避免在 handler 重复定义同名魔法值。
|
||||
func parseHistoryLimit(raw string) int {
|
||||
if strings.TrimSpace(raw) == "" {
|
||||
return service.MonitorHistoryDefaultLimit
|
||||
}
|
||||
v, err := strconv.Atoi(raw)
|
||||
if err != nil || v <= 0 {
|
||||
return service.MonitorHistoryDefaultLimit
|
||||
}
|
||||
if v > service.MonitorHistoryMaxLimit {
|
||||
return service.MonitorHistoryMaxLimit
|
||||
}
|
||||
return v
|
||||
}
|
||||
@@ -0,0 +1,234 @@
|
||||
package admin
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/response"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// ChannelMonitorRequestTemplateHandler serves the admin request-template
// management endpoints for channel monitors.
type ChannelMonitorRequestTemplateHandler struct {
	// templateService performs all template persistence and application.
	templateService *service.ChannelMonitorRequestTemplateService
}
|
||||
|
||||
// NewChannelMonitorRequestTemplateHandler 创建 handler。
|
||||
func NewChannelMonitorRequestTemplateHandler(templateService *service.ChannelMonitorRequestTemplateService) *ChannelMonitorRequestTemplateHandler {
|
||||
return &ChannelMonitorRequestTemplateHandler{templateService: templateService}
|
||||
}
|
||||
|
||||
// --- DTO ---
|
||||
|
||||
// channelMonitorTemplateCreateRequest is the JSON payload for creating a
// request template. Binding tags enforce required fields, length caps and
// the provider / body-override enumerations.
type channelMonitorTemplateCreateRequest struct {
	Name             string            `json:"name" binding:"required,max=100"`
	Provider         string            `json:"provider" binding:"required,oneof=openai anthropic gemini"`
	Description      string            `json:"description" binding:"max=500"`
	ExtraHeaders     map[string]string `json:"extra_headers"`
	BodyOverrideMode string            `json:"body_override_mode" binding:"omitempty,oneof=off merge replace"`
	BodyOverride     map[string]any    `json:"body_override"`
}
|
||||
|
||||
// channelMonitorTemplateUpdateRequest is the JSON payload for a partial
// template update. All fields are pointers so an absent field means
// "leave unchanged". Note: Provider is intentionally not updatable here.
type channelMonitorTemplateUpdateRequest struct {
	Name             *string            `json:"name" binding:"omitempty,max=100"`
	Description      *string            `json:"description" binding:"omitempty,max=500"`
	ExtraHeaders     *map[string]string `json:"extra_headers"`
	BodyOverrideMode *string            `json:"body_override_mode" binding:"omitempty,oneof=off merge replace"`
	BodyOverride     *map[string]any    `json:"body_override"`
}
|
||||
|
||||
// channelMonitorTemplateResponse is the admin API representation of a
// request template, including how many monitors currently reference it.
type channelMonitorTemplateResponse struct {
	ID               int64             `json:"id"`
	Name             string            `json:"name"`
	Provider         string            `json:"provider"`
	Description      string            `json:"description"`
	ExtraHeaders     map[string]string `json:"extra_headers"`
	BodyOverrideMode string            `json:"body_override_mode"`
	BodyOverride     map[string]any    `json:"body_override"`
	// CreatedAt / UpdatedAt are RFC3339 UTC.
	CreatedAt string `json:"created_at"`
	UpdatedAt string `json:"updated_at"`
	// AssociatedMonitors counts monitors whose template_id points at this template.
	AssociatedMonitors int64 `json:"associated_monitors"`
}
|
||||
|
||||
// toResponse maps a template onto its API shape. A nil input yields nil.
// Nil ExtraHeaders are normalized to an empty map so JSON encodes {}.
//
// NOTE(review): the CountAssociatedMonitors error is silently discarded, so a
// failed count renders as 0; also, List calls this per item, issuing one count
// query per template — consider a batched count if template lists grow.
func (h *ChannelMonitorRequestTemplateHandler) toResponse(c *gin.Context, t *service.ChannelMonitorRequestTemplate) *channelMonitorTemplateResponse {
	if t == nil {
		return nil
	}
	headers := t.ExtraHeaders
	if headers == nil {
		headers = map[string]string{}
	}
	count, _ := h.templateService.CountAssociatedMonitors(c.Request.Context(), t.ID)
	return &channelMonitorTemplateResponse{
		ID:                 t.ID,
		Name:               t.Name,
		Provider:           t.Provider,
		Description:        t.Description,
		ExtraHeaders:       headers,
		BodyOverrideMode:   t.BodyOverrideMode,
		BodyOverride:       t.BodyOverride,
		CreatedAt:          t.CreatedAt.UTC().Format(time.RFC3339),
		UpdatedAt:          t.UpdatedAt.UTC().Format(time.RFC3339),
		AssociatedMonitors: count,
	}
}
|
||||
|
||||
// parseTemplateID extracts and validates the :id path parameter. On failure
// it has already written a 4xx response, so the caller only needs to return.
func parseTemplateID(c *gin.Context) (int64, bool) {
	id, err := strconv.ParseInt(c.Param("id"), 10, 64)
	// IDs must be positive integers; anything else is a bad request.
	if err != nil || id <= 0 {
		response.ErrorFrom(c, infraerrors.BadRequest("INVALID_TEMPLATE_ID", "invalid template id"))
		return 0, false
	}
	return id, true
}
|
||||
|
||||
// --- Handlers ---
|
||||
|
||||
// List GET /api/v1/admin/channel-monitor-templates?provider=anthropic
//
// Lists templates, optionally filtered by provider. Each row includes its
// associated-monitor count (computed per item inside toResponse).
func (h *ChannelMonitorRequestTemplateHandler) List(c *gin.Context) {
	items, err := h.templateService.List(c.Request.Context(), service.ChannelMonitorRequestTemplateListParams{
		Provider: strings.TrimSpace(c.Query("provider")),
	})
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	out := make([]*channelMonitorTemplateResponse, 0, len(items))
	for _, t := range items {
		out = append(out, h.toResponse(c, t))
	}
	response.Success(c, gin.H{"items": out})
}
|
||||
|
||||
// Get GET /api/v1/admin/channel-monitor-templates/:id
//
// Returns a single template with its associated-monitor count.
func (h *ChannelMonitorRequestTemplateHandler) Get(c *gin.Context) {
	id, ok := parseTemplateID(c)
	if !ok {
		return
	}
	t, err := h.templateService.Get(c.Request.Context(), id)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Success(c, h.toResponse(c, t))
}
|
||||
|
||||
// Create POST /api/v1/admin/channel-monitor-templates
//
// Validates the JSON body and creates a new request template.
func (h *ChannelMonitorRequestTemplateHandler) Create(c *gin.Context) {
	var req channelMonitorTemplateCreateRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		response.ErrorFrom(c, infraerrors.BadRequest("VALIDATION_ERROR", err.Error()))
		return
	}
	t, err := h.templateService.Create(c.Request.Context(), service.ChannelMonitorRequestTemplateCreateParams{
		Name:             req.Name,
		Provider:         req.Provider,
		Description:      req.Description,
		ExtraHeaders:     req.ExtraHeaders,
		BodyOverrideMode: req.BodyOverrideMode,
		BodyOverride:     req.BodyOverride,
	})
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Created(c, h.toResponse(c, t))
}
|
||||
|
||||
// Update PUT /api/v1/admin/channel-monitor-templates/:id
//
// Partial update: nil request fields are passed through as nil pointers so
// the service leaves them unchanged. Provider cannot be changed.
func (h *ChannelMonitorRequestTemplateHandler) Update(c *gin.Context) {
	id, ok := parseTemplateID(c)
	if !ok {
		return
	}
	var req channelMonitorTemplateUpdateRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		response.ErrorFrom(c, infraerrors.BadRequest("VALIDATION_ERROR", err.Error()))
		return
	}
	t, err := h.templateService.Update(c.Request.Context(), id, service.ChannelMonitorRequestTemplateUpdateParams{
		Name:             req.Name,
		Description:      req.Description,
		ExtraHeaders:     req.ExtraHeaders,
		BodyOverrideMode: req.BodyOverrideMode,
		BodyOverride:     req.BodyOverride,
	})
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Success(c, h.toResponse(c, t))
}
|
||||
|
||||
// Delete DELETE /api/v1/admin/channel-monitor-templates/:id
//
// Deletes the template; a success response carries no payload.
func (h *ChannelMonitorRequestTemplateHandler) Delete(c *gin.Context) {
	id, ok := parseTemplateID(c)
	if !ok {
		return
	}
	if err := h.templateService.Delete(c.Request.Context(), id); err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Success(c, nil)
}
|
||||
|
||||
// channelMonitorTemplateApplyRequest is the JSON payload for Apply.
type channelMonitorTemplateApplyRequest struct {
	// MonitorIDs is required and non-empty: the monitor IDs the user checked
	// in the picker to be overwritten. Only monitors whose current
	// template_id equals :id are actually overwritten.
	MonitorIDs []int64 `json:"monitor_ids" binding:"required,min=1"`
}
|
||||
|
||||
// Apply POST /api/v1/admin/channel-monitor-templates/:id/apply
//
// Pushes the template's current configuration onto the monitors listed in
// monitor_ids (the picker-selected subset of the template's associated
// monitors) and returns how many rows were affected.
func (h *ChannelMonitorRequestTemplateHandler) Apply(c *gin.Context) {
	id, ok := parseTemplateID(c)
	if !ok {
		return
	}
	var req channelMonitorTemplateApplyRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		response.ErrorFrom(c, infraerrors.BadRequest("VALIDATION_ERROR", err.Error()))
		return
	}
	affected, err := h.templateService.ApplyToMonitors(c.Request.Context(), id, req.MonitorIDs)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Success(c, gin.H{"affected": affected})
}
|
||||
|
||||
// associatedMonitorBriefResponse is a minimal monitor row for the picker
// dialog: just enough to identify and label each candidate.
type associatedMonitorBriefResponse struct {
	ID       int64  `json:"id"`
	Name     string `json:"name"`
	Provider string `json:"provider"`
	Enabled  bool   `json:"enabled"`
}
|
||||
|
||||
// AssociatedMonitors GET /api/v1/admin/channel-monitor-templates/:id/monitors
//
// Lists the monitors currently referencing this template, in brief form for
// the picker dialog used by Apply.
func (h *ChannelMonitorRequestTemplateHandler) AssociatedMonitors(c *gin.Context) {
	id, ok := parseTemplateID(c)
	if !ok {
		return
	}
	items, err := h.templateService.ListAssociatedMonitors(c.Request.Context(), id)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	out := make([]associatedMonitorBriefResponse, 0, len(items))
	for _, m := range items {
		out = append(out, associatedMonitorBriefResponse{
			ID: m.ID, Name: m.Name, Provider: m.Provider, Enabled: m.Enabled,
		})
	}
	response.Success(c, gin.H{"items": out})
}
|
||||
@@ -185,6 +185,10 @@ func (h *SettingHandler) GetSettings(c *gin.Context) {
|
||||
CustomEndpoints: dto.ParseCustomEndpoints(settings.CustomEndpoints),
|
||||
DefaultConcurrency: settings.DefaultConcurrency,
|
||||
DefaultBalance: settings.DefaultBalance,
|
||||
AffiliateRebateRate: settings.AffiliateRebateRate,
|
||||
AffiliateRebateFreezeHours: settings.AffiliateRebateFreezeHours,
|
||||
AffiliateRebateDurationDays: settings.AffiliateRebateDurationDays,
|
||||
AffiliateRebatePerInviteeCap: settings.AffiliateRebatePerInviteeCap,
|
||||
DefaultUserRPMLimit: settings.DefaultUserRPMLimit,
|
||||
DefaultSubscriptions: defaultSubscriptions,
|
||||
EnableModelFallback: settings.EnableModelFallback,
|
||||
@@ -236,6 +240,13 @@ func (h *SettingHandler) GetSettings(c *gin.Context) {
|
||||
PaymentCancelRateLimitWindow: paymentCfg.CancelRateLimitWindow,
|
||||
PaymentCancelRateLimitUnit: paymentCfg.CancelRateLimitUnit,
|
||||
PaymentCancelRateLimitMode: paymentCfg.CancelRateLimitMode,
|
||||
|
||||
ChannelMonitorEnabled: settings.ChannelMonitorEnabled,
|
||||
ChannelMonitorDefaultIntervalSeconds: settings.ChannelMonitorDefaultIntervalSeconds,
|
||||
|
||||
AvailableChannelsEnabled: settings.AvailableChannelsEnabled,
|
||||
|
||||
AffiliateEnabled: settings.AffiliateEnabled,
|
||||
}
|
||||
response.Success(c, systemSettingsResponseData(payload, authSourceDefaults))
|
||||
}
|
||||
@@ -333,6 +344,10 @@ type UpdateSettingsRequest struct {
|
||||
// 默认配置
|
||||
DefaultConcurrency int `json:"default_concurrency"`
|
||||
DefaultBalance float64 `json:"default_balance"`
|
||||
AffiliateRebateRate *float64 `json:"affiliate_rebate_rate"`
|
||||
AffiliateRebateFreezeHours *int `json:"affiliate_rebate_freeze_hours"`
|
||||
AffiliateRebateDurationDays *int `json:"affiliate_rebate_duration_days"`
|
||||
AffiliateRebatePerInviteeCap *float64 `json:"affiliate_rebate_per_invitee_cap"`
|
||||
DefaultUserRPMLimit int `json:"default_user_rpm_limit"`
|
||||
DefaultSubscriptions []dto.DefaultSubscriptionSetting `json:"default_subscriptions"`
|
||||
AuthSourceDefaultEmailBalance *float64 `json:"auth_source_default_email_balance"`
|
||||
@@ -427,6 +442,16 @@ type UpdateSettingsRequest struct {
|
||||
PaymentCancelRateLimitWindow *int `json:"payment_cancel_rate_limit_window"`
|
||||
PaymentCancelRateLimitUnit *string `json:"payment_cancel_rate_limit_unit"`
|
||||
PaymentCancelRateLimitMode *string `json:"payment_cancel_rate_limit_window_mode"`
|
||||
|
||||
// Channel Monitor feature switch
|
||||
ChannelMonitorEnabled *bool `json:"channel_monitor_enabled"`
|
||||
ChannelMonitorDefaultIntervalSeconds *int `json:"channel_monitor_default_interval_seconds"`
|
||||
|
||||
// Available Channels feature switch (user-facing)
|
||||
AvailableChannelsEnabled *bool `json:"available_channels_enabled"`
|
||||
|
||||
// Affiliate (邀请返利) feature switch
|
||||
AffiliateEnabled *bool `json:"affiliate_enabled"`
|
||||
}
|
||||
|
||||
// UpdateSettings 更新系统设置
|
||||
@@ -456,6 +481,43 @@ func (h *SettingHandler) UpdateSettings(c *gin.Context) {
|
||||
if req.DefaultBalance < 0 {
|
||||
req.DefaultBalance = 0
|
||||
}
|
||||
affiliateRebateRate := previousSettings.AffiliateRebateRate
|
||||
if req.AffiliateRebateRate != nil {
|
||||
affiliateRebateRate = *req.AffiliateRebateRate
|
||||
}
|
||||
if affiliateRebateRate < service.AffiliateRebateRateMin {
|
||||
affiliateRebateRate = service.AffiliateRebateRateMin
|
||||
}
|
||||
if affiliateRebateRate > service.AffiliateRebateRateMax {
|
||||
affiliateRebateRate = service.AffiliateRebateRateMax
|
||||
}
|
||||
affiliateRebateFreezeHours := previousSettings.AffiliateRebateFreezeHours
|
||||
if req.AffiliateRebateFreezeHours != nil {
|
||||
affiliateRebateFreezeHours = *req.AffiliateRebateFreezeHours
|
||||
}
|
||||
if affiliateRebateFreezeHours < 0 {
|
||||
affiliateRebateFreezeHours = service.AffiliateRebateFreezeHoursDefault
|
||||
}
|
||||
if affiliateRebateFreezeHours > service.AffiliateRebateFreezeHoursMax {
|
||||
affiliateRebateFreezeHours = service.AffiliateRebateFreezeHoursMax
|
||||
}
|
||||
affiliateRebateDurationDays := previousSettings.AffiliateRebateDurationDays
|
||||
if req.AffiliateRebateDurationDays != nil {
|
||||
affiliateRebateDurationDays = *req.AffiliateRebateDurationDays
|
||||
}
|
||||
if affiliateRebateDurationDays < 0 {
|
||||
affiliateRebateDurationDays = service.AffiliateRebateDurationDaysDefault
|
||||
}
|
||||
if affiliateRebateDurationDays > service.AffiliateRebateDurationDaysMax {
|
||||
affiliateRebateDurationDays = service.AffiliateRebateDurationDaysMax
|
||||
}
|
||||
affiliateRebatePerInviteeCap := previousSettings.AffiliateRebatePerInviteeCap
|
||||
if req.AffiliateRebatePerInviteeCap != nil {
|
||||
affiliateRebatePerInviteeCap = *req.AffiliateRebatePerInviteeCap
|
||||
}
|
||||
if affiliateRebatePerInviteeCap < 0 {
|
||||
affiliateRebatePerInviteeCap = service.AffiliateRebatePerInviteeCapDefault
|
||||
}
|
||||
// 通用表格配置:兼容旧客户端未传字段时保留当前值。
|
||||
if req.TableDefaultPageSize <= 0 {
|
||||
req.TableDefaultPageSize = previousSettings.TableDefaultPageSize
|
||||
@@ -1107,6 +1169,10 @@ func (h *SettingHandler) UpdateSettings(c *gin.Context) {
|
||||
CustomEndpoints: customEndpointsJSON,
|
||||
DefaultConcurrency: req.DefaultConcurrency,
|
||||
DefaultBalance: req.DefaultBalance,
|
||||
AffiliateRebateRate: affiliateRebateRate,
|
||||
AffiliateRebateFreezeHours: affiliateRebateFreezeHours,
|
||||
AffiliateRebateDurationDays: affiliateRebateDurationDays,
|
||||
AffiliateRebatePerInviteeCap: affiliateRebatePerInviteeCap,
|
||||
DefaultUserRPMLimit: req.DefaultUserRPMLimit,
|
||||
DefaultSubscriptions: defaultSubscriptions,
|
||||
EnableModelFallback: req.EnableModelFallback,
|
||||
@@ -1222,6 +1288,30 @@ func (h *SettingHandler) UpdateSettings(c *gin.Context) {
|
||||
}
|
||||
return previousSettings.AccountQuotaNotifyEmails
|
||||
}(),
|
||||
ChannelMonitorEnabled: func() bool {
|
||||
if req.ChannelMonitorEnabled != nil {
|
||||
return *req.ChannelMonitorEnabled
|
||||
}
|
||||
return previousSettings.ChannelMonitorEnabled
|
||||
}(),
|
||||
ChannelMonitorDefaultIntervalSeconds: func() int {
|
||||
if req.ChannelMonitorDefaultIntervalSeconds != nil {
|
||||
return *req.ChannelMonitorDefaultIntervalSeconds
|
||||
}
|
||||
return previousSettings.ChannelMonitorDefaultIntervalSeconds
|
||||
}(),
|
||||
AvailableChannelsEnabled: func() bool {
|
||||
if req.AvailableChannelsEnabled != nil {
|
||||
return *req.AvailableChannelsEnabled
|
||||
}
|
||||
return previousSettings.AvailableChannelsEnabled
|
||||
}(),
|
||||
AffiliateEnabled: func() bool {
|
||||
if req.AffiliateEnabled != nil {
|
||||
return *req.AffiliateEnabled
|
||||
}
|
||||
return previousSettings.AffiliateEnabled
|
||||
}(),
|
||||
}
|
||||
|
||||
authSourceDefaults := &service.AuthSourceDefaultSettings{
|
||||
@@ -1403,6 +1493,10 @@ func (h *SettingHandler) UpdateSettings(c *gin.Context) {
|
||||
CustomEndpoints: dto.ParseCustomEndpoints(updatedSettings.CustomEndpoints),
|
||||
DefaultConcurrency: updatedSettings.DefaultConcurrency,
|
||||
DefaultBalance: updatedSettings.DefaultBalance,
|
||||
AffiliateRebateRate: updatedSettings.AffiliateRebateRate,
|
||||
AffiliateRebateFreezeHours: updatedSettings.AffiliateRebateFreezeHours,
|
||||
AffiliateRebateDurationDays: updatedSettings.AffiliateRebateDurationDays,
|
||||
AffiliateRebatePerInviteeCap: updatedSettings.AffiliateRebatePerInviteeCap,
|
||||
DefaultUserRPMLimit: updatedSettings.DefaultUserRPMLimit,
|
||||
DefaultSubscriptions: updatedDefaultSubscriptions,
|
||||
EnableModelFallback: updatedSettings.EnableModelFallback,
|
||||
@@ -1453,6 +1547,13 @@ func (h *SettingHandler) UpdateSettings(c *gin.Context) {
|
||||
PaymentCancelRateLimitWindow: updatedPaymentCfg.CancelRateLimitWindow,
|
||||
PaymentCancelRateLimitUnit: updatedPaymentCfg.CancelRateLimitUnit,
|
||||
PaymentCancelRateLimitMode: updatedPaymentCfg.CancelRateLimitMode,
|
||||
|
||||
ChannelMonitorEnabled: updatedSettings.ChannelMonitorEnabled,
|
||||
ChannelMonitorDefaultIntervalSeconds: updatedSettings.ChannelMonitorDefaultIntervalSeconds,
|
||||
|
||||
AvailableChannelsEnabled: updatedSettings.AvailableChannelsEnabled,
|
||||
|
||||
AffiliateEnabled: updatedSettings.AffiliateEnabled,
|
||||
}
|
||||
response.Success(c, systemSettingsResponseData(payload, updatedAuthSourceDefaults))
|
||||
}
|
||||
@@ -1703,6 +1804,18 @@ func diffSettings(before *service.SystemSettings, after *service.SystemSettings,
|
||||
if before.DefaultBalance != after.DefaultBalance {
|
||||
changed = append(changed, "default_balance")
|
||||
}
|
||||
if before.AffiliateRebateRate != after.AffiliateRebateRate {
|
||||
changed = append(changed, "affiliate_rebate_rate")
|
||||
}
|
||||
if before.AffiliateRebateFreezeHours != after.AffiliateRebateFreezeHours {
|
||||
changed = append(changed, "affiliate_rebate_freeze_hours")
|
||||
}
|
||||
if before.AffiliateRebateDurationDays != after.AffiliateRebateDurationDays {
|
||||
changed = append(changed, "affiliate_rebate_duration_days")
|
||||
}
|
||||
if before.AffiliateRebatePerInviteeCap != after.AffiliateRebatePerInviteeCap {
|
||||
changed = append(changed, "affiliate_rebate_per_invitee_cap")
|
||||
}
|
||||
if !equalDefaultSubscriptions(before.DefaultSubscriptions, after.DefaultSubscriptions) {
|
||||
changed = append(changed, "default_subscriptions")
|
||||
}
|
||||
@@ -1809,6 +1922,18 @@ func diffSettings(before *service.SystemSettings, after *service.SystemSettings,
|
||||
if !equalNotifyEmailEntries(before.AccountQuotaNotifyEmails, after.AccountQuotaNotifyEmails) {
|
||||
changed = append(changed, "account_quota_notify_emails")
|
||||
}
|
||||
if before.ChannelMonitorEnabled != after.ChannelMonitorEnabled {
|
||||
changed = append(changed, "channel_monitor_enabled")
|
||||
}
|
||||
if before.ChannelMonitorDefaultIntervalSeconds != after.ChannelMonitorDefaultIntervalSeconds {
|
||||
changed = append(changed, "channel_monitor_default_interval_seconds")
|
||||
}
|
||||
if before.AvailableChannelsEnabled != after.AvailableChannelsEnabled {
|
||||
changed = append(changed, "available_channels_enabled")
|
||||
}
|
||||
if before.AffiliateEnabled != after.AffiliateEnabled {
|
||||
changed = append(changed, "affiliate_enabled")
|
||||
}
|
||||
changed = appendAuthSourceDefaultChanges(changed, beforeAuthSourceDefaults, afterAuthSourceDefaults)
|
||||
return changed
|
||||
}
|
||||
|
||||
@@ -48,6 +48,7 @@ type RegisterRequest struct {
|
||||
TurnstileToken string `json:"turnstile_token"`
|
||||
PromoCode string `json:"promo_code"` // 注册优惠码
|
||||
InvitationCode string `json:"invitation_code"` // 邀请码
|
||||
AffCode string `json:"aff_code"` // 邀请返利码
|
||||
}
|
||||
|
||||
// SendVerifyCodeRequest 发送验证码请求
|
||||
@@ -164,7 +165,15 @@ func (h *AuthHandler) Register(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
_, user, err := h.authService.RegisterWithVerification(c.Request.Context(), req.Email, req.Password, req.VerifyCode, req.PromoCode, req.InvitationCode)
|
||||
_, user, err := h.authService.RegisterWithVerification(
|
||||
c.Request.Context(),
|
||||
req.Email,
|
||||
req.Password,
|
||||
req.VerifyCode,
|
||||
req.PromoCode,
|
||||
req.InvitationCode,
|
||||
req.AffCode,
|
||||
)
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
|
||||
@@ -435,6 +435,7 @@ func (h *AuthHandler) createLinuxDoOAuthChoicePendingSession(
|
||||
|
||||
type completeLinuxDoOAuthRequest struct {
|
||||
InvitationCode string `json:"invitation_code" binding:"required"`
|
||||
AffCode string `json:"aff_code,omitempty"`
|
||||
AdoptDisplayName *bool `json:"adopt_display_name,omitempty"`
|
||||
AdoptAvatar *bool `json:"adopt_avatar,omitempty"`
|
||||
}
|
||||
@@ -518,7 +519,7 @@ func (h *AuthHandler) CompleteLinuxDoOAuthRegistration(c *gin.Context) {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
tokenPair, user, err := h.authService.LoginOrRegisterOAuthWithTokenPair(c.Request.Context(), email, username, req.InvitationCode)
|
||||
tokenPair, user, err := h.authService.LoginOrRegisterOAuthWithTokenPair(c.Request.Context(), email, username, req.InvitationCode, req.AffCode)
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
|
||||
@@ -67,6 +67,7 @@ type createPendingOAuthAccountRequest struct {
|
||||
VerifyCode string `json:"verify_code,omitempty"`
|
||||
Password string `json:"password" binding:"required,min=6"`
|
||||
InvitationCode string `json:"invitation_code,omitempty"`
|
||||
AffCode string `json:"aff_code,omitempty"`
|
||||
AdoptDisplayName *bool `json:"adopt_display_name,omitempty"`
|
||||
AdoptAvatar *bool `json:"adopt_avatar,omitempty"`
|
||||
}
|
||||
@@ -1751,6 +1752,7 @@ func (h *AuthHandler) createPendingOAuthAccount(c *gin.Context, provider string)
|
||||
user,
|
||||
strings.TrimSpace(req.InvitationCode),
|
||||
strings.TrimSpace(session.ProviderType),
|
||||
strings.TrimSpace(req.AffCode),
|
||||
); err != nil {
|
||||
_ = tx.Rollback()
|
||||
if rollbackCreatedUser(err) {
|
||||
|
||||
@@ -2210,6 +2210,7 @@ CREATE TABLE IF NOT EXISTS user_avatars (
|
||||
nil,
|
||||
nil,
|
||||
options.defaultSubAssigner,
|
||||
nil,
|
||||
)
|
||||
userSvc := service.NewUserService(userRepo, nil, nil, nil)
|
||||
var totpSvc *service.TotpService
|
||||
|
||||
@@ -582,6 +582,7 @@ func (h *AuthHandler) createOIDCOAuthChoicePendingSession(
|
||||
|
||||
type completeOIDCOAuthRequest struct {
|
||||
InvitationCode string `json:"invitation_code" binding:"required"`
|
||||
AffCode string `json:"aff_code,omitempty"`
|
||||
AdoptDisplayName *bool `json:"adopt_display_name,omitempty"`
|
||||
AdoptAvatar *bool `json:"adopt_avatar,omitempty"`
|
||||
}
|
||||
@@ -665,7 +666,7 @@ func (h *AuthHandler) CompleteOIDCOAuthRegistration(c *gin.Context) {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
tokenPair, user, err := h.authService.LoginOrRegisterOAuthWithTokenPair(c.Request.Context(), email, username, req.InvitationCode)
|
||||
tokenPair, user, err := h.authService.LoginOrRegisterOAuthWithTokenPair(c.Request.Context(), email, username, req.InvitationCode, req.AffCode)
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
|
||||
@@ -35,7 +35,7 @@ func TestAuthHandlerRevokeAllSessionsInvalidatesAccessTokens(t *testing.T) {
|
||||
ExpireHour: 1,
|
||||
},
|
||||
}
|
||||
authService := service.NewAuthService(nil, repo, nil, refreshTokenCache, cfg, nil, nil, nil, nil, nil, nil)
|
||||
authService := service.NewAuthService(nil, repo, nil, refreshTokenCache, cfg, nil, nil, nil, nil, nil, nil, nil)
|
||||
handler := &AuthHandler{authService: authService}
|
||||
|
||||
recorder := httptest.NewRecorder()
|
||||
|
||||
@@ -481,6 +481,7 @@ func (h *AuthHandler) wechatPaymentResumeService() *service.PaymentResumeService
|
||||
|
||||
type completeWeChatOAuthRequest struct {
|
||||
InvitationCode string `json:"invitation_code" binding:"required"`
|
||||
AffCode string `json:"aff_code,omitempty"`
|
||||
AdoptDisplayName *bool `json:"adopt_display_name,omitempty"`
|
||||
AdoptAvatar *bool `json:"adopt_avatar,omitempty"`
|
||||
}
|
||||
@@ -547,7 +548,7 @@ func (h *AuthHandler) CompleteWeChatOAuthRegistration(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
tokenPair, user, err := h.authService.LoginOrRegisterOAuthWithTokenPair(c.Request.Context(), email, username, req.InvitationCode)
|
||||
tokenPair, user, err := h.authService.LoginOrRegisterOAuthWithTokenPair(c.Request.Context(), email, username, req.InvitationCode, req.AffCode)
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
|
||||
@@ -1399,6 +1399,7 @@ func newWeChatOAuthTestHandlerWithSettings(t *testing.T, invitationEnabled bool,
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
|
||||
return &AuthHandler{
|
||||
|
||||
283
backend/internal/handler/available_channel_handler.go
Normal file
283
backend/internal/handler/available_channel_handler.go
Normal file
@@ -0,0 +1,283 @@
|
||||
package handler
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/response"
|
||||
"github.com/Wei-Shaw/sub2api/internal/server/middleware"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// AvailableChannelHandler serves the user-facing "available channels" query.
//
// The user-side endpoint delegates to ChannelService.ListAvailable and applies
// several filters before responding:
//  1. Row filter: keep only channels whose status is Active and whose groups
//     intersect the groups the current user can access;
//  2. Group filter: a channel's Groups are trimmed down to the ones the user
//     can access;
//  3. Platform filter: a channel's SupportedModels keep only models whose
//     platform appears among the user's visible Groups. This prevents
//     cross-platform information leaks such as "a channel is attached to both
//     antigravity and anthropic groups; the user can only access antigravity
//     yet still sees anthropic models";
//  4. Field whitelist: only the fields users need are returned (admin fields
//     such as BillingModelSource / RestrictModels / internal IDs / Status are
//     omitted).
type AvailableChannelHandler struct {
	channelService *service.ChannelService
	apiKeyService  *service.APIKeyService
	settingService *service.SettingService
}

// NewAvailableChannelHandler creates the user-facing available-channel handler.
func NewAvailableChannelHandler(
	channelService *service.ChannelService,
	apiKeyService *service.APIKeyService,
	settingService *service.SettingService,
) *AvailableChannelHandler {
	return &AvailableChannelHandler{
		channelService: channelService,
		apiKeyService:  apiKeyService,
		settingService: settingService,
	}
}
|
||||
|
||||
// featureEnabled 返回 available-channels 开关是否启用。默认关闭(opt-in)。
|
||||
func (h *AvailableChannelHandler) featureEnabled(c *gin.Context) bool {
|
||||
if h.settingService == nil {
|
||||
return false
|
||||
}
|
||||
return h.settingService.GetAvailableChannelsRuntime(c.Request.Context()).Enabled
|
||||
}
|
||||
|
||||
// userAvailableGroup is the user-visible group summary (whitelisted fields).
//
// The frontend uses IsExclusive to distinguish exclusive vs public groups and
// SubscriptionType to distinguish subscription vs standard groups (subscription
// rows get a deepened visual). RateMultiplier is the default multiplier; the
// user's personal multipliers come from /groups/rates on the frontend, keeping
// parity with the API-key page.
type userAvailableGroup struct {
	ID               int64   `json:"id"`
	Name             string  `json:"name"`
	Platform         string  `json:"platform"`
	SubscriptionType string  `json:"subscription_type"`
	RateMultiplier   float64 `json:"rate_multiplier"`
	IsExclusive      bool    `json:"is_exclusive"`
}

// userSupportedModelPricing is the whitelist of pricing fields users may see.
type userSupportedModelPricing struct {
	BillingMode      string                   `json:"billing_mode"`
	InputPrice       *float64                 `json:"input_price"`
	OutputPrice      *float64                 `json:"output_price"`
	CacheWritePrice  *float64                 `json:"cache_write_price"`
	CacheReadPrice   *float64                 `json:"cache_read_price"`
	ImageOutputPrice *float64                 `json:"image_output_price"`
	PerRequestPrice  *float64                 `json:"per_request_price"`
	Intervals        []userPricingIntervalDTO `json:"intervals"`
}

// userPricingIntervalDTO is the pricing-interval whitelist (internal ID,
// SortOrder and other fields the frontend never renders are dropped).
type userPricingIntervalDTO struct {
	MinTokens       int      `json:"min_tokens"`
	MaxTokens       *int     `json:"max_tokens"`
	TierLabel       string   `json:"tier_label,omitempty"`
	InputPrice      *float64 `json:"input_price"`
	OutputPrice     *float64 `json:"output_price"`
	CacheWritePrice *float64 `json:"cache_write_price"`
	CacheReadPrice  *float64 `json:"cache_read_price"`
	PerRequestPrice *float64 `json:"per_request_price"`
}

// userSupportedModel is a supported-model entry visible to users.
type userSupportedModel struct {
	Name     string                     `json:"name"`
	Platform string                     `json:"platform"`
	Pricing  *userSupportedModelPricing `json:"pricing"`
}

// userChannelPlatformSection is the per-platform sub-view within one channel:
// the groups visible to the user plus the models that platform supports.
// Aggregating by platform lets the frontend render the channel name once as a
// row group, with the platform rows laid out in section order below it.
type userChannelPlatformSection struct {
	Platform        string               `json:"platform"`
	Groups          []userAvailableGroup `json:"groups"`
	SupportedModels []userSupportedModel `json:"supported_models"`
}

// userAvailableChannel is a channel entry visible to users (whitelisted fields).
//
// Each channel aggregates into one record with an embedded platforms array:
// every section corresponds to one platform and carries that platform's
// groups and supported_models.
type userAvailableChannel struct {
	Name        string                       `json:"name"`
	Description string                       `json:"description"`
	Platforms   []userChannelPlatformSection `json:"platforms"`
}
|
||||
|
||||
// List 列出当前用户可见的「可用渠道」。
|
||||
// GET /api/v1/channels/available
|
||||
func (h *AvailableChannelHandler) List(c *gin.Context) {
|
||||
subject, ok := middleware.GetAuthSubjectFromContext(c)
|
||||
if !ok {
|
||||
response.Unauthorized(c, "User not authenticated")
|
||||
return
|
||||
}
|
||||
|
||||
// Feature 未启用时返回空数组(不暴露渠道信息)。检查放在认证之后,
|
||||
// 保持与未开关前的 401 行为一致:未登录先 401,登录后再按开关决定。
|
||||
if !h.featureEnabled(c) {
|
||||
response.Success(c, []userAvailableChannel{})
|
||||
return
|
||||
}
|
||||
|
||||
userGroups, err := h.apiKeyService.GetAvailableGroups(c.Request.Context(), subject.UserID)
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
allowedGroupIDs := make(map[int64]struct{}, len(userGroups))
|
||||
for i := range userGroups {
|
||||
allowedGroupIDs[userGroups[i].ID] = struct{}{}
|
||||
}
|
||||
|
||||
channels, err := h.channelService.ListAvailable(c.Request.Context())
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
|
||||
out := make([]userAvailableChannel, 0, len(channels))
|
||||
for _, ch := range channels {
|
||||
if ch.Status != service.StatusActive {
|
||||
continue
|
||||
}
|
||||
visibleGroups := filterUserVisibleGroups(ch.Groups, allowedGroupIDs)
|
||||
if len(visibleGroups) == 0 {
|
||||
continue
|
||||
}
|
||||
sections := buildPlatformSections(ch, visibleGroups)
|
||||
if len(sections) == 0 {
|
||||
continue
|
||||
}
|
||||
out = append(out, userAvailableChannel{
|
||||
Name: ch.Name,
|
||||
Description: ch.Description,
|
||||
Platforms: sections,
|
||||
})
|
||||
}
|
||||
|
||||
response.Success(c, out)
|
||||
}
|
||||
|
||||
// buildPlatformSections 把一个渠道按 visibleGroups 的平台集合拆成有序的 section 列表:
|
||||
// 每个 section 对应一个平台,只包含该平台的 groups 和 supported_models。
|
||||
// 输出按 platform 字母序稳定排序,便于前端等效比较与回归测试。
|
||||
func buildPlatformSections(
|
||||
ch service.AvailableChannel,
|
||||
visibleGroups []userAvailableGroup,
|
||||
) []userChannelPlatformSection {
|
||||
groupsByPlatform := make(map[string][]userAvailableGroup, 4)
|
||||
for _, g := range visibleGroups {
|
||||
if g.Platform == "" {
|
||||
continue
|
||||
}
|
||||
groupsByPlatform[g.Platform] = append(groupsByPlatform[g.Platform], g)
|
||||
}
|
||||
if len(groupsByPlatform) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
platforms := make([]string, 0, len(groupsByPlatform))
|
||||
for p := range groupsByPlatform {
|
||||
platforms = append(platforms, p)
|
||||
}
|
||||
sort.Strings(platforms)
|
||||
|
||||
sections := make([]userChannelPlatformSection, 0, len(platforms))
|
||||
for _, platform := range platforms {
|
||||
platformSet := map[string]struct{}{platform: {}}
|
||||
sections = append(sections, userChannelPlatformSection{
|
||||
Platform: platform,
|
||||
Groups: groupsByPlatform[platform],
|
||||
SupportedModels: toUserSupportedModels(ch.SupportedModels, platformSet),
|
||||
})
|
||||
}
|
||||
return sections
|
||||
}
|
||||
|
||||
// filterUserVisibleGroups 仅保留用户可访问的分组。
|
||||
func filterUserVisibleGroups(
|
||||
groups []service.AvailableGroupRef,
|
||||
allowed map[int64]struct{},
|
||||
) []userAvailableGroup {
|
||||
visible := make([]userAvailableGroup, 0, len(groups))
|
||||
for _, g := range groups {
|
||||
if _, ok := allowed[g.ID]; !ok {
|
||||
continue
|
||||
}
|
||||
visible = append(visible, userAvailableGroup{
|
||||
ID: g.ID,
|
||||
Name: g.Name,
|
||||
Platform: g.Platform,
|
||||
SubscriptionType: g.SubscriptionType,
|
||||
RateMultiplier: g.RateMultiplier,
|
||||
IsExclusive: g.IsExclusive,
|
||||
})
|
||||
}
|
||||
return visible
|
||||
}
|
||||
|
||||
// toUserSupportedModels 将 service 层支持模型转换为用户 DTO(字段白名单)。
|
||||
// 仅保留平台在 allowedPlatforms 中的条目,防止跨平台模型信息泄漏。
|
||||
// allowedPlatforms 为 nil 时不做平台过滤(保留全部,供测试或明确无过滤场景使用)。
|
||||
func toUserSupportedModels(
|
||||
src []service.SupportedModel,
|
||||
allowedPlatforms map[string]struct{},
|
||||
) []userSupportedModel {
|
||||
out := make([]userSupportedModel, 0, len(src))
|
||||
for i := range src {
|
||||
m := src[i]
|
||||
if allowedPlatforms != nil {
|
||||
if _, ok := allowedPlatforms[m.Platform]; !ok {
|
||||
continue
|
||||
}
|
||||
}
|
||||
out = append(out, userSupportedModel{
|
||||
Name: m.Name,
|
||||
Platform: m.Platform,
|
||||
Pricing: toUserPricing(m.Pricing),
|
||||
})
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// toUserPricing 将 service 层定价转换为用户 DTO;入参为 nil 时返回 nil。
|
||||
func toUserPricing(p *service.ChannelModelPricing) *userSupportedModelPricing {
|
||||
if p == nil {
|
||||
return nil
|
||||
}
|
||||
intervals := make([]userPricingIntervalDTO, 0, len(p.Intervals))
|
||||
for _, iv := range p.Intervals {
|
||||
intervals = append(intervals, userPricingIntervalDTO{
|
||||
MinTokens: iv.MinTokens,
|
||||
MaxTokens: iv.MaxTokens,
|
||||
TierLabel: iv.TierLabel,
|
||||
InputPrice: iv.InputPrice,
|
||||
OutputPrice: iv.OutputPrice,
|
||||
CacheWritePrice: iv.CacheWritePrice,
|
||||
CacheReadPrice: iv.CacheReadPrice,
|
||||
PerRequestPrice: iv.PerRequestPrice,
|
||||
})
|
||||
}
|
||||
billingMode := string(p.BillingMode)
|
||||
if billingMode == "" {
|
||||
billingMode = string(service.BillingModeToken)
|
||||
}
|
||||
return &userSupportedModelPricing{
|
||||
BillingMode: billingMode,
|
||||
InputPrice: p.InputPrice,
|
||||
OutputPrice: p.OutputPrice,
|
||||
CacheWritePrice: p.CacheWritePrice,
|
||||
CacheReadPrice: p.CacheReadPrice,
|
||||
ImageOutputPrice: p.ImageOutputPrice,
|
||||
PerRequestPrice: p.PerRequestPrice,
|
||||
Intervals: intervals,
|
||||
}
|
||||
}
|
||||
157
backend/internal/handler/available_channel_handler_test.go
Normal file
157
backend/internal/handler/available_channel_handler_test.go
Normal file
@@ -0,0 +1,157 @@
|
||||
//go:build unit
|
||||
|
||||
package handler
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestUserAvailableChannel_Unauthenticated401(t *testing.T) {
	// Without an injected AuthSubject the handler must return 401 and never
	// touch its service dependencies.
	gin.SetMode(gin.TestMode)
	h := &AvailableChannelHandler{} // nil services — the 401 path never calls them
	w := httptest.NewRecorder()
	c, _ := gin.CreateTestContext(w)
	c.Request = httptest.NewRequest(http.MethodGet, "/api/v1/channels/available", nil)

	h.List(c)

	require.Equal(t, http.StatusUnauthorized, w.Code)
}
|
||||
|
||||
func TestFilterUserVisibleGroups_IntersectionOnly(t *testing.T) {
	// The channel is attached to {g1, g2, g3} while the user is allowed only
	// {g1, g3} — the response must contain exactly g1/g3.
	groups := []service.AvailableGroupRef{
		{ID: 1, Name: "g1", Platform: "anthropic"},
		{ID: 2, Name: "g2", Platform: "anthropic"},
		{ID: 3, Name: "g3", Platform: "openai"},
	}
	allowed := map[int64]struct{}{1: {}, 3: {}}

	visible := filterUserVisibleGroups(groups, allowed)
	require.Len(t, visible, 2)
	ids := []int64{visible[0].ID, visible[1].ID}
	require.ElementsMatch(t, []int64{1, 3}, ids)
}
|
||||
|
||||
func TestToUserSupportedModels_FiltersByAllowedPlatforms(t *testing.T) {
	// The user's accessible groups cover only anthropic; models on the
	// anthropic platform are kept while the openai model is culled.
	src := []service.SupportedModel{
		{Name: "claude-sonnet-4-6", Platform: "anthropic", Pricing: nil},
		{Name: "gpt-4o", Platform: "openai", Pricing: nil},
	}
	allowed := map[string]struct{}{"anthropic": {}}
	out := toUserSupportedModels(src, allowed)
	require.Len(t, out, 1)
	require.Equal(t, "claude-sonnet-4-6", out[0].Name)
}
|
||||
|
||||
func TestToUserSupportedModels_NilAllowedPlatformsKeepsAll(t *testing.T) {
	// Explicitly passing a nil allowedPlatforms means "no filtering".
	src := []service.SupportedModel{
		{Name: "a", Platform: "anthropic"},
		{Name: "b", Platform: "openai"},
	}
	require.Len(t, toUserSupportedModels(src, nil), 2)
}
|
||||
|
||||
func TestUserAvailableChannel_FieldWhitelist(t *testing.T) {
|
||||
// 通过序列化 userAvailableChannel 结构体验证响应形状:
|
||||
// 只有 name / description / platforms;不含管理端字段。
|
||||
row := userAvailableChannel{
|
||||
Name: "ch",
|
||||
Description: "d",
|
||||
Platforms: []userChannelPlatformSection{
|
||||
{
|
||||
Platform: "anthropic",
|
||||
Groups: []userAvailableGroup{{ID: 1, Name: "g1", Platform: "anthropic"}},
|
||||
SupportedModels: []userSupportedModel{},
|
||||
},
|
||||
},
|
||||
}
|
||||
raw, err := json.Marshal(row)
|
||||
require.NoError(t, err)
|
||||
var decoded map[string]any
|
||||
require.NoError(t, json.Unmarshal(raw, &decoded))
|
||||
|
||||
for _, key := range []string{"id", "status", "billing_model_source", "restrict_models"} {
|
||||
_, exists := decoded[key]
|
||||
require.Falsef(t, exists, "user DTO must not expose %q", key)
|
||||
}
|
||||
for _, key := range []string{"name", "description", "platforms"} {
|
||||
_, exists := decoded[key]
|
||||
require.Truef(t, exists, "user DTO must expose %q", key)
|
||||
}
|
||||
|
||||
// 验证 section 的字段(platform / groups / supported_models)。
|
||||
rawSection, err := json.Marshal(row.Platforms[0])
|
||||
require.NoError(t, err)
|
||||
var sectionDecoded map[string]any
|
||||
require.NoError(t, json.Unmarshal(rawSection, §ionDecoded))
|
||||
for _, key := range []string{"platform", "groups", "supported_models"} {
|
||||
_, exists := sectionDecoded[key]
|
||||
require.Truef(t, exists, "platform section must expose %q", key)
|
||||
}
|
||||
|
||||
// Group DTO 暴露区分专属/公开、订阅类型、默认倍率所需的字段,
|
||||
// 前端据此渲染 GroupBadge 并与 API 密钥页保持一致的视觉。
|
||||
rawGroup, err := json.Marshal(row.Platforms[0].Groups[0])
|
||||
require.NoError(t, err)
|
||||
var groupDecoded map[string]any
|
||||
require.NoError(t, json.Unmarshal(rawGroup, &groupDecoded))
|
||||
for _, key := range []string{"id", "name", "platform", "subscription_type", "rate_multiplier", "is_exclusive"} {
|
||||
_, exists := groupDecoded[key]
|
||||
require.Truef(t, exists, "group DTO must expose %q", key)
|
||||
}
|
||||
|
||||
// pricing interval 白名单:不应暴露 id / sort_order。
|
||||
pricing := toUserPricing(&service.ChannelModelPricing{
|
||||
BillingMode: service.BillingModeToken,
|
||||
Intervals: []service.PricingInterval{
|
||||
{ID: 7, MinTokens: 0, MaxTokens: nil, SortOrder: 3},
|
||||
},
|
||||
})
|
||||
require.NotNil(t, pricing)
|
||||
require.Len(t, pricing.Intervals, 1)
|
||||
rawIv, err := json.Marshal(pricing.Intervals[0])
|
||||
require.NoError(t, err)
|
||||
var ivDecoded map[string]any
|
||||
require.NoError(t, json.Unmarshal(rawIv, &ivDecoded))
|
||||
for _, key := range []string{"id", "pricing_id", "sort_order"} {
|
||||
_, exists := ivDecoded[key]
|
||||
require.Falsef(t, exists, "user pricing interval must not expose %q", key)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildPlatformSections_GroupsByPlatform(t *testing.T) {
	// A channel spanning anthropic / openai / an empty platform should produce
	// exactly 2 sections, sorted alphabetically by platform, each containing
	// only same-platform groups and supported_models.
	ch := service.AvailableChannel{
		Name: "ch",
		SupportedModels: []service.SupportedModel{
			{Name: "claude-sonnet-4-6", Platform: "anthropic"},
			{Name: "gpt-4o", Platform: "openai"},
		},
	}
	visible := []userAvailableGroup{
		{ID: 1, Name: "g-openai", Platform: "openai"},
		{ID: 2, Name: "g-ant", Platform: "anthropic"},
		{ID: 3, Name: "g-empty", Platform: ""},
	}
	sections := buildPlatformSections(ch, visible)
	require.Len(t, sections, 2)
	require.Equal(t, "anthropic", sections[0].Platform)
	require.Equal(t, "openai", sections[1].Platform)
	require.Len(t, sections[0].Groups, 1)
	require.Equal(t, int64(2), sections[0].Groups[0].ID)
	require.Len(t, sections[0].SupportedModels, 1)
	require.Equal(t, "claude-sonnet-4-6", sections[0].SupportedModels[0].Name)
}
|
||||
176
backend/internal/handler/channel_monitor_user_handler.go
Normal file
176
backend/internal/handler/channel_monitor_user_handler.go
Normal file
@@ -0,0 +1,176 @@
|
||||
package handler
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/handler/admin"
|
||||
"github.com/Wei-Shaw/sub2api/internal/handler/dto"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/response"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// ChannelMonitorUserHandler is the read-only, user-facing channel-monitor handler.
type ChannelMonitorUserHandler struct {
	monitorService *service.ChannelMonitorService
	settingService *service.SettingService
}

// NewChannelMonitorUserHandler creates the handler.
// settingService is read before every request for the feature switch; while
// disabled, List returns an empty list and GetStatus returns 404.
func NewChannelMonitorUserHandler(
	monitorService *service.ChannelMonitorService,
	settingService *service.SettingService,
) *ChannelMonitorUserHandler {
	return &ChannelMonitorUserHandler{
		monitorService: monitorService,
		settingService: settingService,
	}
}
|
||||
|
||||
// featureEnabled 返回当前渠道监控功能是否开启。
|
||||
// settingService 为 nil(测试场景)视为启用。
|
||||
func (h *ChannelMonitorUserHandler) featureEnabled(c *gin.Context) bool {
|
||||
if h.settingService == nil {
|
||||
return true
|
||||
}
|
||||
return h.settingService.GetChannelMonitorRuntime(c.Request.Context()).Enabled
|
||||
}
|
||||
|
||||
// --- Response ---

// channelMonitorUserListItem is one row of the user-facing monitor list.
type channelMonitorUserListItem struct {
	ID                   int64                                `json:"id"`
	Name                 string                               `json:"name"`
	Provider             string                               `json:"provider"`
	GroupName            string                               `json:"group_name"`
	PrimaryModel         string                               `json:"primary_model"`
	PrimaryStatus        string                               `json:"primary_status"`
	PrimaryLatencyMs     *int                                 `json:"primary_latency_ms"`
	PrimaryPingLatencyMs *int                                 `json:"primary_ping_latency_ms"`
	Availability7d       float64                              `json:"availability_7d"`
	ExtraModels          []dto.ChannelMonitorExtraModelStatus `json:"extra_models"`
	Timeline             []channelMonitorUserTimelinePoint    `json:"timeline"`
}

// channelMonitorUserTimelinePoint is a timeline point from the primary model's
// most recent checks. Used only by the user-view list response; the admin view
// does not use it.
type channelMonitorUserTimelinePoint struct {
	Status        string `json:"status"`
	LatencyMs     *int   `json:"latency_ms"`
	PingLatencyMs *int   `json:"ping_latency_ms"`
	CheckedAt     string `json:"checked_at"`
}

// channelMonitorUserDetailResponse is the user-facing per-channel detail payload.
type channelMonitorUserDetailResponse struct {
	ID        int64                         `json:"id"`
	Name      string                        `json:"name"`
	Provider  string                        `json:"provider"`
	GroupName string                        `json:"group_name"`
	Models    []channelMonitorUserModelStat `json:"models"`
}

// channelMonitorUserModelStat carries per-model availability statistics.
type channelMonitorUserModelStat struct {
	Model           string  `json:"model"`
	LatestStatus    string  `json:"latest_status"`
	LatestLatencyMs *int    `json:"latest_latency_ms"`
	Availability7d  float64 `json:"availability_7d"`
	Availability15d float64 `json:"availability_15d"`
	Availability30d float64 `json:"availability_30d"`
	AvgLatency7dMs  *int    `json:"avg_latency_7d_ms"`
}
|
||||
|
||||
func userMonitorViewToItem(v *service.UserMonitorView) channelMonitorUserListItem {
|
||||
extras := make([]dto.ChannelMonitorExtraModelStatus, 0, len(v.ExtraModels))
|
||||
for _, e := range v.ExtraModels {
|
||||
extras = append(extras, dto.ChannelMonitorExtraModelStatus{
|
||||
Model: e.Model,
|
||||
Status: e.Status,
|
||||
LatencyMs: e.LatencyMs,
|
||||
})
|
||||
}
|
||||
timeline := make([]channelMonitorUserTimelinePoint, 0, len(v.Timeline))
|
||||
for _, p := range v.Timeline {
|
||||
timeline = append(timeline, channelMonitorUserTimelinePoint{
|
||||
Status: p.Status,
|
||||
LatencyMs: p.LatencyMs,
|
||||
PingLatencyMs: p.PingLatencyMs,
|
||||
CheckedAt: p.CheckedAt.UTC().Format(time.RFC3339),
|
||||
})
|
||||
}
|
||||
return channelMonitorUserListItem{
|
||||
ID: v.ID,
|
||||
Name: v.Name,
|
||||
Provider: v.Provider,
|
||||
GroupName: v.GroupName,
|
||||
PrimaryModel: v.PrimaryModel,
|
||||
PrimaryStatus: v.PrimaryStatus,
|
||||
PrimaryLatencyMs: v.PrimaryLatencyMs,
|
||||
PrimaryPingLatencyMs: v.PrimaryPingLatencyMs,
|
||||
Availability7d: v.Availability7d,
|
||||
ExtraModels: extras,
|
||||
Timeline: timeline,
|
||||
}
|
||||
}
|
||||
|
||||
func userMonitorDetailToResponse(d *service.UserMonitorDetail) *channelMonitorUserDetailResponse {
|
||||
models := make([]channelMonitorUserModelStat, 0, len(d.Models))
|
||||
for _, m := range d.Models {
|
||||
models = append(models, channelMonitorUserModelStat{
|
||||
Model: m.Model,
|
||||
LatestStatus: m.LatestStatus,
|
||||
LatestLatencyMs: m.LatestLatencyMs,
|
||||
Availability7d: m.Availability7d,
|
||||
Availability15d: m.Availability15d,
|
||||
Availability30d: m.Availability30d,
|
||||
AvgLatency7dMs: m.AvgLatency7dMs,
|
||||
})
|
||||
}
|
||||
return &channelMonitorUserDetailResponse{
|
||||
ID: d.ID,
|
||||
Name: d.Name,
|
||||
Provider: d.Provider,
|
||||
GroupName: d.GroupName,
|
||||
Models: models,
|
||||
}
|
||||
}
|
||||
|
||||
// --- Handlers ---
|
||||
|
||||
// List GET /api/v1/channel-monitors
|
||||
func (h *ChannelMonitorUserHandler) List(c *gin.Context) {
|
||||
if !h.featureEnabled(c) {
|
||||
response.Success(c, gin.H{"items": []channelMonitorUserListItem{}})
|
||||
return
|
||||
}
|
||||
views, err := h.monitorService.ListUserView(c.Request.Context())
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
items := make([]channelMonitorUserListItem, 0, len(views))
|
||||
for _, v := range views {
|
||||
items = append(items, userMonitorViewToItem(v))
|
||||
}
|
||||
response.Success(c, gin.H{"items": items})
|
||||
}
|
||||
|
||||
// GetStatus GET /api/v1/channel-monitors/:id/status
|
||||
func (h *ChannelMonitorUserHandler) GetStatus(c *gin.Context) {
|
||||
if !h.featureEnabled(c) {
|
||||
response.ErrorFrom(c, service.ErrChannelMonitorNotFound)
|
||||
return
|
||||
}
|
||||
// 复用 admin.ParseChannelMonitorID 保持错误码与日志一致。
|
||||
id, ok := admin.ParseChannelMonitorID(c)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
detail, err := h.monitorService.GetUserDetail(c.Request.Context(), id)
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
response.Success(c, userMonitorDetailToResponse(detail))
|
||||
}
|
||||
10
backend/internal/handler/dto/channel_monitor.go
Normal file
10
backend/internal/handler/dto/channel_monitor.go
Normal file
@@ -0,0 +1,10 @@
|
||||
package dto
|
||||
|
||||
// ChannelMonitorExtraModelStatus is the most recent status of an extra
// (non-primary) monitored model on a channel.
// It is shared by the admin handler (List response) and the user handler
// (List response); the fields must stay identical so the frontend receives
// one uniform shape.
type ChannelMonitorExtraModelStatus struct {
	Model     string `json:"model"`
	Status    string `json:"status"`
	LatencyMs *int   `json:"latency_ms"`
}
|
||||
@@ -0,0 +1,70 @@
|
||||
package dto
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
)
|
||||
|
||||
// TestPublicSettingsInjectionPayload_SchemaDoesNotDrift guarantees the SSR
// injection struct exposes every JSON field consumed by the frontend.
//
// Why this test exists: before we extracted a named PublicSettingsInjectionPayload
// type, the inline struct was manually kept in sync with dto.PublicSettings and
// drifted — ChannelMonitorEnabled / AvailableChannelsEnabled were missing, which
// made the frontend read `undefined` on refresh and hide the "可用渠道" menu
// until the async /api/v1/settings/public round-trip finished.
//
// This test compares the two JSON-tag sets and fails if injection is missing
// any field that dto.PublicSettings exposes. Adding a new feature flag with
// only a DTO entry will fail this test until the injection struct is updated.
//
// Intentional exclusions (fields present on dto.PublicSettings that SSR does
// not need to inject) are listed in `dtoOnlyFields` below with a reason.
func TestPublicSettingsInjectionPayload_SchemaDoesNotDrift(t *testing.T) {
	injection := jsonTags(reflect.TypeOf(service.PublicSettingsInjectionPayload{}))
	dtoKeys := jsonTags(reflect.TypeOf(PublicSettings{}))

	// Fields that legitimately live only on the DTO. Keep tiny; document each.
	dtoOnlyFields := map[string]string{
		// sora_client_enabled is an upstream-only field the fork does not surface.
		"sora_client_enabled": "upstream-only field, not used on this fork",
		// force_email_on_third_party_signup lives on the DTO but is not injected via SSR.
		"force_email_on_third_party_signup": "auth-source default, not a feature flag",
	}

	// Every DTO key must either exist on the injection struct or be an
	// explicitly documented exclusion.
	var missing []string
	for key := range dtoKeys {
		if _, ok := injection[key]; ok {
			continue
		}
		if _, allowed := dtoOnlyFields[key]; allowed {
			continue
		}
		missing = append(missing, key)
	}
	if len(missing) > 0 {
		t.Fatalf("service.PublicSettingsInjectionPayload is missing JSON fields present on dto.PublicSettings: %s\n"+
			"add the field to PublicSettingsInjectionPayload (and GetPublicSettingsForInjection), or "+
			"document the exclusion in dtoOnlyFields with a reason.", strings.Join(missing, ", "))
	}
}
|
||||
|
||||
func jsonTags(t reflect.Type) map[string]struct{} {
|
||||
out := make(map[string]struct{})
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
tag := f.Tag.Get("json")
|
||||
if tag == "" || tag == "-" {
|
||||
continue
|
||||
}
|
||||
name := strings.SplitN(tag, ",", 2)[0]
|
||||
if name == "" {
|
||||
continue
|
||||
}
|
||||
out[name] = struct{}{}
|
||||
}
|
||||
return out
|
||||
}
|
||||
@@ -106,10 +106,14 @@ type SystemSettings struct {
|
||||
CustomMenuItems []CustomMenuItem `json:"custom_menu_items"`
|
||||
CustomEndpoints []CustomEndpoint `json:"custom_endpoints"`
|
||||
|
||||
DefaultConcurrency int `json:"default_concurrency"`
|
||||
DefaultBalance float64 `json:"default_balance"`
|
||||
DefaultUserRPMLimit int `json:"default_user_rpm_limit"`
|
||||
DefaultSubscriptions []DefaultSubscriptionSetting `json:"default_subscriptions"`
|
||||
DefaultConcurrency int `json:"default_concurrency"`
|
||||
DefaultBalance float64 `json:"default_balance"`
|
||||
AffiliateRebateRate float64 `json:"affiliate_rebate_rate"`
|
||||
AffiliateRebateFreezeHours int `json:"affiliate_rebate_freeze_hours"`
|
||||
AffiliateRebateDurationDays int `json:"affiliate_rebate_duration_days"`
|
||||
AffiliateRebatePerInviteeCap float64 `json:"affiliate_rebate_per_invitee_cap"`
|
||||
DefaultUserRPMLimit int `json:"default_user_rpm_limit"`
|
||||
DefaultSubscriptions []DefaultSubscriptionSetting `json:"default_subscriptions"`
|
||||
|
||||
// Model fallback configuration
|
||||
EnableModelFallback bool `json:"enable_model_fallback"`
|
||||
@@ -184,6 +188,16 @@ type SystemSettings struct {
|
||||
BalanceLowNotifyRechargeURL string `json:"balance_low_notify_recharge_url"`
|
||||
AccountQuotaNotifyEnabled bool `json:"account_quota_notify_enabled"`
|
||||
AccountQuotaNotifyEmails []NotifyEmailEntry `json:"account_quota_notify_emails"`
|
||||
|
||||
// Channel Monitor feature switch
|
||||
ChannelMonitorEnabled bool `json:"channel_monitor_enabled"`
|
||||
ChannelMonitorDefaultIntervalSeconds int `json:"channel_monitor_default_interval_seconds"`
|
||||
|
||||
// Available Channels feature switch (user-facing aggregate view)
|
||||
AvailableChannelsEnabled bool `json:"available_channels_enabled"`
|
||||
|
||||
// Affiliate (邀请返利) feature switch
|
||||
AffiliateEnabled bool `json:"affiliate_enabled"`
|
||||
}
|
||||
|
||||
type DefaultSubscriptionSetting struct {
|
||||
@@ -231,6 +245,13 @@ type PublicSettings struct {
|
||||
AccountQuotaNotifyEnabled bool `json:"account_quota_notify_enabled"`
|
||||
BalanceLowNotifyThreshold float64 `json:"balance_low_notify_threshold"`
|
||||
BalanceLowNotifyRechargeURL string `json:"balance_low_notify_recharge_url"`
|
||||
|
||||
ChannelMonitorEnabled bool `json:"channel_monitor_enabled"`
|
||||
ChannelMonitorDefaultIntervalSeconds int `json:"channel_monitor_default_interval_seconds"`
|
||||
|
||||
AvailableChannelsEnabled bool `json:"available_channels_enabled"`
|
||||
|
||||
AffiliateEnabled bool `json:"affiliate_enabled"`
|
||||
}
|
||||
|
||||
// OverloadCooldownSettings 529过载冷却配置 DTO
|
||||
|
||||
@@ -304,6 +304,12 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
||||
selection, err := h.gatewayService.SelectAccountWithLoadAwareness(c.Request.Context(), apiKey.GroupID, sessionKey, reqModel, fs.FailedAccountIDs, "", int64(0)) // Gemini 不使用会话限制
|
||||
if err != nil {
|
||||
if len(fs.FailedAccountIDs) == 0 {
|
||||
reqLog.Warn("gateway.select_account_no_available",
|
||||
zap.String("model", reqModel),
|
||||
zap.Int64p("group_id", apiKey.GroupID),
|
||||
zap.String("platform", platform),
|
||||
zap.Error(err),
|
||||
)
|
||||
h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts: "+err.Error(), streamStarted)
|
||||
return
|
||||
}
|
||||
@@ -347,6 +353,11 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
||||
accountReleaseFunc := selection.ReleaseFunc
|
||||
if !selection.Acquired {
|
||||
if selection.WaitPlan == nil {
|
||||
reqLog.Warn("gateway.select_account_no_slot_no_wait_plan",
|
||||
zap.Int64("account_id", account.ID),
|
||||
zap.String("model", reqModel),
|
||||
zap.String("platform", platform),
|
||||
)
|
||||
h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts", streamStarted)
|
||||
return
|
||||
}
|
||||
@@ -528,6 +539,13 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
||||
selection, err := h.gatewayService.SelectAccountWithLoadAwareness(c.Request.Context(), currentAPIKey.GroupID, sessionKey, reqModel, fs.FailedAccountIDs, parsedReq.MetadataUserID, subject.UserID)
|
||||
if err != nil {
|
||||
if len(fs.FailedAccountIDs) == 0 {
|
||||
reqLog.Warn("gateway.select_account_no_available",
|
||||
zap.String("model", reqModel),
|
||||
zap.Int64p("group_id", currentAPIKey.GroupID),
|
||||
zap.String("platform", platform),
|
||||
zap.Bool("fallback_used", fallbackUsed),
|
||||
zap.Error(err),
|
||||
)
|
||||
h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts: "+err.Error(), streamStarted)
|
||||
return
|
||||
}
|
||||
@@ -571,6 +589,11 @@ func (h *GatewayHandler) Messages(c *gin.Context) {
|
||||
accountReleaseFunc := selection.ReleaseFunc
|
||||
if !selection.Acquired {
|
||||
if selection.WaitPlan == nil {
|
||||
reqLog.Warn("gateway.select_account_no_slot_no_wait_plan",
|
||||
zap.Int64("account_id", account.ID),
|
||||
zap.String("model", reqModel),
|
||||
zap.String("platform", platform),
|
||||
)
|
||||
h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts", streamStarted)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -6,50 +6,55 @@ import (
|
||||
|
||||
// AdminHandlers contains all admin-related HTTP handlers
|
||||
type AdminHandlers struct {
|
||||
Dashboard *admin.DashboardHandler
|
||||
User *admin.UserHandler
|
||||
Group *admin.GroupHandler
|
||||
Account *admin.AccountHandler
|
||||
Announcement *admin.AnnouncementHandler
|
||||
DataManagement *admin.DataManagementHandler
|
||||
Backup *admin.BackupHandler
|
||||
OAuth *admin.OAuthHandler
|
||||
OpenAIOAuth *admin.OpenAIOAuthHandler
|
||||
GeminiOAuth *admin.GeminiOAuthHandler
|
||||
AntigravityOAuth *admin.AntigravityOAuthHandler
|
||||
Proxy *admin.ProxyHandler
|
||||
Redeem *admin.RedeemHandler
|
||||
Promo *admin.PromoHandler
|
||||
Setting *admin.SettingHandler
|
||||
Ops *admin.OpsHandler
|
||||
System *admin.SystemHandler
|
||||
Subscription *admin.SubscriptionHandler
|
||||
Usage *admin.UsageHandler
|
||||
UserAttribute *admin.UserAttributeHandler
|
||||
ErrorPassthrough *admin.ErrorPassthroughHandler
|
||||
TLSFingerprintProfile *admin.TLSFingerprintProfileHandler
|
||||
APIKey *admin.AdminAPIKeyHandler
|
||||
ScheduledTest *admin.ScheduledTestHandler
|
||||
Channel *admin.ChannelHandler
|
||||
Payment *admin.PaymentHandler
|
||||
Dashboard *admin.DashboardHandler
|
||||
User *admin.UserHandler
|
||||
Group *admin.GroupHandler
|
||||
Account *admin.AccountHandler
|
||||
Announcement *admin.AnnouncementHandler
|
||||
DataManagement *admin.DataManagementHandler
|
||||
Backup *admin.BackupHandler
|
||||
OAuth *admin.OAuthHandler
|
||||
OpenAIOAuth *admin.OpenAIOAuthHandler
|
||||
GeminiOAuth *admin.GeminiOAuthHandler
|
||||
AntigravityOAuth *admin.AntigravityOAuthHandler
|
||||
Proxy *admin.ProxyHandler
|
||||
Redeem *admin.RedeemHandler
|
||||
Promo *admin.PromoHandler
|
||||
Setting *admin.SettingHandler
|
||||
Ops *admin.OpsHandler
|
||||
System *admin.SystemHandler
|
||||
Subscription *admin.SubscriptionHandler
|
||||
Usage *admin.UsageHandler
|
||||
UserAttribute *admin.UserAttributeHandler
|
||||
ErrorPassthrough *admin.ErrorPassthroughHandler
|
||||
TLSFingerprintProfile *admin.TLSFingerprintProfileHandler
|
||||
APIKey *admin.AdminAPIKeyHandler
|
||||
ScheduledTest *admin.ScheduledTestHandler
|
||||
Channel *admin.ChannelHandler
|
||||
ChannelMonitor *admin.ChannelMonitorHandler
|
||||
ChannelMonitorTemplate *admin.ChannelMonitorRequestTemplateHandler
|
||||
Payment *admin.PaymentHandler
|
||||
Affiliate *admin.AffiliateHandler
|
||||
}
|
||||
|
||||
// Handlers contains all HTTP handlers
|
||||
type Handlers struct {
|
||||
Auth *AuthHandler
|
||||
User *UserHandler
|
||||
APIKey *APIKeyHandler
|
||||
Usage *UsageHandler
|
||||
Redeem *RedeemHandler
|
||||
Subscription *SubscriptionHandler
|
||||
Announcement *AnnouncementHandler
|
||||
Admin *AdminHandlers
|
||||
Gateway *GatewayHandler
|
||||
OpenAIGateway *OpenAIGatewayHandler
|
||||
Setting *SettingHandler
|
||||
Totp *TotpHandler
|
||||
Payment *PaymentHandler
|
||||
PaymentWebhook *PaymentWebhookHandler
|
||||
Auth *AuthHandler
|
||||
User *UserHandler
|
||||
APIKey *APIKeyHandler
|
||||
Usage *UsageHandler
|
||||
Redeem *RedeemHandler
|
||||
Subscription *SubscriptionHandler
|
||||
Announcement *AnnouncementHandler
|
||||
ChannelMonitor *ChannelMonitorUserHandler
|
||||
Admin *AdminHandlers
|
||||
Gateway *GatewayHandler
|
||||
OpenAIGateway *OpenAIGatewayHandler
|
||||
Setting *SettingHandler
|
||||
Totp *TotpHandler
|
||||
Payment *PaymentHandler
|
||||
PaymentWebhook *PaymentWebhookHandler
|
||||
AvailableChannel *AvailableChannelHandler
|
||||
}
|
||||
|
||||
// BuildInfo contains build-time information
|
||||
|
||||
@@ -130,6 +130,7 @@ func (h *OpenAIGatewayHandler) ChatCompletions(c *gin.Context) {
|
||||
reqModel,
|
||||
failedAccountIDs,
|
||||
service.OpenAIUpstreamTransportAny,
|
||||
false,
|
||||
)
|
||||
if err != nil {
|
||||
reqLog.Warn("openai_chat_completions.account_select_failed",
|
||||
@@ -153,6 +154,7 @@ func (h *OpenAIGatewayHandler) ChatCompletions(c *gin.Context) {
|
||||
defaultModel,
|
||||
failedAccountIDs,
|
||||
service.OpenAIUpstreamTransportAny,
|
||||
false,
|
||||
)
|
||||
if err == nil && selection != nil {
|
||||
c.Set("openai_chat_completions_fallback_model", defaultModel)
|
||||
|
||||
@@ -116,7 +116,7 @@ func TestLogOpenAIRemoteCompactOutcome_Succeeded(t *testing.T) {
|
||||
rec := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(rec)
|
||||
c.Request = httptest.NewRequest(http.MethodPost, "/v1/responses/compact", nil)
|
||||
c.Request.Header.Set("User-Agent", "codex_cli_rs/0.104.0")
|
||||
c.Request.Header.Set("User-Agent", "codex_cli_rs/0.125.0")
|
||||
c.Set(opsModelKey, "gpt-5.3-codex")
|
||||
c.Set(opsAccountIDKey, int64(123))
|
||||
c.Header("x-request-id", "rid-compact-ok")
|
||||
@@ -142,7 +142,7 @@ func TestLogOpenAIRemoteCompactOutcome_Failed(t *testing.T) {
|
||||
rec := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(rec)
|
||||
c.Request = httptest.NewRequest(http.MethodPost, "/responses/compact", nil)
|
||||
c.Request.Header.Set("User-Agent", "codex_cli_rs/0.104.0")
|
||||
c.Request.Header.Set("User-Agent", "codex_cli_rs/0.125.0")
|
||||
c.Status(http.StatusBadGateway)
|
||||
|
||||
h := &OpenAIGatewayHandler{}
|
||||
@@ -180,7 +180,7 @@ func TestOpenAIResponses_CompactUnauthorizedLogsFailed(t *testing.T) {
|
||||
c, _ := gin.CreateTestContext(rec)
|
||||
c.Request = httptest.NewRequest(http.MethodPost, "/v1/responses/compact", strings.NewReader(`{"model":"gpt-5.3-codex"}`))
|
||||
c.Request.Header.Set("Content-Type", "application/json")
|
||||
c.Request.Header.Set("User-Agent", "codex_cli_rs/0.104.0")
|
||||
c.Request.Header.Set("User-Agent", "codex_cli_rs/0.125.0")
|
||||
|
||||
h := &OpenAIGatewayHandler{}
|
||||
h.Responses(c)
|
||||
|
||||
@@ -238,6 +238,7 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) {
|
||||
|
||||
// Generate session hash (header first; fallback to prompt_cache_key)
|
||||
sessionHash := h.gatewayService.GenerateSessionHash(c, sessionHashBody)
|
||||
requireCompact := isOpenAIRemoteCompactPath(c)
|
||||
|
||||
maxAccountSwitches := h.maxAccountSwitches
|
||||
switchCount := 0
|
||||
@@ -256,6 +257,7 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) {
|
||||
reqModel,
|
||||
failedAccountIDs,
|
||||
service.OpenAIUpstreamTransportAny,
|
||||
requireCompact,
|
||||
)
|
||||
if err != nil {
|
||||
reqLog.Warn("openai.account_select_failed",
|
||||
@@ -263,6 +265,10 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) {
|
||||
zap.Int("excluded_account_count", len(failedAccountIDs)),
|
||||
)
|
||||
if len(failedAccountIDs) == 0 {
|
||||
if errors.Is(err, service.ErrNoAvailableCompactAccounts) {
|
||||
h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "compact_not_supported", "No available OpenAI accounts support /responses/compact", streamStarted)
|
||||
return
|
||||
}
|
||||
h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "Service temporarily unavailable", streamStarted)
|
||||
return
|
||||
}
|
||||
@@ -644,6 +650,7 @@ func (h *OpenAIGatewayHandler) Messages(c *gin.Context) {
|
||||
currentRoutingModel,
|
||||
failedAccountIDs,
|
||||
service.OpenAIUpstreamTransportAny,
|
||||
false,
|
||||
)
|
||||
if err != nil {
|
||||
reqLog.Warn("openai_messages.account_select_failed",
|
||||
@@ -1167,6 +1174,7 @@ func (h *OpenAIGatewayHandler) ResponsesWebSocket(c *gin.Context) {
|
||||
reqModel,
|
||||
nil,
|
||||
service.OpenAIUpstreamTransportResponsesWebsocketV2,
|
||||
false,
|
||||
)
|
||||
if err != nil {
|
||||
reqLog.Warn("openai.websocket_account_select_failed", zap.Error(err))
|
||||
|
||||
@@ -117,7 +117,7 @@ func TestVerifyOrderPublicReturnsLegacyOrderState(t *testing.T) {
|
||||
Save(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
paymentSvc := service.NewPaymentService(client, payment.NewRegistry(), nil, nil, nil, nil, nil, nil)
|
||||
paymentSvc := service.NewPaymentService(client, payment.NewRegistry(), nil, nil, nil, nil, nil, nil, nil)
|
||||
h := NewPaymentHandler(paymentSvc, nil, nil)
|
||||
|
||||
recorder := httptest.NewRecorder()
|
||||
@@ -215,7 +215,7 @@ func TestResolveOrderPublicByResumeTokenReturnsFrontendContractFields(t *testing
|
||||
require.NoError(t, err)
|
||||
|
||||
configSvc := service.NewPaymentConfigService(client, nil, []byte("0123456789abcdef0123456789abcdef"))
|
||||
paymentSvc := service.NewPaymentService(client, payment.NewRegistry(), nil, nil, nil, configSvc, nil, nil)
|
||||
paymentSvc := service.NewPaymentService(client, payment.NewRegistry(), nil, nil, nil, configSvc, nil, nil, nil)
|
||||
h := NewPaymentHandler(paymentSvc, nil, nil)
|
||||
|
||||
recorder := httptest.NewRecorder()
|
||||
@@ -302,7 +302,7 @@ func TestResolveOrderPublicByResumeTokenReturnsBadRequestForMismatchedToken(t *t
|
||||
require.NoError(t, err)
|
||||
|
||||
configSvc := service.NewPaymentConfigService(client, nil, []byte("0123456789abcdef0123456789abcdef"))
|
||||
paymentSvc := service.NewPaymentService(client, payment.NewRegistry(), nil, nil, nil, configSvc, nil, nil)
|
||||
paymentSvc := service.NewPaymentService(client, payment.NewRegistry(), nil, nil, nil, configSvc, nil, nil, nil)
|
||||
h := NewPaymentHandler(paymentSvc, nil, nil)
|
||||
|
||||
recorder := httptest.NewRecorder()
|
||||
@@ -342,7 +342,7 @@ func TestVerifyOrderPublicRejectsBlankOutTradeNo(t *testing.T) {
|
||||
client := enttest.NewClient(t, enttest.WithOptions(dbent.Driver(drv)))
|
||||
t.Cleanup(func() { _ = client.Close() })
|
||||
|
||||
paymentSvc := service.NewPaymentService(client, payment.NewRegistry(), nil, nil, nil, nil, nil, nil)
|
||||
paymentSvc := service.NewPaymentService(client, payment.NewRegistry(), nil, nil, nil, nil, nil, nil, nil)
|
||||
h := NewPaymentHandler(paymentSvc, nil, nil)
|
||||
|
||||
recorder := httptest.NewRecorder()
|
||||
|
||||
@@ -2,6 +2,7 @@ package handler
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
@@ -114,6 +115,20 @@ func (h *PaymentWebhookHandler) handleNotify(c *gin.Context, providerKey string)
|
||||
}
|
||||
|
||||
if err := h.paymentService.HandlePaymentNotification(c.Request.Context(), notification, resolvedProviderKey); err != nil {
|
||||
// Unknown order: ack with 2xx so the provider stops retrying. This
|
||||
// guards against foreign environments whose webhook endpoints are
|
||||
// (mis)configured to point at us — without a 2xx, the provider will
|
||||
// retry for days and spam our error logs. We still emit a WARN so the
|
||||
// event is discoverable in logs.
|
||||
if errors.Is(err, service.ErrOrderNotFound) {
|
||||
slog.Warn("[Payment Webhook] unknown order, acking to stop retries",
|
||||
"provider", resolvedProviderKey,
|
||||
"outTradeNo", notification.OrderID,
|
||||
"tradeNo", notification.TradeNo,
|
||||
)
|
||||
writeSuccessResponse(c, resolvedProviderKey)
|
||||
return
|
||||
}
|
||||
slog.Error("[Payment Webhook] handle notification failed", "provider", resolvedProviderKey, "error", err)
|
||||
c.String(http.StatusInternalServerError, "handle failed")
|
||||
return
|
||||
|
||||
@@ -6,11 +6,13 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/payment"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
@@ -91,6 +93,43 @@ func TestWriteSuccessResponse(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestUnknownOrderWebhookAcksWithSuccess exercises the response contract that
|
||||
// handleNotify relies on when HandlePaymentNotification returns ErrOrderNotFound:
|
||||
// we still need to emit the provider-specific 2xx so the provider stops
|
||||
// retrying. We can't easily drive handleNotify end-to-end without mocking the
|
||||
// concrete *service.PaymentService, so this test locks down the two ingredients
|
||||
// the fix depends on:
|
||||
// 1. errors.Is recognises the sentinel through fmt.Errorf %w wrapping (which
|
||||
// is how service layer wraps it with the out_trade_no context).
|
||||
// 2. writeSuccessResponse produces the provider-specific body for Stripe
|
||||
// (empty 200) — matching what handleNotify calls on the ack path.
|
||||
//
|
||||
// If either contract breaks, the Stripe "unknown order → 500 loop" regresses.
|
||||
func TestUnknownOrderWebhookAcksWithSuccess(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
// 1) Sentinel recognition through wrapping.
|
||||
wrapped := fmt.Errorf("%w: out_trade_no=sub2_missing_42", service.ErrOrderNotFound)
|
||||
require.True(t, errors.Is(wrapped, service.ErrOrderNotFound),
|
||||
"handleNotify uses errors.Is on the wrapped service error; regression here "+
|
||||
"would mean unknown-order webhooks go back to returning 500 and looping forever")
|
||||
|
||||
// A distinct error must NOT match — otherwise a DB failure would be silently
|
||||
// swallowed as an ack.
|
||||
other := errors.New("lookup order failed: connection refused")
|
||||
require.False(t, errors.Is(other, service.ErrOrderNotFound))
|
||||
|
||||
// 2) Provider-specific success body is what handleNotify emits on the
|
||||
// ack path. Asserted again here because this is the shape Stripe expects
|
||||
// to consider the webhook acknowledged.
|
||||
w := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
writeSuccessResponse(c, payment.TypeStripe)
|
||||
require.Equal(t, http.StatusOK, w.Code,
|
||||
"Stripe requires 2xx to stop retrying; anything else restarts the retry loop")
|
||||
require.Empty(t, w.Body.String(), "Stripe expects an empty body on the ack path")
|
||||
}
|
||||
|
||||
func TestWebhookConstants(t *testing.T) {
|
||||
t.Run("maxWebhookBodySize is 1MB", func(t *testing.T) {
|
||||
assert.Equal(t, int64(1<<20), int64(maxWebhookBodySize))
|
||||
|
||||
@@ -70,5 +70,12 @@ func (h *SettingHandler) GetPublicSettings(c *gin.Context) {
|
||||
AccountQuotaNotifyEnabled: settings.AccountQuotaNotifyEnabled,
|
||||
BalanceLowNotifyThreshold: settings.BalanceLowNotifyThreshold,
|
||||
BalanceLowNotifyRechargeURL: settings.BalanceLowNotifyRechargeURL,
|
||||
|
||||
ChannelMonitorEnabled: settings.ChannelMonitorEnabled,
|
||||
ChannelMonitorDefaultIntervalSeconds: settings.ChannelMonitorDefaultIntervalSeconds,
|
||||
|
||||
AvailableChannelsEnabled: settings.AvailableChannelsEnabled,
|
||||
|
||||
AffiliateEnabled: settings.AffiliateEnabled,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -14,10 +14,11 @@ import (
|
||||
|
||||
// UserHandler handles user-related requests
|
||||
type UserHandler struct {
|
||||
userService *service.UserService
|
||||
authService *service.AuthService
|
||||
emailService *service.EmailService
|
||||
emailCache service.EmailCache
|
||||
userService *service.UserService
|
||||
authService *service.AuthService
|
||||
emailService *service.EmailService
|
||||
emailCache service.EmailCache
|
||||
affiliateService *service.AffiliateService
|
||||
}
|
||||
|
||||
// NewUserHandler creates a new UserHandler
|
||||
@@ -26,12 +27,14 @@ func NewUserHandler(
|
||||
authService *service.AuthService,
|
||||
emailService *service.EmailService,
|
||||
emailCache service.EmailCache,
|
||||
affiliateService *service.AffiliateService,
|
||||
) *UserHandler {
|
||||
return &UserHandler{
|
||||
userService: userService,
|
||||
authService: authService,
|
||||
emailService: emailService,
|
||||
emailCache: emailCache,
|
||||
userService: userService,
|
||||
authService: authService,
|
||||
emailService: emailService,
|
||||
emailCache: emailCache,
|
||||
affiliateService: affiliateService,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -159,6 +162,44 @@ func (h *UserHandler) UpdateProfile(c *gin.Context) {
|
||||
response.Success(c, profileResp)
|
||||
}
|
||||
|
||||
// GetAffiliate returns the current user's affiliate details.
|
||||
// GET /api/v1/user/aff
|
||||
func (h *UserHandler) GetAffiliate(c *gin.Context) {
|
||||
subject, ok := middleware2.GetAuthSubjectFromContext(c)
|
||||
if !ok {
|
||||
response.Unauthorized(c, "User not authenticated")
|
||||
return
|
||||
}
|
||||
|
||||
detail, err := h.affiliateService.GetAffiliateDetail(c.Request.Context(), subject.UserID)
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
response.Success(c, detail)
|
||||
}
|
||||
|
||||
// TransferAffiliateQuota transfers all available affiliate quota into current balance.
|
||||
// POST /api/v1/user/aff/transfer
|
||||
func (h *UserHandler) TransferAffiliateQuota(c *gin.Context) {
|
||||
subject, ok := middleware2.GetAuthSubjectFromContext(c)
|
||||
if !ok {
|
||||
response.Unauthorized(c, "User not authenticated")
|
||||
return
|
||||
}
|
||||
|
||||
transferred, balance, err := h.affiliateService.TransferAffiliateQuota(c.Request.Context(), subject.UserID)
|
||||
if err != nil {
|
||||
response.ErrorFrom(c, err)
|
||||
return
|
||||
}
|
||||
|
||||
response.Success(c, gin.H{
|
||||
"transferred_quota": transferred,
|
||||
"balance": balance,
|
||||
})
|
||||
}
|
||||
|
||||
type StartIdentityBindingRequest struct {
|
||||
Provider string `json:"provider" binding:"required"`
|
||||
RedirectTo string `json:"redirect_to"`
|
||||
|
||||
@@ -142,7 +142,7 @@ func TestUserHandlerUpdateProfileReturnsAvatarURL(t *testing.T) {
|
||||
Status: service.StatusActive,
|
||||
},
|
||||
}
|
||||
handler := NewUserHandler(service.NewUserService(repo, nil, nil, nil), nil, nil, nil)
|
||||
handler := NewUserHandler(service.NewUserService(repo, nil, nil, nil), nil, nil, nil, nil)
|
||||
|
||||
body := []byte(`{"avatar_url":"https://cdn.example.com/avatar.png"}`)
|
||||
recorder := httptest.NewRecorder()
|
||||
@@ -200,7 +200,7 @@ func TestUserHandlerGetProfileReturnsIdentitySummaries(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
handler := NewUserHandler(service.NewUserService(repo, nil, nil, nil), nil, nil, nil)
|
||||
handler := NewUserHandler(service.NewUserService(repo, nil, nil, nil), nil, nil, nil, nil)
|
||||
|
||||
recorder := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(recorder)
|
||||
@@ -283,7 +283,7 @@ func TestUserHandlerGetProfileReturnsLegacyCompatibilityFields(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
handler := NewUserHandler(service.NewUserService(repo, nil, nil, nil), nil, nil, nil)
|
||||
handler := NewUserHandler(service.NewUserService(repo, nil, nil, nil), nil, nil, nil, nil)
|
||||
|
||||
recorder := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(recorder)
|
||||
@@ -362,7 +362,7 @@ func TestUserHandlerGetProfileDoesNotInferEditedProfileSourcesWithoutMatchingIde
|
||||
},
|
||||
},
|
||||
}
|
||||
handler := NewUserHandler(service.NewUserService(repo, nil, nil, nil), nil, nil, nil)
|
||||
handler := NewUserHandler(service.NewUserService(repo, nil, nil, nil), nil, nil, nil, nil)
|
||||
|
||||
recorder := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(recorder)
|
||||
@@ -511,8 +511,8 @@ func TestUserHandlerBindEmailIdentityReturnsProfileResponse(t *testing.T) {
|
||||
},
|
||||
}
|
||||
emailService := service.NewEmailService(nil, emailCache)
|
||||
authService := service.NewAuthService(nil, repo, nil, nil, cfg, nil, emailService, nil, nil, nil, nil)
|
||||
handler := NewUserHandler(service.NewUserService(repo, nil, nil, nil), authService, nil, nil)
|
||||
authService := service.NewAuthService(nil, repo, nil, nil, cfg, nil, emailService, nil, nil, nil, nil, nil)
|
||||
handler := NewUserHandler(service.NewUserService(repo, nil, nil, nil), authService, nil, nil, nil)
|
||||
|
||||
body := []byte(`{"email":"new@example.com","verify_code":"123456","password":"new-password"}`)
|
||||
recorder := httptest.NewRecorder()
|
||||
@@ -566,7 +566,7 @@ func TestUserHandlerUnbindIdentityReturnsUpdatedProfile(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
handler := NewUserHandler(service.NewUserService(repo, nil, nil, nil), nil, nil, nil)
|
||||
handler := NewUserHandler(service.NewUserService(repo, nil, nil, nil), nil, nil, nil, nil)
|
||||
|
||||
recorder := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(recorder)
|
||||
@@ -625,8 +625,8 @@ func TestUserHandlerUnbindIdentityRevokesAllUserSessionsWhenAuthServiceConfigure
|
||||
ExpireHour: 1,
|
||||
},
|
||||
}
|
||||
authService := service.NewAuthService(nil, repo, nil, refreshTokenCache, cfg, nil, nil, nil, nil, nil, nil)
|
||||
handler := NewUserHandler(service.NewUserService(repo, nil, nil, nil), authService, nil, nil)
|
||||
authService := service.NewAuthService(nil, repo, nil, refreshTokenCache, cfg, nil, nil, nil, nil, nil, nil, nil)
|
||||
handler := NewUserHandler(service.NewUserService(repo, nil, nil, nil), authService, nil, nil, nil)
|
||||
|
||||
recorder := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(recorder)
|
||||
@@ -668,8 +668,8 @@ func TestUserHandlerUnbindIdentityDoesNotRevokeSessionsWhenNothingWasUnbound(t *
|
||||
ExpireHour: 1,
|
||||
},
|
||||
}
|
||||
authService := service.NewAuthService(nil, repo, nil, refreshTokenCache, cfg, nil, nil, nil, nil, nil, nil)
|
||||
handler := NewUserHandler(service.NewUserService(repo, nil, nil, nil), authService, nil, nil)
|
||||
authService := service.NewAuthService(nil, repo, nil, refreshTokenCache, cfg, nil, nil, nil, nil, nil, nil, nil)
|
||||
handler := NewUserHandler(service.NewUserService(repo, nil, nil, nil), authService, nil, nil, nil)
|
||||
|
||||
recorder := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(recorder)
|
||||
@@ -712,8 +712,8 @@ func TestUserHandlerBindEmailIdentityRejectsWrongCurrentPasswordForBoundEmail(t
|
||||
},
|
||||
}
|
||||
emailService := service.NewEmailService(nil, emailCache)
|
||||
authService := service.NewAuthService(nil, repo, nil, nil, cfg, nil, emailService, nil, nil, nil, nil)
|
||||
handler := NewUserHandler(service.NewUserService(repo, nil, nil, nil), authService, nil, nil)
|
||||
authService := service.NewAuthService(nil, repo, nil, nil, cfg, nil, emailService, nil, nil, nil, nil, nil)
|
||||
handler := NewUserHandler(service.NewUserService(repo, nil, nil, nil), authService, nil, nil, nil)
|
||||
|
||||
body := []byte(`{"email":"new@example.com","verify_code":"123456","password":"wrong-password"}`)
|
||||
recorder := httptest.NewRecorder()
|
||||
@@ -750,7 +750,7 @@ func TestUserHandlerStartIdentityBindingReturnsAuthorizeURL(t *testing.T) {
|
||||
Status: service.StatusActive,
|
||||
},
|
||||
}
|
||||
handler := NewUserHandler(service.NewUserService(repo, nil, nil, nil), nil, nil, nil)
|
||||
handler := NewUserHandler(service.NewUserService(repo, nil, nil, nil), nil, nil, nil, nil)
|
||||
|
||||
body := []byte(`{"provider":"wechat","redirect_to":"/settings/profile"}`)
|
||||
recorder := httptest.NewRecorder()
|
||||
|
||||
@@ -34,35 +34,41 @@ func ProvideAdminHandlers(
|
||||
apiKeyHandler *admin.AdminAPIKeyHandler,
|
||||
scheduledTestHandler *admin.ScheduledTestHandler,
|
||||
channelHandler *admin.ChannelHandler,
|
||||
channelMonitorHandler *admin.ChannelMonitorHandler,
|
||||
channelMonitorTemplateHandler *admin.ChannelMonitorRequestTemplateHandler,
|
||||
paymentHandler *admin.PaymentHandler,
|
||||
affiliateHandler *admin.AffiliateHandler,
|
||||
) *AdminHandlers {
|
||||
return &AdminHandlers{
|
||||
Dashboard: dashboardHandler,
|
||||
User: userHandler,
|
||||
Group: groupHandler,
|
||||
Account: accountHandler,
|
||||
Announcement: announcementHandler,
|
||||
DataManagement: dataManagementHandler,
|
||||
Backup: backupHandler,
|
||||
OAuth: oauthHandler,
|
||||
OpenAIOAuth: openaiOAuthHandler,
|
||||
GeminiOAuth: geminiOAuthHandler,
|
||||
AntigravityOAuth: antigravityOAuthHandler,
|
||||
Proxy: proxyHandler,
|
||||
Redeem: redeemHandler,
|
||||
Promo: promoHandler,
|
||||
Setting: settingHandler,
|
||||
Ops: opsHandler,
|
||||
System: systemHandler,
|
||||
Subscription: subscriptionHandler,
|
||||
Usage: usageHandler,
|
||||
UserAttribute: userAttributeHandler,
|
||||
ErrorPassthrough: errorPassthroughHandler,
|
||||
TLSFingerprintProfile: tlsFingerprintProfileHandler,
|
||||
APIKey: apiKeyHandler,
|
||||
ScheduledTest: scheduledTestHandler,
|
||||
Channel: channelHandler,
|
||||
Payment: paymentHandler,
|
||||
Dashboard: dashboardHandler,
|
||||
User: userHandler,
|
||||
Group: groupHandler,
|
||||
Account: accountHandler,
|
||||
Announcement: announcementHandler,
|
||||
DataManagement: dataManagementHandler,
|
||||
Backup: backupHandler,
|
||||
OAuth: oauthHandler,
|
||||
OpenAIOAuth: openaiOAuthHandler,
|
||||
GeminiOAuth: geminiOAuthHandler,
|
||||
AntigravityOAuth: antigravityOAuthHandler,
|
||||
Proxy: proxyHandler,
|
||||
Redeem: redeemHandler,
|
||||
Promo: promoHandler,
|
||||
Setting: settingHandler,
|
||||
Ops: opsHandler,
|
||||
System: systemHandler,
|
||||
Subscription: subscriptionHandler,
|
||||
Usage: usageHandler,
|
||||
UserAttribute: userAttributeHandler,
|
||||
ErrorPassthrough: errorPassthroughHandler,
|
||||
TLSFingerprintProfile: tlsFingerprintProfileHandler,
|
||||
APIKey: apiKeyHandler,
|
||||
ScheduledTest: scheduledTestHandler,
|
||||
Channel: channelHandler,
|
||||
ChannelMonitor: channelMonitorHandler,
|
||||
ChannelMonitorTemplate: channelMonitorTemplateHandler,
|
||||
Payment: paymentHandler,
|
||||
Affiliate: affiliateHandler,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -85,6 +91,7 @@ func ProvideHandlers(
|
||||
redeemHandler *RedeemHandler,
|
||||
subscriptionHandler *SubscriptionHandler,
|
||||
announcementHandler *AnnouncementHandler,
|
||||
channelMonitorUserHandler *ChannelMonitorUserHandler,
|
||||
adminHandlers *AdminHandlers,
|
||||
gatewayHandler *GatewayHandler,
|
||||
openaiGatewayHandler *OpenAIGatewayHandler,
|
||||
@@ -92,24 +99,27 @@ func ProvideHandlers(
|
||||
totpHandler *TotpHandler,
|
||||
paymentHandler *PaymentHandler,
|
||||
paymentWebhookHandler *PaymentWebhookHandler,
|
||||
availableChannelHandler *AvailableChannelHandler,
|
||||
_ *service.IdempotencyCoordinator,
|
||||
_ *service.IdempotencyCleanupService,
|
||||
) *Handlers {
|
||||
return &Handlers{
|
||||
Auth: authHandler,
|
||||
User: userHandler,
|
||||
APIKey: apiKeyHandler,
|
||||
Usage: usageHandler,
|
||||
Redeem: redeemHandler,
|
||||
Subscription: subscriptionHandler,
|
||||
Announcement: announcementHandler,
|
||||
Admin: adminHandlers,
|
||||
Gateway: gatewayHandler,
|
||||
OpenAIGateway: openaiGatewayHandler,
|
||||
Setting: settingHandler,
|
||||
Totp: totpHandler,
|
||||
Payment: paymentHandler,
|
||||
PaymentWebhook: paymentWebhookHandler,
|
||||
Auth: authHandler,
|
||||
User: userHandler,
|
||||
APIKey: apiKeyHandler,
|
||||
Usage: usageHandler,
|
||||
Redeem: redeemHandler,
|
||||
Subscription: subscriptionHandler,
|
||||
Announcement: announcementHandler,
|
||||
ChannelMonitor: channelMonitorUserHandler,
|
||||
Admin: adminHandlers,
|
||||
Gateway: gatewayHandler,
|
||||
OpenAIGateway: openaiGatewayHandler,
|
||||
Setting: settingHandler,
|
||||
Totp: totpHandler,
|
||||
Payment: paymentHandler,
|
||||
PaymentWebhook: paymentWebhookHandler,
|
||||
AvailableChannel: availableChannelHandler,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -123,12 +133,14 @@ var ProviderSet = wire.NewSet(
|
||||
NewRedeemHandler,
|
||||
NewSubscriptionHandler,
|
||||
NewAnnouncementHandler,
|
||||
NewChannelMonitorUserHandler,
|
||||
NewGatewayHandler,
|
||||
NewOpenAIGatewayHandler,
|
||||
NewTotpHandler,
|
||||
ProvideSettingHandler,
|
||||
NewPaymentHandler,
|
||||
NewPaymentWebhookHandler,
|
||||
NewAvailableChannelHandler,
|
||||
|
||||
// Admin handlers
|
||||
admin.NewDashboardHandler,
|
||||
@@ -156,7 +168,10 @@ var ProviderSet = wire.NewSet(
|
||||
admin.NewAdminAPIKeyHandler,
|
||||
admin.NewScheduledTestHandler,
|
||||
admin.NewChannelHandler,
|
||||
admin.NewChannelMonitorHandler,
|
||||
admin.NewChannelMonitorRequestTemplateHandler,
|
||||
admin.NewPaymentHandler,
|
||||
admin.NewAffiliateHandler,
|
||||
|
||||
// AdminHandlers and Handlers constructors
|
||||
ProvideAdminHandlers,
|
||||
|
||||
@@ -25,6 +25,7 @@ const (
|
||||
easypayStatusPaid = 1
|
||||
easypayHTTPTimeout = 10 * time.Second
|
||||
maxEasypayResponseSize = 1 << 20 // 1MB
|
||||
maxEasypayErrorSummary = 512
|
||||
tradeStatusSuccess = "TRADE_SUCCESS"
|
||||
signTypeMD5 = "MD5"
|
||||
paymentModePopup = "popup"
|
||||
@@ -42,17 +43,55 @@ type EasyPay struct {
|
||||
// config keys: pid, pkey, apiBase, notifyUrl, returnUrl, cid, cidAlipay, cidWxpay
|
||||
func NewEasyPay(instanceID string, config map[string]string) (*EasyPay, error) {
|
||||
for _, k := range []string{"pid", "pkey", "apiBase", "notifyUrl", "returnUrl"} {
|
||||
if config[k] == "" {
|
||||
if strings.TrimSpace(config[k]) == "" {
|
||||
return nil, fmt.Errorf("easypay config missing required key: %s", k)
|
||||
}
|
||||
}
|
||||
cfg := make(map[string]string, len(config))
|
||||
for k, v := range config {
|
||||
cfg[k] = v
|
||||
}
|
||||
cfg["apiBase"] = normalizeEasyPayAPIBase(cfg["apiBase"])
|
||||
return &EasyPay{
|
||||
instanceID: instanceID,
|
||||
config: config,
|
||||
config: cfg,
|
||||
httpClient: &http.Client{Timeout: easypayHTTPTimeout},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func normalizeEasyPayAPIBase(apiBase string) string {
|
||||
base := strings.TrimSpace(apiBase)
|
||||
if base == "" {
|
||||
return ""
|
||||
}
|
||||
if parsed, err := url.Parse(base); err == nil && parsed.Scheme != "" && parsed.Host != "" {
|
||||
parsed.RawQuery = ""
|
||||
parsed.Fragment = ""
|
||||
parsed.RawPath = ""
|
||||
parsed.Path = trimEasyPayEndpointPath(parsed.Path)
|
||||
return strings.TrimRight(parsed.String(), "/")
|
||||
}
|
||||
return strings.TrimRight(trimEasyPayEndpointPath(base), "/")
|
||||
}
|
||||
|
||||
func trimEasyPayEndpointPath(path string) string {
|
||||
path = strings.TrimRight(strings.TrimSpace(path), "/")
|
||||
lower := strings.ToLower(path)
|
||||
for _, endpoint := range []string{"/submit.php", "/mapi.php", "/api.php"} {
|
||||
if strings.HasSuffix(lower, endpoint) {
|
||||
return strings.TrimRight(path[:len(path)-len(endpoint)], "/")
|
||||
}
|
||||
}
|
||||
return path
|
||||
}
|
||||
|
||||
func (e *EasyPay) apiBase() string {
|
||||
if e == nil {
|
||||
return ""
|
||||
}
|
||||
return normalizeEasyPayAPIBase(e.config["apiBase"])
|
||||
}
|
||||
|
||||
func (e *EasyPay) Name() string { return "EasyPay" }
|
||||
func (e *EasyPay) ProviderKey() string { return payment.TypeEasyPay }
|
||||
func (e *EasyPay) SupportedTypes() []payment.PaymentType {
|
||||
@@ -104,8 +143,7 @@ func (e *EasyPay) createRedirectPayment(req payment.CreatePaymentRequest) (*paym
|
||||
for k, v := range params {
|
||||
q.Set(k, v)
|
||||
}
|
||||
base := strings.TrimRight(e.config["apiBase"], "/")
|
||||
payURL := base + "/submit.php?" + q.Encode()
|
||||
payURL := e.apiBase() + "/submit.php?" + q.Encode()
|
||||
return &payment.CreatePaymentResponse{PayURL: payURL}, nil
|
||||
}
|
||||
|
||||
@@ -127,7 +165,7 @@ func (e *EasyPay) createAPIPayment(ctx context.Context, req payment.CreatePaymen
|
||||
params["sign"] = easyPaySign(params, e.config["pkey"])
|
||||
params["sign_type"] = signTypeMD5
|
||||
|
||||
body, err := e.post(ctx, strings.TrimRight(e.config["apiBase"], "/")+"/mapi.php", params)
|
||||
body, err := e.post(ctx, e.apiBase()+"/mapi.php", params)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("easypay create: %w", err)
|
||||
}
|
||||
@@ -171,7 +209,7 @@ func (e *EasyPay) QueryOrder(ctx context.Context, tradeNo string) (*payment.Quer
|
||||
"act": "order", "pid": e.config["pid"],
|
||||
"key": e.config["pkey"], "out_trade_no": tradeNo,
|
||||
}
|
||||
body, err := e.post(ctx, e.config["apiBase"]+"/api.php", params)
|
||||
body, err := e.post(ctx, e.apiBase()+"/api.php", params)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("easypay query: %w", err)
|
||||
}
|
||||
@@ -234,25 +272,128 @@ func (e *EasyPay) VerifyNotification(_ context.Context, rawBody string, _ map[st
|
||||
}
|
||||
|
||||
func (e *EasyPay) Refund(ctx context.Context, req payment.RefundRequest) (*payment.RefundResponse, error) {
|
||||
params := map[string]string{
|
||||
"pid": e.config["pid"], "key": e.config["pkey"],
|
||||
"trade_no": req.TradeNo, "out_trade_no": req.OrderID, "money": req.Amount,
|
||||
attempts := e.refundAttempts(req)
|
||||
if len(attempts) == 0 {
|
||||
return nil, fmt.Errorf("easypay refund missing order identifier")
|
||||
}
|
||||
body, err := e.post(ctx, e.config["apiBase"]+"/api.php?act=refund", params)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("easypay refund: %w", err)
|
||||
var firstErr error
|
||||
for i, attempt := range attempts {
|
||||
body, status, err := e.postRaw(ctx, e.apiBase()+"/api.php?act=refund", attempt.params)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("easypay refund request: %w", err)
|
||||
}
|
||||
if err := parseEasyPayRefundResponse(status, body); err != nil {
|
||||
if firstErr == nil {
|
||||
firstErr = err
|
||||
}
|
||||
if i+1 < len(attempts) && isEasyPayRefundOrderNotFound(err) {
|
||||
continue
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return &payment.RefundResponse{RefundID: attempt.refundID, Status: payment.ProviderStatusSuccess}, nil
|
||||
}
|
||||
return nil, firstErr
|
||||
}
|
||||
|
||||
type easyPayRefundAttempt struct {
|
||||
params map[string]string
|
||||
refundID string
|
||||
}
|
||||
|
||||
func (e *EasyPay) refundAttempts(req payment.RefundRequest) []easyPayRefundAttempt {
|
||||
base := map[string]string{
|
||||
"pid": e.config["pid"], "key": e.config["pkey"], "money": req.Amount,
|
||||
}
|
||||
var attempts []easyPayRefundAttempt
|
||||
if orderID := strings.TrimSpace(req.OrderID); orderID != "" {
|
||||
params := cloneStringMap(base)
|
||||
params["out_trade_no"] = orderID
|
||||
attempts = append(attempts, easyPayRefundAttempt{params: params, refundID: orderID})
|
||||
}
|
||||
if tradeNo := strings.TrimSpace(req.TradeNo); tradeNo != "" {
|
||||
params := cloneStringMap(base)
|
||||
params["trade_no"] = tradeNo
|
||||
attempts = append(attempts, easyPayRefundAttempt{params: params, refundID: tradeNo})
|
||||
}
|
||||
return attempts
|
||||
}
|
||||
|
||||
func cloneStringMap(in map[string]string) map[string]string {
|
||||
out := make(map[string]string, len(in))
|
||||
for k, v := range in {
|
||||
out[k] = v
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func isEasyPayRefundOrderNotFound(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
msg := err.Error()
|
||||
lower := strings.ToLower(msg)
|
||||
return strings.Contains(msg, "订单编号不存在") ||
|
||||
strings.Contains(msg, "订单不存在") ||
|
||||
strings.Contains(lower, "order not found") ||
|
||||
strings.Contains(lower, "not exist")
|
||||
}
|
||||
|
||||
func parseEasyPayRefundResponse(status int, body []byte) error {
|
||||
summary := summarizeEasyPayResponse(body)
|
||||
if status < http.StatusOK || status >= http.StatusMultipleChoices {
|
||||
return fmt.Errorf("easypay refund HTTP %d: %s", status, summary)
|
||||
}
|
||||
|
||||
trimmed := strings.TrimSpace(string(body))
|
||||
if trimmed == "" {
|
||||
return fmt.Errorf("easypay refund empty response (HTTP %d): %s", status, summary)
|
||||
}
|
||||
|
||||
lower := strings.ToLower(trimmed)
|
||||
if strings.HasPrefix(lower, "<!doctype html") || strings.HasPrefix(lower, "<html") ||
|
||||
(strings.HasPrefix(lower, "<") && strings.Contains(lower, "html")) {
|
||||
return fmt.Errorf("easypay refund non-JSON response (HTTP %d): %s", status, summary)
|
||||
}
|
||||
|
||||
var resp struct {
|
||||
Code int `json:"code"`
|
||||
Code any `json:"code"`
|
||||
Msg string `json:"msg"`
|
||||
}
|
||||
if err := json.Unmarshal(body, &resp); err != nil {
|
||||
return nil, fmt.Errorf("easypay parse refund: %w", err)
|
||||
return fmt.Errorf("easypay refund non-JSON response (HTTP %d): %s", status, summary)
|
||||
}
|
||||
if resp.Code != easypayCodeSuccess {
|
||||
return nil, fmt.Errorf("easypay refund failed: %s", resp.Msg)
|
||||
if !easyPayResponseCodeIsSuccess(resp.Code) {
|
||||
msg := strings.TrimSpace(resp.Msg)
|
||||
if msg == "" {
|
||||
msg = summary
|
||||
}
|
||||
return fmt.Errorf("easypay refund failed (HTTP %d): %s", status, msg)
|
||||
}
|
||||
return &payment.RefundResponse{RefundID: req.TradeNo, Status: payment.ProviderStatusSuccess}, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func easyPayResponseCodeIsSuccess(code any) bool {
|
||||
switch v := code.(type) {
|
||||
case float64:
|
||||
return int(v) == easypayCodeSuccess
|
||||
case string:
|
||||
n, err := strconv.Atoi(strings.TrimSpace(v))
|
||||
return err == nil && n == easypayCodeSuccess
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func summarizeEasyPayResponse(body []byte) string {
|
||||
summary := strings.Join(strings.Fields(string(body)), " ")
|
||||
if summary == "" {
|
||||
return "<empty>"
|
||||
}
|
||||
if len(summary) > maxEasypayErrorSummary {
|
||||
return summary[:maxEasypayErrorSummary] + "..."
|
||||
}
|
||||
return summary
|
||||
}
|
||||
|
||||
func (e *EasyPay) resolveCID(paymentType string) string {
|
||||
@@ -269,21 +410,34 @@ func (e *EasyPay) resolveCID(paymentType string) string {
|
||||
}
|
||||
|
||||
func (e *EasyPay) post(ctx context.Context, endpoint string, params map[string]string) ([]byte, error) {
|
||||
body, _, err := e.postRaw(ctx, endpoint, params)
|
||||
return body, err
|
||||
}
|
||||
|
||||
func (e *EasyPay) postRaw(ctx context.Context, endpoint string, params map[string]string) ([]byte, int, error) {
|
||||
form := url.Values{}
|
||||
for k, v := range params {
|
||||
form.Set(k, v)
|
||||
}
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, strings.NewReader(form.Encode()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, 0, err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||
resp, err := e.httpClient.Do(req)
|
||||
client := e.httpClient
|
||||
if client == nil {
|
||||
client = &http.Client{Timeout: easypayHTTPTimeout}
|
||||
}
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, 0, err
|
||||
}
|
||||
defer func() { _ = resp.Body.Close() }()
|
||||
return io.ReadAll(io.LimitReader(resp.Body, maxEasypayResponseSize))
|
||||
body, err := io.ReadAll(io.LimitReader(resp.Body, maxEasypayResponseSize))
|
||||
if err != nil {
|
||||
return nil, resp.StatusCode, err
|
||||
}
|
||||
return body, resp.StatusCode, nil
|
||||
}
|
||||
|
||||
func easyPaySign(params map[string]string, pkey string) string {
|
||||
|
||||
196
backend/internal/payment/provider/easypay_refund_test.go
Normal file
196
backend/internal/payment/provider/easypay_refund_test.go
Normal file
@@ -0,0 +1,196 @@
|
||||
package provider
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/payment"
|
||||
)
|
||||
|
||||
func TestNormalizeEasyPayAPIBase(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
input string
|
||||
want string
|
||||
}{
|
||||
{input: "https://zpayz.cn", want: "https://zpayz.cn"},
|
||||
{input: "https://zpayz.cn/", want: "https://zpayz.cn"},
|
||||
{input: "https://zpayz.cn/mapi.php", want: "https://zpayz.cn"},
|
||||
{input: "https://zpayz.cn/submit.php", want: "https://zpayz.cn"},
|
||||
{input: "https://zpayz.cn/api.php", want: "https://zpayz.cn"},
|
||||
{input: "https://zpayz.cn/api.php?act=refund", want: "https://zpayz.cn"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.input, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
if got := normalizeEasyPayAPIBase(tt.input); got != tt.want {
|
||||
t.Fatalf("normalizeEasyPayAPIBase(%q) = %q, want %q", tt.input, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestEasyPayRefundNormalizesAPIBaseAndSendsOutTradeNoOnly(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var gotPath string
|
||||
var gotQuery url.Values
|
||||
var gotForm url.Values
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
gotPath = r.URL.Path
|
||||
gotQuery = r.URL.Query()
|
||||
if err := r.ParseForm(); err != nil {
|
||||
t.Errorf("ParseForm: %v", err)
|
||||
}
|
||||
gotForm = r.PostForm
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
_, _ = w.Write([]byte(`{"code":1,"msg":"ok"}`))
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
provider := newTestEasyPay(t, server.URL+"/mapi.php")
|
||||
resp, err := provider.Refund(context.Background(), payment.RefundRequest{
|
||||
TradeNo: "trade-123",
|
||||
OrderID: "out-456",
|
||||
Amount: "1.50",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Refund returned error: %v", err)
|
||||
}
|
||||
if resp == nil || resp.Status != payment.ProviderStatusSuccess {
|
||||
t.Fatalf("Refund response = %+v, want success", resp)
|
||||
}
|
||||
if gotPath != "/api.php" {
|
||||
t.Fatalf("refund path = %q, want /api.php", gotPath)
|
||||
}
|
||||
if gotQuery.Get("act") != "refund" {
|
||||
t.Fatalf("refund act query = %q, want refund", gotQuery.Get("act"))
|
||||
}
|
||||
for key, want := range map[string]string{
|
||||
"pid": "pid-1",
|
||||
"key": "pkey-1",
|
||||
"out_trade_no": "out-456",
|
||||
"money": "1.50",
|
||||
} {
|
||||
if got := gotForm.Get(key); got != want {
|
||||
t.Fatalf("form[%s] = %q, want %q (form=%v)", key, got, want, gotForm)
|
||||
}
|
||||
}
|
||||
if got := gotForm.Get("trade_no"); got != "" {
|
||||
t.Fatalf("form[trade_no] = %q, want empty (form=%v)", got, gotForm)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEasyPayRefundRetriesWithTradeNoWhenOutTradeNoNotFound(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var gotForms []url.Values
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path != "/api.php" {
|
||||
t.Errorf("refund path = %q, want /api.php", r.URL.Path)
|
||||
}
|
||||
if r.URL.Query().Get("act") != "refund" {
|
||||
t.Errorf("refund act query = %q, want refund", r.URL.Query().Get("act"))
|
||||
}
|
||||
if err := r.ParseForm(); err != nil {
|
||||
t.Errorf("ParseForm: %v", err)
|
||||
}
|
||||
gotForms = append(gotForms, r.PostForm)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
if len(gotForms) == 1 {
|
||||
_, _ = w.Write([]byte(`{"code":0,"msg":"订单编号不存在!"}`))
|
||||
return
|
||||
}
|
||||
_, _ = w.Write([]byte(`{"code":1,"msg":"ok"}`))
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
provider := newTestEasyPay(t, server.URL+"/mapi.php")
|
||||
resp, err := provider.Refund(context.Background(), payment.RefundRequest{
|
||||
TradeNo: "trade-123",
|
||||
OrderID: "out-456",
|
||||
Amount: "1.50",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Refund returned error: %v", err)
|
||||
}
|
||||
if resp == nil || resp.Status != payment.ProviderStatusSuccess || resp.RefundID != "trade-123" {
|
||||
t.Fatalf("Refund response = %+v, want success with trade refund id", resp)
|
||||
}
|
||||
if len(gotForms) != 2 {
|
||||
t.Fatalf("refund attempts = %d, want 2", len(gotForms))
|
||||
}
|
||||
if got := gotForms[0].Get("out_trade_no"); got != "out-456" {
|
||||
t.Fatalf("first form[out_trade_no] = %q, want out-456 (form=%v)", got, gotForms[0])
|
||||
}
|
||||
if got := gotForms[0].Get("trade_no"); got != "" {
|
||||
t.Fatalf("first form[trade_no] = %q, want empty (form=%v)", got, gotForms[0])
|
||||
}
|
||||
if got := gotForms[1].Get("trade_no"); got != "trade-123" {
|
||||
t.Fatalf("second form[trade_no] = %q, want trade-123 (form=%v)", got, gotForms[1])
|
||||
}
|
||||
if got := gotForms[1].Get("out_trade_no"); got != "" {
|
||||
t.Fatalf("second form[out_trade_no] = %q, want empty (form=%v)", got, gotForms[1])
|
||||
}
|
||||
}
|
||||
|
||||
func TestEasyPayRefundResponseErrors(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
statusCode int
|
||||
body string
|
||||
want string
|
||||
}{
|
||||
{name: "html response", statusCode: http.StatusOK, body: "<html>bad config</html>", want: "non-JSON response (HTTP 200): <html>bad config</html>"},
|
||||
{name: "non json response", statusCode: http.StatusOK, body: "not json", want: "non-JSON response (HTTP 200): not json"},
|
||||
{name: "non 2xx response", statusCode: http.StatusBadGateway, body: "bad gateway", want: "HTTP 502: bad gateway"},
|
||||
{name: "empty response", statusCode: http.StatusOK, body: "", want: "empty response (HTTP 200): <empty>"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
|
||||
w.WriteHeader(tt.statusCode)
|
||||
_, _ = w.Write([]byte(tt.body))
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
provider := newTestEasyPay(t, server.URL)
|
||||
_, err := provider.Refund(context.Background(), payment.RefundRequest{
|
||||
OrderID: "out-456",
|
||||
Amount: "1.50",
|
||||
})
|
||||
if err == nil {
|
||||
t.Fatal("Refund returned nil error")
|
||||
}
|
||||
if !strings.Contains(err.Error(), tt.want) {
|
||||
t.Fatalf("Refund error = %q, want substring %q", err.Error(), tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func newTestEasyPay(t *testing.T, apiBase string) *EasyPay {
|
||||
t.Helper()
|
||||
|
||||
provider, err := NewEasyPay("test-instance", map[string]string{
|
||||
"pid": "pid-1",
|
||||
"pkey": "pkey-1",
|
||||
"apiBase": apiBase,
|
||||
"notifyUrl": "https://example.com/notify",
|
||||
"returnUrl": "https://example.com/return",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("NewEasyPay: %v", err)
|
||||
}
|
||||
return provider
|
||||
}
|
||||
@@ -181,6 +181,55 @@ func TestResponsesToAnthropic_TextOnly(t *testing.T) {
|
||||
assert.Equal(t, 5, anth.Usage.OutputTokens)
|
||||
}
|
||||
|
||||
func TestResponsesToAnthropic_CachedTokensUseAnthropicInputSemantics(t *testing.T) {
|
||||
resp := &ResponsesResponse{
|
||||
ID: "resp_cached",
|
||||
Model: "gpt-5.2",
|
||||
Status: "completed",
|
||||
Output: []ResponsesOutput{
|
||||
{
|
||||
Type: "message",
|
||||
Content: []ResponsesContentPart{
|
||||
{Type: "output_text", Text: "Cached response"},
|
||||
},
|
||||
},
|
||||
},
|
||||
Usage: &ResponsesUsage{
|
||||
InputTokens: 54006,
|
||||
OutputTokens: 123,
|
||||
TotalTokens: 54129,
|
||||
InputTokensDetails: &ResponsesInputTokensDetails{
|
||||
CachedTokens: 50688,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
anth := ResponsesToAnthropic(resp, "claude-sonnet-4-5-20250929")
|
||||
assert.Equal(t, 3318, anth.Usage.InputTokens)
|
||||
assert.Equal(t, 50688, anth.Usage.CacheReadInputTokens)
|
||||
assert.Equal(t, 123, anth.Usage.OutputTokens)
|
||||
}
|
||||
|
||||
func TestResponsesToAnthropic_CachedTokensClampInputTokens(t *testing.T) {
|
||||
resp := &ResponsesResponse{
|
||||
ID: "resp_cached_clamp",
|
||||
Model: "gpt-5.2",
|
||||
Status: "completed",
|
||||
Usage: &ResponsesUsage{
|
||||
InputTokens: 100,
|
||||
OutputTokens: 5,
|
||||
InputTokensDetails: &ResponsesInputTokensDetails{
|
||||
CachedTokens: 150,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
anth := ResponsesToAnthropic(resp, "claude-sonnet-4-5-20250929")
|
||||
assert.Equal(t, 0, anth.Usage.InputTokens)
|
||||
assert.Equal(t, 150, anth.Usage.CacheReadInputTokens)
|
||||
assert.Equal(t, 5, anth.Usage.OutputTokens)
|
||||
}
|
||||
|
||||
func TestResponsesToAnthropic_ToolUse(t *testing.T) {
|
||||
resp := &ResponsesResponse{
|
||||
ID: "resp_456",
|
||||
@@ -343,6 +392,36 @@ func TestStreamingTextOnly(t *testing.T) {
|
||||
assert.Equal(t, "message_stop", events[1].Type)
|
||||
}
|
||||
|
||||
func TestStreamingCachedTokensUseAnthropicInputSemantics(t *testing.T) {
|
||||
state := NewResponsesEventToAnthropicState()
|
||||
ResponsesEventToAnthropicEvents(&ResponsesStreamEvent{
|
||||
Type: "response.created",
|
||||
Response: &ResponsesResponse{ID: "resp_cached_stream", Model: "gpt-5.2"},
|
||||
}, state)
|
||||
|
||||
events := ResponsesEventToAnthropicEvents(&ResponsesStreamEvent{
|
||||
Type: "response.completed",
|
||||
Response: &ResponsesResponse{
|
||||
Status: "completed",
|
||||
Usage: &ResponsesUsage{
|
||||
InputTokens: 54006,
|
||||
OutputTokens: 123,
|
||||
TotalTokens: 54129,
|
||||
InputTokensDetails: &ResponsesInputTokensDetails{
|
||||
CachedTokens: 50688,
|
||||
},
|
||||
},
|
||||
},
|
||||
}, state)
|
||||
|
||||
require.Len(t, events, 2)
|
||||
assert.Equal(t, "message_delta", events[0].Type)
|
||||
assert.Equal(t, 3318, events[0].Usage.InputTokens)
|
||||
assert.Equal(t, 50688, events[0].Usage.CacheReadInputTokens)
|
||||
assert.Equal(t, 123, events[0].Usage.OutputTokens)
|
||||
assert.Equal(t, "message_stop", events[1].Type)
|
||||
}
|
||||
|
||||
func TestStreamingToolCall(t *testing.T) {
|
||||
state := NewResponsesEventToAnthropicState()
|
||||
|
||||
|
||||
@@ -84,18 +84,34 @@ func ResponsesToAnthropic(resp *ResponsesResponse, model string) *AnthropicRespo
|
||||
out.StopReason = responsesStatusToAnthropicStopReason(resp.Status, resp.IncompleteDetails, blocks)
|
||||
|
||||
if resp.Usage != nil {
|
||||
out.Usage = AnthropicUsage{
|
||||
InputTokens: resp.Usage.InputTokens,
|
||||
OutputTokens: resp.Usage.OutputTokens,
|
||||
}
|
||||
if resp.Usage.InputTokensDetails != nil {
|
||||
out.Usage.CacheReadInputTokens = resp.Usage.InputTokensDetails.CachedTokens
|
||||
}
|
||||
out.Usage = anthropicUsageFromResponsesUsage(resp.Usage)
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
func anthropicUsageFromResponsesUsage(usage *ResponsesUsage) AnthropicUsage {
|
||||
if usage == nil {
|
||||
return AnthropicUsage{}
|
||||
}
|
||||
|
||||
cachedTokens := 0
|
||||
if usage.InputTokensDetails != nil {
|
||||
cachedTokens = usage.InputTokensDetails.CachedTokens
|
||||
}
|
||||
|
||||
inputTokens := usage.InputTokens - cachedTokens
|
||||
if inputTokens < 0 {
|
||||
inputTokens = 0
|
||||
}
|
||||
|
||||
return AnthropicUsage{
|
||||
InputTokens: inputTokens,
|
||||
OutputTokens: usage.OutputTokens,
|
||||
CacheReadInputTokens: cachedTokens,
|
||||
}
|
||||
}
|
||||
|
||||
func responsesStatusToAnthropicStopReason(status string, details *ResponsesIncompleteDetails, blocks []AnthropicContentBlock) string {
|
||||
switch status {
|
||||
case "incomplete":
|
||||
@@ -466,11 +482,10 @@ func resToAnthHandleCompleted(evt *ResponsesStreamEvent, state *ResponsesEventTo
|
||||
stopReason := "end_turn"
|
||||
if evt.Response != nil {
|
||||
if evt.Response.Usage != nil {
|
||||
state.InputTokens = evt.Response.Usage.InputTokens
|
||||
state.OutputTokens = evt.Response.Usage.OutputTokens
|
||||
if evt.Response.Usage.InputTokensDetails != nil {
|
||||
state.CacheReadInputTokens = evt.Response.Usage.InputTokensDetails.CachedTokens
|
||||
}
|
||||
usage := anthropicUsageFromResponsesUsage(evt.Response.Usage)
|
||||
state.InputTokens = usage.InputTokens
|
||||
state.OutputTokens = usage.OutputTokens
|
||||
state.CacheReadInputTokens = usage.CacheReadInputTokens
|
||||
}
|
||||
switch evt.Response.Status {
|
||||
case "incomplete":
|
||||
|
||||
@@ -390,7 +390,7 @@ func convertResponsesToAnthropicTools(tools []ResponsesTool) []AnthropicTool {
|
||||
var out []AnthropicTool
|
||||
for _, t := range tools {
|
||||
switch t.Type {
|
||||
case "web_search":
|
||||
case "web_search", "google_search", "web_search_20250305":
|
||||
out = append(out, AnthropicTool{
|
||||
Type: "web_search_20250305",
|
||||
Name: "web_search",
|
||||
|
||||
@@ -12,17 +12,23 @@ import "encoding/json"
|
||||
|
||||
// AnthropicRequest is the request body for POST /v1/messages.
|
||||
type AnthropicRequest struct {
|
||||
Model string `json:"model"`
|
||||
MaxTokens int `json:"max_tokens"`
|
||||
System json.RawMessage `json:"system,omitempty"` // string or []AnthropicContentBlock
|
||||
Messages []AnthropicMessage `json:"messages"`
|
||||
Tools []AnthropicTool `json:"tools,omitempty"`
|
||||
Stream bool `json:"stream,omitempty"`
|
||||
Temperature *float64 `json:"temperature,omitempty"`
|
||||
TopP *float64 `json:"top_p,omitempty"`
|
||||
StopSeqs []string `json:"stop_sequences,omitempty"`
|
||||
Thinking *AnthropicThinking `json:"thinking,omitempty"`
|
||||
ToolChoice json.RawMessage `json:"tool_choice,omitempty"`
|
||||
Model string `json:"model"`
|
||||
MaxTokens int `json:"max_tokens"`
|
||||
System json.RawMessage `json:"system,omitempty"` // string or []AnthropicContentBlock
|
||||
Messages []AnthropicMessage `json:"messages"`
|
||||
Tools []AnthropicTool `json:"tools,omitempty"`
|
||||
Stream bool `json:"stream,omitempty"`
|
||||
Temperature *float64 `json:"temperature,omitempty"`
|
||||
TopP *float64 `json:"top_p,omitempty"`
|
||||
StopSeqs []string `json:"stop_sequences,omitempty"`
|
||||
Thinking *AnthropicThinking `json:"thinking,omitempty"`
|
||||
ToolChoice json.RawMessage `json:"tool_choice,omitempty"`
|
||||
// Metadata 会被原样透传给上游。OAuth/Claude-Code 路径依赖 metadata.user_id
|
||||
// 参与上游的"是否为官方 Claude Code 请求"判定;如果经由本结构体重新序列化
|
||||
// 时丢弃该字段,网关侧后续的 metadata 重写(ensureClaudeOAuthMetadataUserID/
|
||||
// RewriteUserIDWithMasking) 在 body 里拿不到起点,就无法重建一个合法的
|
||||
// user_id,进而导致请求被归类为第三方 app。
|
||||
Metadata json.RawMessage `json:"metadata,omitempty"`
|
||||
OutputConfig *AnthropicOutputConfig `json:"output_config,omitempty"`
|
||||
}
|
||||
|
||||
@@ -76,10 +82,18 @@ type AnthropicImageSource struct {
|
||||
|
||||
// AnthropicTool describes a tool available to the model.
|
||||
type AnthropicTool struct {
|
||||
Type string `json:"type,omitempty"` // e.g. "web_search_20250305" for server tools
|
||||
Name string `json:"name"`
|
||||
Description string `json:"description,omitempty"`
|
||||
InputSchema json.RawMessage `json:"input_schema"` // JSON Schema object
|
||||
Type string `json:"type,omitempty"` // e.g. "web_search_20250305" for server tools
|
||||
Name string `json:"name"`
|
||||
Description string `json:"description,omitempty"`
|
||||
InputSchema json.RawMessage `json:"input_schema"` // JSON Schema object
|
||||
CacheControl *AnthropicCacheControl `json:"cache_control,omitempty"`
|
||||
}
|
||||
|
||||
// AnthropicCacheControl 对应 Anthropic API 的 cache_control 字段。
|
||||
// ttl 默认由调用方决定;本项目策略见 claude.DefaultCacheControlTTL。
|
||||
type AnthropicCacheControl struct {
|
||||
Type string `json:"type"` // "ephemeral"
|
||||
TTL string `json:"ttl,omitempty"` // "5m" / "1h" / 省略=默认 5m(由 Anthropic 判定)
|
||||
}
|
||||
|
||||
// AnthropicResponse is the non-streaming response from POST /v1/messages.
|
||||
|
||||
@@ -4,6 +4,12 @@ package claude
|
||||
// Claude Code 客户端相关常量
|
||||
|
||||
// Beta header 常量
|
||||
//
|
||||
// 这里的常量对齐真实 Claude Code CLI 的最新流量(截至 2026-04)。
|
||||
// 选型参考:与 Parrot (src/transform/cc_mimicry.py) 的 BETAS 保持一致,
|
||||
// 原因:Anthropic 上游会基于 anthropic-beta 的完整集合判定请求来源;
|
||||
// 缺少任何"官方 Claude Code 请求才会带"的 beta,都会被降级到第三方额度,
|
||||
// 对应报错:`Third-party apps now draw from your extra usage, not your plan limits.`
|
||||
const (
|
||||
BetaOAuth = "oauth-2025-04-20"
|
||||
BetaClaudeCode = "claude-code-20250219"
|
||||
@@ -12,6 +18,13 @@ const (
|
||||
BetaTokenCounting = "token-counting-2024-11-01"
|
||||
BetaContext1M = "context-1m-2025-08-07"
|
||||
BetaFastMode = "fast-mode-2026-02-01"
|
||||
|
||||
// 新增(对齐官方 CLI 2.1.9x 以来的流量)
|
||||
BetaPromptCachingScope = "prompt-caching-scope-2026-01-05"
|
||||
BetaEffort = "effort-2025-11-24"
|
||||
BetaRedactThinking = "redact-thinking-2026-02-12"
|
||||
BetaContextManagement = "context-management-2025-06-27"
|
||||
BetaExtendedCacheTTL = "extended-cache-ttl-2025-04-11"
|
||||
)
|
||||
|
||||
// DroppedBetas 是转发时需要从 anthropic-beta header 中移除的 beta token 列表。
|
||||
@@ -44,11 +57,43 @@ const APIKeyBetaHeader = BetaClaudeCode + "," + BetaInterleavedThinking + "," +
|
||||
// APIKeyHaikuBetaHeader Haiku 模型在 API-key 账号下使用的 anthropic-beta header(不包含 oauth / claude-code)
|
||||
const APIKeyHaikuBetaHeader = BetaInterleavedThinking
|
||||
|
||||
// DefaultCacheControlTTL 是网关代理为自己生成的 cache_control 块默认使用的 ttl。
|
||||
// 真实 Claude Code CLI 当前使用 "1h",但本仓策略是"客户端透传 ttl 优先;
|
||||
// 客户端缺省时统一使用 5m",这样既不浪费 1h 缓存额度,也保留客户端自定义能力。
|
||||
const DefaultCacheControlTTL = "5m"
|
||||
|
||||
// CLICurrentVersion 是 sub2api 当前对外伪装的 Claude Code CLI 版本号(三段 semver)。
|
||||
// 用于 billing attribution block 中的 cc_version=X.Y.Z.{fp} 前缀以及 fingerprint 计算。
|
||||
// 必须与 DefaultHeaders["User-Agent"] 中的版本号严格一致;不一致会被 Anthropic 判第三方。
|
||||
const CLICurrentVersion = "2.1.92"
|
||||
|
||||
// FullClaudeCodeMimicryBetas 返回最"像"真实 Claude Code CLI 的完整 beta 列表,
|
||||
// 用于 OAuth 账号伪装成 Claude Code 时使用。
|
||||
// 顺序与真实 CLI 抓包一致。
|
||||
//
|
||||
// 使用建议:
|
||||
// - OAuth 账号 + 非 haiku:追加这整份列表,再按需保留 client 带来的 beta。
|
||||
// - OAuth 账号 + haiku:Anthropic 对 haiku 不做 third-party 判定,使用 HaikuBetaHeader 即可。
|
||||
// - API-key 账号:不要使用本函数,参见 APIKeyBetaHeader。
|
||||
func FullClaudeCodeMimicryBetas() []string {
|
||||
return []string{
|
||||
BetaClaudeCode,
|
||||
BetaOAuth,
|
||||
BetaInterleavedThinking,
|
||||
BetaPromptCachingScope,
|
||||
BetaEffort,
|
||||
BetaRedactThinking,
|
||||
BetaContextManagement,
|
||||
BetaExtendedCacheTTL,
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultHeaders 是 Claude Code 客户端默认请求头。
|
||||
var DefaultHeaders = map[string]string{
|
||||
// Keep these in sync with recent Claude CLI traffic to reduce the chance
|
||||
// that Claude Code-scoped OAuth credentials are rejected as "non-CLI" usage.
|
||||
"User-Agent": "claude-cli/2.1.22 (external, cli)",
|
||||
// 版本参考:对齐 Parrot (src/transform/cc_mimicry.py:49) 的 CLI_USER_AGENT。
|
||||
"User-Agent": "claude-cli/2.1.92 (external, cli)",
|
||||
"X-Stainless-Lang": "js",
|
||||
"X-Stainless-Package-Version": "0.70.0",
|
||||
"X-Stainless-OS": "Linux",
|
||||
|
||||
@@ -15,6 +15,7 @@ type Model struct {
|
||||
|
||||
// DefaultModels OpenAI models list
|
||||
var DefaultModels = []Model{
|
||||
{ID: "gpt-5.5", Object: "model", Created: 1776873600, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5.5"},
|
||||
{ID: "gpt-5.4", Object: "model", Created: 1738368000, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5.4"},
|
||||
{ID: "gpt-5.4-mini", Object: "model", Created: 1738368000, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5.4 Mini"},
|
||||
{ID: "gpt-5.3-codex", Object: "model", Created: 1735689600, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5.3 Codex"},
|
||||
|
||||
@@ -0,0 +1,14 @@
|
||||
package repository
|
||||
|
||||
import "testing"
|
||||
|
||||
// TestShouldEnqueueSchedulerOutboxForExtraUpdates_CompactCapabilityKeysAreRelevant
// verifies that extra-update maps touching the OpenAI compact-capability keys
// are treated as scheduler-relevant, i.e. they must trigger an outbox enqueue.
func TestShouldEnqueueSchedulerOutboxForExtraUpdates_CompactCapabilityKeysAreRelevant(t *testing.T) {
	updates := map[string]any{
		"openai_compact_supported":  true,
		"openai_compact_checked_at": "2026-04-10T10:00:00Z",
	}

	if !shouldEnqueueSchedulerOutboxForExtraUpdates(updates) {
		t.Fatalf("expected compact capability updates to enqueue scheduler outbox")
	}
}
|
||||
762
backend/internal/repository/affiliate_repo.go
Normal file
762
backend/internal/repository/affiliate_repo.go
Normal file
@@ -0,0 +1,762 @@
|
||||
package repository
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
dbent "github.com/Wei-Shaw/sub2api/ent"
|
||||
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
"github.com/lib/pq"
|
||||
)
|
||||
|
||||
const (
	// affiliateCodeLength is the character length of generated invite codes.
	affiliateCodeLength = 12
	// affiliateCodeMaxAttempts bounds the retries when a freshly generated
	// random code collides with an existing one.
	affiliateCodeMaxAttempts = 12
)

// affiliateCodeCharset holds the characters used for invite codes. The
// literal omits I, O, 0 and 1, which are easy to confuse visually.
var affiliateCodeCharset = []byte("ABCDEFGHJKLMNPQRSTUVWXYZ23456789")

// affiliateQueryExecer is the minimal raw-SQL surface this package needs; it
// is satisfied by both *dbent.Client and a transaction client, so the query
// helpers below work against either.
type affiliateQueryExecer interface {
	QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error)
	ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error)
}

// affiliateRepository implements service.AffiliateRepository on top of the
// ent client, using raw SQL for the affiliate tables.
type affiliateRepository struct {
	client *dbent.Client
}

// NewAffiliateRepository builds an affiliate repository. The *sql.DB argument
// is currently unused (kept for constructor-signature compatibility).
func NewAffiliateRepository(client *dbent.Client, _ *sql.DB) service.AffiliateRepository {
	return &affiliateRepository{client: client}
}
|
||||
|
||||
func (r *affiliateRepository) EnsureUserAffiliate(ctx context.Context, userID int64) (*service.AffiliateSummary, error) {
|
||||
if userID <= 0 {
|
||||
return nil, service.ErrUserNotFound
|
||||
}
|
||||
client := clientFromContext(ctx, r.client)
|
||||
return ensureUserAffiliateWithClient(ctx, client, userID)
|
||||
}
|
||||
|
||||
// GetAffiliateByCode resolves an affiliate profile by invite code. The lookup
// is tolerant of surrounding whitespace and letter case (queryAffiliateByCode
// trims and upper-cases the input before matching).
func (r *affiliateRepository) GetAffiliateByCode(ctx context.Context, code string) (*service.AffiliateSummary, error) {
	client := clientFromContext(ctx, r.client)
	return queryAffiliateByCode(ctx, client, code)
}
|
||||
|
||||
// BindInviter links userID to inviterID inside a single transaction. Both
// affiliate rows are created on demand first. The bind is first-write-wins:
// if the user already has an inviter, (false, nil) is returned and nothing
// changes. On a successful bind the inviter's aff_count is incremented in the
// same transaction.
func (r *affiliateRepository) BindInviter(ctx context.Context, userID, inviterID int64) (bool, error) {
	var bound bool
	err := r.withTx(ctx, func(txCtx context.Context, txClient *dbent.Client) error {
		if _, err := ensureUserAffiliateWithClient(txCtx, txClient, userID); err != nil {
			return err
		}
		if _, err := ensureUserAffiliateWithClient(txCtx, txClient, inviterID); err != nil {
			return err
		}

		// "inviter_id IS NULL" makes the update a no-op when the user is
		// already bound, so repeated calls cannot rebind to someone else.
		res, err := txClient.ExecContext(txCtx,
			"UPDATE user_affiliates SET inviter_id = $1, updated_at = NOW() WHERE user_id = $2 AND inviter_id IS NULL",
			inviterID, userID,
		)
		if err != nil {
			return fmt.Errorf("bind inviter: %w", err)
		}
		affected, _ := res.RowsAffected()
		if affected == 0 {
			// Already bound previously; report bound=false without error.
			bound = false
			return nil
		}

		if _, err = txClient.ExecContext(txCtx,
			"UPDATE user_affiliates SET aff_count = aff_count + 1, updated_at = NOW() WHERE user_id = $1",
			inviterID,
		); err != nil {
			return fmt.Errorf("increment inviter aff_count: %w", err)
		}
		bound = true
		return nil
	})
	if err != nil {
		return false, err
	}
	return bound, nil
}
|
||||
|
||||
// AccrueQuota credits rebate quota to an inviter for activity of a specific
// invitee. With freezeHours > 0 the amount lands in the frozen bucket and a
// ledger row records when it matures; with freezeHours == 0 it goes straight
// to available quota. Either way aff_history_quota grows by the same amount.
// Returns (false, nil) when amount <= 0 or the inviter has no affiliate row.
func (r *affiliateRepository) AccrueQuota(ctx context.Context, inviterID, inviteeUserID int64, amount float64, freezeHours int) (bool, error) {
	if amount <= 0 {
		return false, nil
	}

	var applied bool
	err := r.withTx(ctx, func(txCtx context.Context, txClient *dbent.Client) error {
		// freezeHours > 0: add to frozen quota; == 0: add to available quota directly
		var updateSQL string
		if freezeHours > 0 {
			updateSQL = "UPDATE user_affiliates SET aff_frozen_quota = aff_frozen_quota + $1, aff_history_quota = aff_history_quota + $1, updated_at = NOW() WHERE user_id = $2"
		} else {
			updateSQL = "UPDATE user_affiliates SET aff_quota = aff_quota + $1, aff_history_quota = aff_history_quota + $1, updated_at = NOW() WHERE user_id = $2"
		}
		res, err := txClient.ExecContext(txCtx, updateSQL, amount, inviterID)
		if err != nil {
			return err
		}
		affected, _ := res.RowsAffected()
		if affected == 0 {
			// No affiliate row for the inviter — nothing to credit.
			applied = false
			return nil
		}

		// Record the accrual in the ledger; frozen accruals carry a
		// frozen_until timestamp so thawing can find matured entries.
		if freezeHours > 0 {
			if _, err = txClient.ExecContext(txCtx, `
			INSERT INTO user_affiliate_ledger (user_id, action, amount, source_user_id, frozen_until, created_at, updated_at)
			VALUES ($1, 'accrue', $2, $3, NOW() + make_interval(hours => $4), NOW(), NOW())`,
				inviterID, amount, inviteeUserID, freezeHours); err != nil {
				return fmt.Errorf("insert affiliate accrue ledger: %w", err)
			}
		} else {
			if _, err = txClient.ExecContext(txCtx, `
			INSERT INTO user_affiliate_ledger (user_id, action, amount, source_user_id, created_at, updated_at)
			VALUES ($1, 'accrue', $2, $3, NOW(), NOW())`, inviterID, amount, inviteeUserID); err != nil {
				return fmt.Errorf("insert affiliate accrue ledger: %w", err)
			}
		}

		applied = true
		return nil
	})
	if err != nil {
		return false, err
	}
	return applied, nil
}
|
||||
|
||||
func (r *affiliateRepository) GetAccruedRebateFromInvitee(ctx context.Context, inviterID, inviteeUserID int64) (float64, error) {
|
||||
client := clientFromContext(ctx, r.client)
|
||||
rows, err := client.QueryContext(ctx,
|
||||
`SELECT COALESCE(SUM(amount), 0)::double precision FROM user_affiliate_ledger WHERE user_id = $1 AND source_user_id = $2 AND action = 'accrue'`,
|
||||
inviterID, inviteeUserID)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("query accrued rebate from invitee: %w", err)
|
||||
}
|
||||
defer func() { _ = rows.Close() }()
|
||||
var total float64
|
||||
if rows.Next() {
|
||||
if err := rows.Scan(&total); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
return total, rows.Close()
|
||||
}
|
||||
|
||||
func (r *affiliateRepository) ThawFrozenQuota(ctx context.Context, userID int64) (float64, error) {
|
||||
var thawed float64
|
||||
err := r.withTx(ctx, func(txCtx context.Context, txClient *dbent.Client) error {
|
||||
var err error
|
||||
thawed, err = thawFrozenQuotaTx(txCtx, txClient, userID)
|
||||
return err
|
||||
})
|
||||
return thawed, err
|
||||
}
|
||||
|
||||
// thawFrozenQuotaTx moves matured frozen quota to available quota within an
// existing tx. It clears frozen_until on every matured ledger row, sums the
// released amounts, then shifts that sum from aff_frozen_quota to aff_quota
// (clamped at zero). Returns the released amount; 0 when nothing matured.
func thawFrozenQuotaTx(txCtx context.Context, txClient *dbent.Client, userID int64) (float64, error) {
	// Single statement: mark matured rows as thawed and return their total,
	// so a row cannot be double-counted by concurrent thaws in this tx.
	rows, err := txClient.QueryContext(txCtx, `
		WITH matured AS (
			UPDATE user_affiliate_ledger
			SET frozen_until = NULL, updated_at = NOW()
			WHERE user_id = $1
			  AND frozen_until IS NOT NULL
			  AND frozen_until <= NOW()
			RETURNING amount
		)
		SELECT COALESCE(SUM(amount), 0) FROM matured`, userID)
	if err != nil {
		return 0, fmt.Errorf("thaw frozen quota: %w", err)
	}
	defer func() { _ = rows.Close() }()

	var thawed float64
	if rows.Next() {
		if err := rows.Scan(&thawed); err != nil {
			return 0, err
		}
	}
	// Explicit Close before the follow-up UPDATE frees the connection/cursor;
	// the deferred Close then becomes a no-op.
	if err := rows.Close(); err != nil {
		return 0, err
	}
	if thawed <= 0 {
		return 0, nil
	}

	_, err = txClient.ExecContext(txCtx, `
		UPDATE user_affiliates
		SET aff_quota = aff_quota + $1,
		    aff_frozen_quota = GREATEST(aff_frozen_quota - $1, 0),
		    updated_at = NOW()
		WHERE user_id = $2`, thawed, userID)
	if err != nil {
		return 0, fmt.Errorf("move thawed quota: %w", err)
	}
	return thawed, nil
}
|
||||
|
||||
// TransferQuotaToBalance converts the user's entire available affiliate quota
// into account balance, atomically. Steps inside one transaction:
//  1. ensure the affiliate row exists and thaw any matured frozen quota;
//  2. claim the current aff_quota under FOR UPDATE and zero it;
//  3. credit the user's balance and total_recharged by the claimed amount;
//  4. write a 'transfer' ledger entry.
// Returns (transferred, newBalance). ErrAffiliateQuotaEmpty when there is
// nothing to transfer; ErrUserNotFound when the user row is missing.
func (r *affiliateRepository) TransferQuotaToBalance(ctx context.Context, userID int64) (float64, float64, error) {
	var transferred float64
	var newBalance float64

	err := r.withTx(ctx, func(txCtx context.Context, txClient *dbent.Client) error {
		if _, err := ensureUserAffiliateWithClient(txCtx, txClient, userID); err != nil {
			return err
		}

		// Thaw any matured frozen quota before transfer.
		if _, err := thawFrozenQuotaTx(txCtx, txClient, userID); err != nil {
			return fmt.Errorf("thaw before transfer: %w", err)
		}

		// FOR UPDATE locks the row so the amount we read is exactly the
		// amount the follow-up UPDATE clears — no concurrent accrual can
		// slip in between read and clear.
		rows, err := txClient.QueryContext(txCtx, `
			WITH claimed AS (
				SELECT aff_quota::double precision AS amount
				FROM user_affiliates
				WHERE user_id = $1
				  AND aff_quota > 0
				FOR UPDATE
			),
			cleared AS (
				UPDATE user_affiliates ua
				SET aff_quota = 0,
				    updated_at = NOW()
				FROM claimed c
				WHERE ua.user_id = $1
				RETURNING c.amount
			)
			SELECT amount
			FROM cleared`, userID)
		if err != nil {
			return fmt.Errorf("claim affiliate quota: %w", err)
		}

		// rows is closed manually (not deferred) so the connection is free
		// for the ent update below while still inside the tx.
		if !rows.Next() {
			_ = rows.Close()
			if err := rows.Err(); err != nil {
				return err
			}
			return service.ErrAffiliateQuotaEmpty
		}
		if err := rows.Scan(&transferred); err != nil {
			_ = rows.Close()
			return err
		}
		if err := rows.Close(); err != nil {
			return err
		}
		if transferred <= 0 {
			return service.ErrAffiliateQuotaEmpty
		}

		affected, err := txClient.User.Update().
			Where(user.IDEQ(userID)).
			AddBalance(transferred).
			AddTotalRecharged(transferred).
			Save(txCtx)
		if err != nil {
			return fmt.Errorf("credit user balance by affiliate quota: %w", err)
		}
		if affected == 0 {
			return service.ErrUserNotFound
		}

		// Re-read the balance so the caller gets the post-credit value.
		newBalance, err = queryUserBalance(txCtx, txClient, userID)
		if err != nil {
			return err
		}

		if _, err = txClient.ExecContext(txCtx, `
			INSERT INTO user_affiliate_ledger (user_id, action, amount, source_user_id, created_at, updated_at)
			VALUES ($1, 'transfer', $2, NULL, NOW(), NOW())`, userID, transferred); err != nil {
			return fmt.Errorf("insert affiliate transfer ledger: %w", err)
		}

		return nil
	})
	if err != nil {
		return 0, 0, err
	}

	return transferred, newBalance, nil
}
|
||||
|
||||
// ListInvitees returns up to limit users invited by inviterID, newest first,
// each with the total rebate the inviter accrued from them. A non-positive
// limit defaults to 100.
func (r *affiliateRepository) ListInvitees(ctx context.Context, inviterID int64, limit int) ([]service.AffiliateInvitee, error) {
	if limit <= 0 {
		limit = 100
	}
	client := clientFromContext(ctx, r.client)
	// LEFT JOINs keep invitees with no ledger entries (total_rebate = 0) and
	// invitees whose users row is missing (empty email/username).
	rows, err := client.QueryContext(ctx, `
		SELECT ua.user_id,
		       COALESCE(u.email, ''),
		       COALESCE(u.username, ''),
		       ua.created_at,
		       COALESCE(SUM(ual.amount), 0)::double precision AS total_rebate
		FROM user_affiliates ua
		LEFT JOIN users u ON u.id = ua.user_id
		LEFT JOIN user_affiliate_ledger ual
		       ON ual.user_id = $1
		      AND ual.source_user_id = ua.user_id
		      AND ual.action = 'accrue'
		WHERE ua.inviter_id = $1
		GROUP BY ua.user_id, u.email, u.username, ua.created_at
		ORDER BY ua.created_at DESC
		LIMIT $2`, inviterID, limit)
	if err != nil {
		return nil, err
	}
	defer func() { _ = rows.Close() }()

	invitees := make([]service.AffiliateInvitee, 0)
	for rows.Next() {
		var item service.AffiliateInvitee
		var createdAt time.Time
		if err := rows.Scan(&item.UserID, &item.Email, &item.Username, &createdAt, &item.TotalRebate); err != nil {
			return nil, err
		}
		// createdAt is a fresh variable per iteration, so taking its address
		// is safe here.
		item.CreatedAt = &createdAt
		invitees = append(invitees, item)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return invitees, nil
}
|
||||
|
||||
// withTx runs fn inside a transaction. If the incoming ctx already carries a
// tx (dbent.TxFromContext), that transaction is reused and commit/rollback is
// left to the outer owner — this is what lets callers compose affiliate
// operations into a larger all-or-nothing unit. Otherwise a new tx is opened,
// rolled back on error (the deferred Rollback is a no-op after Commit), and
// committed on success.
func (r *affiliateRepository) withTx(ctx context.Context, fn func(txCtx context.Context, txClient *dbent.Client) error) error {
	if tx := dbent.TxFromContext(ctx); tx != nil {
		return fn(ctx, tx.Client())
	}

	tx, err := r.client.Tx(ctx)
	if err != nil {
		return fmt.Errorf("begin affiliate transaction: %w", err)
	}
	defer func() { _ = tx.Rollback() }()

	txCtx := dbent.NewTxContext(ctx, tx)
	if err := fn(txCtx, tx.Client()); err != nil {
		return err
	}

	if err := tx.Commit(); err != nil {
		return fmt.Errorf("commit affiliate transaction: %w", err)
	}
	return nil
}
|
||||
|
||||
// ensureUserAffiliateWithClient fetches the affiliate row for userID,
// creating it with a fresh random code on first access. Insertion uses
// ON CONFLICT (user_id) DO NOTHING, so a concurrent creator wins harmlessly;
// a unique violation on the code column triggers a retry with a new code, up
// to affiliateCodeMaxAttempts times. The final read returns the row whoever
// ended up inserting it (or ErrAffiliateProfileNotFound if every attempt
// collided).
func ensureUserAffiliateWithClient(ctx context.Context, client affiliateQueryExecer, userID int64) (*service.AffiliateSummary, error) {
	summary, err := queryAffiliateByUserID(ctx, client, userID)
	if err == nil {
		return summary, nil
	}
	if !errors.Is(err, service.ErrAffiliateProfileNotFound) {
		return nil, err
	}

	for i := 0; i < affiliateCodeMaxAttempts; i++ {
		code, codeErr := generateAffiliateCode()
		if codeErr != nil {
			return nil, codeErr
		}
		_, insertErr := client.ExecContext(ctx, `
			INSERT INTO user_affiliates (user_id, aff_code, created_at, updated_at)
			VALUES ($1, $2, NOW(), NOW())
			ON CONFLICT (user_id) DO NOTHING`, userID, code)
		if insertErr == nil {
			break
		}
		// Code collision: try another random code.
		if isAffiliateUniqueViolation(insertErr) {
			continue
		}
		return nil, insertErr
	}

	return queryAffiliateByUserID(ctx, client, userID)
}
|
||||
|
||||
// queryAffiliateByUserID loads one affiliate summary by user id. Missing rows
// map to service.ErrAffiliateProfileNotFound. inviter_id and
// aff_rebate_rate_percent are nullable columns and are surfaced as nil
// pointers when NULL.
func queryAffiliateByUserID(ctx context.Context, client affiliateQueryExecer, userID int64) (*service.AffiliateSummary, error) {
	rows, err := client.QueryContext(ctx, `
		SELECT user_id,
		       aff_code,
		       aff_code_custom,
		       aff_rebate_rate_percent,
		       inviter_id,
		       aff_count,
		       aff_quota::double precision,
		       aff_frozen_quota::double precision,
		       aff_history_quota::double precision,
		       created_at,
		       updated_at
		FROM user_affiliates
		WHERE user_id = $1`, userID)
	if err != nil {
		return nil, err
	}
	defer func() { _ = rows.Close() }()
	if !rows.Next() {
		// Distinguish "no row" from an iteration error before mapping to
		// the sentinel.
		if err := rows.Err(); err != nil {
			return nil, err
		}
		return nil, service.ErrAffiliateProfileNotFound
	}

	var out service.AffiliateSummary
	var inviterID sql.NullInt64
	var rebateRate sql.NullFloat64
	if err := rows.Scan(
		&out.UserID,
		&out.AffCode,
		&out.AffCodeCustom,
		&rebateRate,
		&inviterID,
		&out.AffCount,
		&out.AffQuota,
		&out.AffFrozenQuota,
		&out.AffHistoryQuota,
		&out.CreatedAt,
		&out.UpdatedAt,
	); err != nil {
		return nil, err
	}
	if inviterID.Valid {
		out.InviterID = &inviterID.Int64
	}
	if rebateRate.Valid {
		// Copy into a local before taking the address so the pointer does
		// not alias the scratch NullFloat64.
		v := rebateRate.Float64
		out.AffRebateRatePercent = &v
	}
	return &out, nil
}
|
||||
|
||||
// queryAffiliateByCode loads one affiliate summary by invite code. The input
// is trimmed and upper-cased before matching, so lookups are tolerant of
// casing and whitespace. Missing rows map to ErrAffiliateProfileNotFound.
func queryAffiliateByCode(ctx context.Context, client affiliateQueryExecer, code string) (*service.AffiliateSummary, error) {
	rows, err := client.QueryContext(ctx, `
		SELECT user_id,
		       aff_code,
		       aff_code_custom,
		       aff_rebate_rate_percent,
		       inviter_id,
		       aff_count,
		       aff_quota::double precision,
		       aff_frozen_quota::double precision,
		       aff_history_quota::double precision,
		       created_at,
		       updated_at
		FROM user_affiliates
		WHERE aff_code = $1
		LIMIT 1`, strings.ToUpper(strings.TrimSpace(code)))
	if err != nil {
		return nil, err
	}
	defer func() { _ = rows.Close() }()

	if !rows.Next() {
		if err := rows.Err(); err != nil {
			return nil, err
		}
		return nil, service.ErrAffiliateProfileNotFound
	}

	var out service.AffiliateSummary
	var inviterID sql.NullInt64
	var rebateRate sql.NullFloat64
	if err := rows.Scan(
		&out.UserID,
		&out.AffCode,
		&out.AffCodeCustom,
		&rebateRate,
		&inviterID,
		&out.AffCount,
		&out.AffQuota,
		&out.AffFrozenQuota,
		&out.AffHistoryQuota,
		&out.CreatedAt,
		&out.UpdatedAt,
	); err != nil {
		return nil, err
	}
	// NULLable columns become nil pointers on the summary.
	if inviterID.Valid {
		out.InviterID = &inviterID.Int64
	}
	if rebateRate.Valid {
		v := rebateRate.Float64
		out.AffRebateRatePercent = &v
	}
	return &out, nil
}
|
||||
|
||||
// queryUserBalance reads a user's balance as float64. A missing user row maps
// to service.ErrUserNotFound.
func queryUserBalance(ctx context.Context, client affiliateQueryExecer, userID int64) (float64, error) {
	rows, err := client.QueryContext(ctx,
		"SELECT balance::double precision FROM users WHERE id = $1 LIMIT 1",
		userID,
	)
	if err != nil {
		return 0, err
	}
	defer func() { _ = rows.Close() }()
	if !rows.Next() {
		if err := rows.Err(); err != nil {
			return 0, err
		}
		return 0, service.ErrUserNotFound
	}
	var balance float64
	if err := rows.Scan(&balance); err != nil {
		return 0, err
	}
	return balance, nil
}
|
||||
|
||||
func generateAffiliateCode() (string, error) {
|
||||
buf := make([]byte, affiliateCodeLength)
|
||||
if _, err := rand.Read(buf); err != nil {
|
||||
return "", fmt.Errorf("generate affiliate code: %w", err)
|
||||
}
|
||||
for i := range buf {
|
||||
buf[i] = affiliateCodeCharset[int(buf[i])%len(affiliateCodeCharset)]
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
func isAffiliateUniqueViolation(err error) bool {
|
||||
var pqErr *pq.Error
|
||||
if errors.As(err, &pqErr) {
|
||||
return string(pqErr.Code) == "23505"
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// UpdateUserAffCode rewrites a user's invite code to a custom, personalized
// code (trimmed and upper-cased). A uniqueness conflict on the code returns
// ErrAffiliateCodeTaken; an empty code returns ErrAffiliateCodeInvalid.
func (r *affiliateRepository) UpdateUserAffCode(ctx context.Context, userID int64, newCode string) error {
	if userID <= 0 {
		return service.ErrUserNotFound
	}
	code := strings.ToUpper(strings.TrimSpace(newCode))
	if code == "" {
		return service.ErrAffiliateCodeInvalid
	}

	return r.withTx(ctx, func(txCtx context.Context, txClient *dbent.Client) error {
		// Make sure the affiliate row exists before updating it.
		if _, err := ensureUserAffiliateWithClient(txCtx, txClient, userID); err != nil {
			return err
		}
		res, err := txClient.ExecContext(txCtx, `
			UPDATE user_affiliates
			SET aff_code = $1,
			    aff_code_custom = true,
			    updated_at = NOW()
			WHERE user_id = $2`, code, userID)
		if err != nil {
			if isAffiliateUniqueViolation(err) {
				return service.ErrAffiliateCodeTaken
			}
			return fmt.Errorf("update aff_code: %w", err)
		}
		affected, _ := res.RowsAffected()
		if affected == 0 {
			return service.ErrUserNotFound
		}
		return nil
	})
}
|
||||
|
||||
// ResetUserAffCode restores aff_code to a system-generated random code and
// clears the aff_code_custom flag. On a code collision it retries with a new
// random code, up to affiliateCodeMaxAttempts times. Returns the new code.
func (r *affiliateRepository) ResetUserAffCode(ctx context.Context, userID int64) (string, error) {
	if userID <= 0 {
		return "", service.ErrUserNotFound
	}
	var newCode string
	err := r.withTx(ctx, func(txCtx context.Context, txClient *dbent.Client) error {
		if _, err := ensureUserAffiliateWithClient(txCtx, txClient, userID); err != nil {
			return err
		}
		for i := 0; i < affiliateCodeMaxAttempts; i++ {
			candidate, codeErr := generateAffiliateCode()
			if codeErr != nil {
				return codeErr
			}
			res, err := txClient.ExecContext(txCtx, `
				UPDATE user_affiliates
				SET aff_code = $1,
				    aff_code_custom = false,
				    updated_at = NOW()
				WHERE user_id = $2`, candidate, userID)
			if err != nil {
				// Another user already holds this code — retry.
				if isAffiliateUniqueViolation(err) {
					continue
				}
				return fmt.Errorf("reset aff_code: %w", err)
			}
			affected, _ := res.RowsAffected()
			if affected == 0 {
				return service.ErrUserNotFound
			}
			newCode = candidate
			return nil
		}
		return fmt.Errorf("reset aff_code: exhausted attempts")
	})
	if err != nil {
		return "", err
	}
	return newCode, nil
}
|
||||
|
||||
// SetUserRebateRate sets or clears a user's personal rebate percentage.
// ratePercent == nil clears the override (the global rate applies again).
func (r *affiliateRepository) SetUserRebateRate(ctx context.Context, userID int64, ratePercent *float64) error {
	if userID <= 0 {
		return service.ErrUserNotFound
	}
	return r.withTx(ctx, func(txCtx context.Context, txClient *dbent.Client) error {
		if _, err := ensureUserAffiliateWithClient(txCtx, txClient, userID); err != nil {
			return err
		}
		// nullableArg lets us use a single UPDATE for both "set value" and
		// "clear" cases — database/sql converts nil interface{} to SQL NULL.
		res, err := txClient.ExecContext(txCtx, `
			UPDATE user_affiliates
			SET aff_rebate_rate_percent = $1,
			    updated_at = NOW()
			WHERE user_id = $2`, nullableArg(ratePercent), userID)
		if err != nil {
			return fmt.Errorf("set aff_rebate_rate_percent: %w", err)
		}
		affected, _ := res.RowsAffected()
		if affected == 0 {
			return service.ErrUserNotFound
		}
		return nil
	})
}
|
||||
|
||||
// BatchSetUserRebateRate sets (or clears, when ratePercent is nil) the
// personal rebate percentage for multiple users in one transaction.
// Non-positive user IDs are skipped when ensuring rows, but note they are
// still included in the final ANY($2) update array.
func (r *affiliateRepository) BatchSetUserRebateRate(ctx context.Context, userIDs []int64, ratePercent *float64) error {
	if len(userIDs) == 0 {
		return nil
	}
	return r.withTx(ctx, func(txCtx context.Context, txClient *dbent.Client) error {
		// Create missing affiliate rows first so the batch UPDATE hits them.
		for _, uid := range userIDs {
			if uid <= 0 {
				continue
			}
			if _, err := ensureUserAffiliateWithClient(txCtx, txClient, uid); err != nil {
				return err
			}
		}
		_, err := txClient.ExecContext(txCtx, `
			UPDATE user_affiliates
			SET aff_rebate_rate_percent = $1,
			    updated_at = NOW()
			WHERE user_id = ANY($2)`, nullableArg(ratePercent), pq.Array(userIDs))
		if err != nil {
			return fmt.Errorf("batch set aff_rebate_rate_percent: %w", err)
		}
		return nil
	})
}
|
||||
|
||||
// nullableArg unwraps a *float64 into an interface{} suitable for SQL
// parameter binding: nil pointer → SQL NULL, non-nil → the float value.
func nullableArg(v *float64) any {
	if v != nil {
		return *v
	}
	return nil
}
|
||||
|
||||
// ListUsersWithCustomSettings lists users that have any personalized
// affiliate configuration (a custom code or a personal rebate rate).
//
// A single query handles both "no search" and "fuzzy search by
// email/username": with an empty search the assembled LIKE pattern is "%%",
// which matches every row; otherwise ILIKE does a case-insensitive substring
// match. This avoids maintaining two SQL templates for the two cases.
func (r *affiliateRepository) ListUsersWithCustomSettings(ctx context.Context, filter service.AffiliateAdminFilter) ([]service.AffiliateAdminEntry, int64, error) {
	// Normalize paging: page >= 1, page size in (0, 200] with default 20.
	page := filter.Page
	if page < 1 {
		page = 1
	}
	pageSize := filter.PageSize
	if pageSize <= 0 || pageSize > 200 {
		pageSize = 20
	}
	offset := (page - 1) * pageSize
	likePattern := "%" + strings.TrimSpace(filter.Search) + "%"

	// Shared FROM/WHERE fragment so COUNT and the page query cannot drift.
	const baseFrom = `
		FROM user_affiliates ua
		JOIN users u ON u.id = ua.user_id
		WHERE (ua.aff_code_custom = true OR ua.aff_rebate_rate_percent IS NOT NULL)
		  AND (u.email ILIKE $1 OR u.username ILIKE $1)`

	client := clientFromContext(ctx, r.client)

	total, err := scanInt64(ctx, client, "SELECT COUNT(*)"+baseFrom, likePattern)
	if err != nil {
		return nil, 0, fmt.Errorf("count affiliate admin entries: %w", err)
	}

	listQuery := `
		SELECT ua.user_id,
		       COALESCE(u.email, ''),
		       COALESCE(u.username, ''),
		       ua.aff_code,
		       ua.aff_code_custom,
		       ua.aff_rebate_rate_percent,
		       ua.aff_count` + baseFrom + `
		ORDER BY ua.updated_at DESC
		LIMIT $2 OFFSET $3`

	rows, err := client.QueryContext(ctx, listQuery, likePattern, pageSize, offset)
	if err != nil {
		return nil, 0, fmt.Errorf("list affiliate admin entries: %w", err)
	}
	defer func() { _ = rows.Close() }()

	entries := make([]service.AffiliateAdminEntry, 0)
	for rows.Next() {
		var e service.AffiliateAdminEntry
		var rebate sql.NullFloat64
		if err := rows.Scan(&e.UserID, &e.Email, &e.Username, &e.AffCode,
			&e.AffCodeCustom, &rebate, &e.AffCount); err != nil {
			return nil, 0, err
		}
		if rebate.Valid {
			v := rebate.Float64
			e.AffRebateRatePercent = &v
		}
		entries = append(entries, e)
	}
	if err := rows.Err(); err != nil {
		return nil, 0, err
	}
	return entries, total, nil
}
|
||||
|
||||
// scanInt64 runs a query expected to return a single int64 column (e.g.
// COUNT) and returns its value; an empty result set yields 0 without error.
func scanInt64(ctx context.Context, client affiliateQueryExecer, query string, args ...any) (int64, error) {
	rows, err := client.QueryContext(ctx, query, args...)
	if err != nil {
		return 0, err
	}
	defer func() { _ = rows.Close() }()
	if !rows.Next() {
		if err := rows.Err(); err != nil {
			return 0, err
		}
		return 0, nil
	}
	var v int64
	if err := rows.Scan(&v); err != nil {
		return 0, err
	}
	return v, nil
}
|
||||
399
backend/internal/repository/affiliate_repo_integration_test.go
Normal file
399
backend/internal/repository/affiliate_repo_integration_test.go
Normal file
@@ -0,0 +1,399 @@
|
||||
//go:build integration
|
||||
|
||||
package repository
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
dbent "github.com/Wei-Shaw/sub2api/ent"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// querySingleFloat is a test helper: run a query expected to yield exactly
// one float64 row and return its value, failing the test on any error.
func querySingleFloat(t *testing.T, ctx context.Context, client *dbent.Client, query string, args ...any) float64 {
	t.Helper()
	rows, err := client.QueryContext(ctx, query, args...)
	require.NoError(t, err)
	defer func() { _ = rows.Close() }()

	require.True(t, rows.Next(), "expected one row")
	var value float64
	require.NoError(t, rows.Scan(&value))
	require.NoError(t, rows.Err())
	return value
}
|
||||
|
||||
// querySingleInt mirrors querySingleFloat for a single int column (e.g.
// COUNT(*)), failing the test on any query, scan, or iteration error.
func querySingleInt(t *testing.T, ctx context.Context, client *dbent.Client, query string, args ...any) int {
	t.Helper()
	rows, err := client.QueryContext(ctx, query, args...)
	require.NoError(t, err)
	defer func() { _ = rows.Close() }()

	require.True(t, rows.Next(), "expected one row")
	var value int
	require.NoError(t, rows.Scan(&value))
	require.NoError(t, rows.Err())
	return value
}
|
||||
|
||||
// TestAffiliateRepository_TransferQuotaToBalance_UsesClaimedQuotaBeforeClear
// seeds a user with aff_quota = 12.34 and balance = 5.5, then asserts that
// TransferQuotaToBalance credits exactly the claimed quota (not a stale or
// post-clear value): transferred == 12.34, balance == 17.84, aff_quota reset
// to 0, and exactly one 'transfer' ledger row written.
func TestAffiliateRepository_TransferQuotaToBalance_UsesClaimedQuotaBeforeClear(t *testing.T) {
	ctx := context.Background()
	tx := testEntTx(t)
	txCtx := dbent.NewTxContext(ctx, tx)
	client := tx.Client()

	repo := NewAffiliateRepository(client, integrationDB)

	u := mustCreateUser(t, client, &service.User{
		Email:        fmt.Sprintf("affiliate-transfer-%d@example.com", time.Now().UnixNano()),
		PasswordHash: "hash",
		Role:         service.RoleUser,
		Status:       service.StatusActive,
		Balance:      5.5,
		Concurrency:  5,
	})

	// Time-derived code keeps parallel/repeated runs from colliding on the
	// aff_code unique constraint.
	affCode := fmt.Sprintf("AFF%09d", time.Now().UnixNano()%1_000_000_000)
	_, err := client.ExecContext(txCtx, `
		INSERT INTO user_affiliates (user_id, aff_code, aff_quota, aff_history_quota, created_at, updated_at)
		VALUES ($1, $2, $3, $3, NOW(), NOW())`, u.ID, affCode, 12.34)
	require.NoError(t, err)

	transferred, balance, err := repo.TransferQuotaToBalance(txCtx, u.ID)
	require.NoError(t, err)
	require.InDelta(t, 12.34, transferred, 1e-9)
	require.InDelta(t, 17.84, balance, 1e-9)

	// Quota must be fully drained after the transfer.
	affQuota := querySingleFloat(t, txCtx, client,
		"SELECT aff_quota::double precision FROM user_affiliates WHERE user_id = $1", u.ID)
	require.InDelta(t, 0.0, affQuota, 1e-9)

	// The returned balance must match what was actually persisted.
	persistedBalance := querySingleFloat(t, txCtx, client,
		"SELECT balance::double precision FROM users WHERE id = $1", u.ID)
	require.InDelta(t, 17.84, persistedBalance, 1e-9)

	ledgerCount := querySingleInt(t, txCtx, client,
		"SELECT COUNT(*) FROM user_affiliate_ledger WHERE user_id = $1 AND action = 'transfer'", u.ID)
	require.Equal(t, 1, ledgerCount)
}
|
||||
|
||||
// TestAffiliateRepository_AccrueQuota_ReusesOuterTransaction guards the
// cross-layer tx propagation invariant: when AccrueQuota is called with a ctx
// that already carries a transaction (via dbent.NewTxContext), repo.withTx
// must reuse that tx rather than opening a nested one. If this invariant
// breaks, AccrueQuota would commit independently and survive a rollback of
// the outer tx, which would violate payment_fulfillment's all-or-nothing
// semantics.
func TestAffiliateRepository_AccrueQuota_ReusesOuterTransaction(t *testing.T) {
	ctx := context.Background()

	outerTx, err := integrationEntClient.Tx(ctx)
	require.NoError(t, err, "begin outer tx")
	// Defensive cleanup: if any require.* below fires before the explicit
	// Rollback, this prevents the tx from leaking until container teardown.
	// Rollback is idempotent at the driver level (extra rollback returns an
	// error we ignore).
	t.Cleanup(func() { _ = outerTx.Rollback() })
	client := outerTx.Client()
	txCtx := dbent.NewTxContext(ctx, outerTx)

	inviter := mustCreateUser(t, client, &service.User{
		Email:        fmt.Sprintf("affiliate-inviter-%d@example.com", time.Now().UnixNano()),
		PasswordHash: "hash",
		Role:         service.RoleUser,
		Status:       service.StatusActive,
		Concurrency:  5,
	})
	// +1 keeps the two generated emails distinct even within one nanosecond
	// timestamp tick.
	invitee := mustCreateUser(t, client, &service.User{
		Email:        fmt.Sprintf("affiliate-invitee-%d@example.com", time.Now().UnixNano()+1),
		PasswordHash: "hash",
		Role:         service.RoleUser,
		Status:       service.StatusActive,
		Concurrency:  5,
	})

	repo := NewAffiliateRepository(client, integrationDB)
	_, err = repo.EnsureUserAffiliate(txCtx, inviter.ID)
	require.NoError(t, err)
	_, err = repo.EnsureUserAffiliate(txCtx, invitee.ID)
	require.NoError(t, err)

	bound, err := repo.BindInviter(txCtx, invitee.ID, inviter.ID)
	require.NoError(t, err)
	require.True(t, bound, "invitee must bind to inviter")

	// freezeHours=0 → quota goes straight to the available bucket.
	applied, err := repo.AccrueQuota(txCtx, inviter.ID, invitee.ID, 3.5, 0)
	require.NoError(t, err)
	require.True(t, applied, "AccrueQuota must report applied=true")

	// Visible inside the outer tx.
	innerQuota := querySingleFloat(t, txCtx, client,
		"SELECT aff_quota::double precision FROM user_affiliates WHERE user_id = $1", inviter.ID)
	require.InDelta(t, 3.5, innerQuota, 1e-9)

	// Roll back the outer tx; if AccrueQuota had opened its own inner tx and
	// committed it, the rows would still be visible to the global client.
	require.NoError(t, outerTx.Rollback())

	rows, err := integrationEntClient.QueryContext(ctx,
		"SELECT COUNT(*) FROM user_affiliates WHERE user_id IN ($1, $2)",
		inviter.ID, invitee.ID)
	require.NoError(t, err)
	defer func() { _ = rows.Close() }()
	require.True(t, rows.Next())
	var postRollbackCount int
	require.NoError(t, rows.Scan(&postRollbackCount))
	require.Equal(t, 0, postRollbackCount,
		"AccrueQuota must propagate the outer tx — found persisted rows after rollback")
}
|
||||
|
||||
func TestAffiliateRepository_TransferQuotaToBalance_EmptyQuota(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
tx := testEntTx(t)
|
||||
txCtx := dbent.NewTxContext(ctx, tx)
|
||||
client := tx.Client()
|
||||
|
||||
repo := NewAffiliateRepository(client, integrationDB)
|
||||
|
||||
u := mustCreateUser(t, client, &service.User{
|
||||
Email: fmt.Sprintf("affiliate-empty-%d@example.com", time.Now().UnixNano()),
|
||||
PasswordHash: "hash",
|
||||
Role: service.RoleUser,
|
||||
Status: service.StatusActive,
|
||||
Balance: 3.21,
|
||||
Concurrency: 5,
|
||||
})
|
||||
|
||||
affCode := fmt.Sprintf("AFF%09d", time.Now().UnixNano()%1_000_000_000)
|
||||
_, err := client.ExecContext(txCtx, `
|
||||
INSERT INTO user_affiliates (user_id, aff_code, aff_quota, aff_history_quota, created_at, updated_at)
|
||||
VALUES ($1, $2, 0, 0, NOW(), NOW())`, u.ID, affCode)
|
||||
require.NoError(t, err)
|
||||
|
||||
transferred, balance, err := repo.TransferQuotaToBalance(txCtx, u.ID)
|
||||
require.ErrorIs(t, err, service.ErrAffiliateQuotaEmpty)
|
||||
require.InDelta(t, 0.0, transferred, 1e-9)
|
||||
require.InDelta(t, 0.0, balance, 1e-9)
|
||||
|
||||
persistedBalance := querySingleFloat(t, txCtx, client,
|
||||
"SELECT balance::double precision FROM users WHERE id = $1", u.ID)
|
||||
require.InDelta(t, 3.21, persistedBalance, 1e-9)
|
||||
}
|
||||
|
||||
// TestAffiliateRepository_AdminCustomCode covers the success path of admin
// invite-code rewrite + reset within a shared test transaction:
// - UpdateUserAffCode replaces aff_code, sets aff_code_custom=true, lookup works
// - the old code can no longer be found
// - ResetUserAffCode reverts aff_code_custom and assigns a new system-format code
//
// The conflict path (duplicate code → ErrAffiliateCodeTaken) lives in its own
// test because a unique-violation aborts the surrounding Postgres tx, which
// would poison subsequent assertions in the same transaction.
func TestAffiliateRepository_AdminCustomCode(t *testing.T) {
	ctx := context.Background()
	tx := testEntTx(t)
	txCtx := dbent.NewTxContext(ctx, tx)
	client := tx.Client()

	repo := NewAffiliateRepository(client, integrationDB)

	u := mustCreateUser(t, client, &service.User{
		Email:        fmt.Sprintf("affiliate-custom-%d@example.com", time.Now().UnixNano()),
		PasswordHash: "hash",
		Role:         service.RoleUser,
		Status:       service.StatusActive,
	})

	// Baseline: EnsureUserAffiliate assigns a system code with the custom
	// flag cleared.
	original, err := repo.EnsureUserAffiliate(txCtx, u.ID)
	require.NoError(t, err)
	require.False(t, original.AffCodeCustom, "system-generated codes start as non-custom")
	originalCode := original.AffCode

	// Rewrite to a custom code
	customCode := fmt.Sprintf("VIP%09d", time.Now().UnixNano()%1_000_000_000)
	require.NoError(t, repo.UpdateUserAffCode(txCtx, u.ID, customCode))

	updated, err := repo.EnsureUserAffiliate(txCtx, u.ID)
	require.NoError(t, err)
	require.Equal(t, customCode, updated.AffCode)
	require.True(t, updated.AffCodeCustom)

	// Lookup by new custom code finds the user
	byCode, err := repo.GetAffiliateByCode(txCtx, customCode)
	require.NoError(t, err)
	require.Equal(t, u.ID, byCode.UserID)

	// Old system code should no longer match
	_, err = repo.GetAffiliateByCode(txCtx, originalCode)
	require.ErrorIs(t, err, service.ErrAffiliateProfileNotFound)

	// Reset back to a fresh system code, clears custom flag
	newSysCode, err := repo.ResetUserAffCode(txCtx, u.ID)
	require.NoError(t, err)
	require.NotEqual(t, customCode, newSysCode)

	reset, err := repo.EnsureUserAffiliate(txCtx, u.ID)
	require.NoError(t, err)
	require.Equal(t, newSysCode, reset.AffCode)
	require.False(t, reset.AffCodeCustom)

	// The old custom code is now free again
	_, err = repo.GetAffiliateByCode(txCtx, customCode)
	require.ErrorIs(t, err, service.ErrAffiliateProfileNotFound)
}
|
||||
|
||||
// TestAffiliateRepository_AdminCustomCode_Conflict isolates the unique-violation
|
||||
// path. PostgreSQL aborts the enclosing tx when a unique constraint fires, so
|
||||
// this test must be the only assertion and run in its own tx — production
|
||||
// callers each have their own outer tx, so this matches real behavior.
|
||||
func TestAffiliateRepository_AdminCustomCode_Conflict(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
tx := testEntTx(t)
|
||||
txCtx := dbent.NewTxContext(ctx, tx)
|
||||
client := tx.Client()
|
||||
|
||||
repo := NewAffiliateRepository(client, integrationDB)
|
||||
|
||||
taker := mustCreateUser(t, client, &service.User{
|
||||
Email: fmt.Sprintf("affiliate-conflict-taker-%d@example.com", time.Now().UnixNano()),
|
||||
PasswordHash: "hash",
|
||||
Role: service.RoleUser, Status: service.StatusActive,
|
||||
})
|
||||
requester := mustCreateUser(t, client, &service.User{
|
||||
Email: fmt.Sprintf("affiliate-conflict-req-%d@example.com", time.Now().UnixNano()),
|
||||
PasswordHash: "hash",
|
||||
Role: service.RoleUser, Status: service.StatusActive,
|
||||
})
|
||||
|
||||
takenCode := fmt.Sprintf("HOT%09d", time.Now().UnixNano()%1_000_000_000)
|
||||
require.NoError(t, repo.UpdateUserAffCode(txCtx, taker.ID, takenCode))
|
||||
|
||||
// Now requester tries to grab the same code → conflict.
|
||||
err := repo.UpdateUserAffCode(txCtx, requester.ID, takenCode)
|
||||
require.ErrorIs(t, err, service.ErrAffiliateCodeTaken)
|
||||
}
|
||||
|
||||
// TestAffiliateRepository_AdminRebateRate covers per-user exclusive rate
// set/clear and the Batch variant including NULL semantics.
func TestAffiliateRepository_AdminRebateRate(t *testing.T) {
	ctx := context.Background()
	tx := testEntTx(t)
	txCtx := dbent.NewTxContext(ctx, tx)
	client := tx.Client()

	repo := NewAffiliateRepository(client, integrationDB)

	u1 := mustCreateUser(t, client, &service.User{
		Email:        fmt.Sprintf("affiliate-rate-%d-a@example.com", time.Now().UnixNano()),
		PasswordHash: "hash",
		Role:         service.RoleUser,
		Status:       service.StatusActive,
	})
	u2 := mustCreateUser(t, client, &service.User{
		Email:        fmt.Sprintf("affiliate-rate-%d-b@example.com", time.Now().UnixNano()),
		PasswordHash: "hash",
		Role:         service.RoleUser,
		Status:       service.StatusActive,
	})

	// Set exclusive rate for u1
	rate := 42.5
	require.NoError(t, repo.SetUserRebateRate(txCtx, u1.ID, &rate))

	got, err := repo.EnsureUserAffiliate(txCtx, u1.ID)
	require.NoError(t, err)
	require.NotNil(t, got.AffRebateRatePercent)
	require.InDelta(t, 42.5, *got.AffRebateRatePercent, 1e-9)

	// Clear exclusive rate: passing nil must null out the override.
	require.NoError(t, repo.SetUserRebateRate(txCtx, u1.ID, nil))
	cleared, err := repo.EnsureUserAffiliate(txCtx, u1.ID)
	require.NoError(t, err)
	require.Nil(t, cleared.AffRebateRatePercent)

	// Batch set both users
	batchRate := 15.0
	require.NoError(t, repo.BatchSetUserRebateRate(txCtx, []int64{u1.ID, u2.ID}, &batchRate))

	for _, uid := range []int64{u1.ID, u2.ID} {
		v, err := repo.EnsureUserAffiliate(txCtx, uid)
		require.NoError(t, err)
		require.NotNil(t, v.AffRebateRatePercent)
		require.InDelta(t, 15.0, *v.AffRebateRatePercent, 1e-9)
	}

	// Batch clear: nil rate must reset every listed user.
	require.NoError(t, repo.BatchSetUserRebateRate(txCtx, []int64{u1.ID, u2.ID}, nil))
	for _, uid := range []int64{u1.ID, u2.ID} {
		v, err := repo.EnsureUserAffiliate(txCtx, uid)
		require.NoError(t, err)
		require.Nil(t, v.AffRebateRatePercent)
	}
}
|
||||
|
||||
// TestAffiliateRepository_ListUsersWithCustomSettings verifies the admin list
// only includes users with at least one override applied (custom code or
// exclusive rebate rate) and reports the right per-user attributes.
func TestAffiliateRepository_ListUsersWithCustomSettings(t *testing.T) {
	ctx := context.Background()
	tx := testEntTx(t)
	txCtx := dbent.NewTxContext(ctx, tx)
	client := tx.Client()

	repo := NewAffiliateRepository(client, integrationDB)

	// User without any custom config — should NOT appear in the list.
	plainEmail := fmt.Sprintf("affiliate-plain-%d@example.com", time.Now().UnixNano())
	uPlain := mustCreateUser(t, client, &service.User{
		Email: plainEmail, PasswordHash: "hash",
		Role: service.RoleUser, Status: service.StatusActive,
	})
	_, err := repo.EnsureUserAffiliate(txCtx, uPlain.ID)
	require.NoError(t, err)

	// User with a custom code — should appear.
	uCode := mustCreateUser(t, client, &service.User{
		Email:        fmt.Sprintf("affiliate-codeonly-%d@example.com", time.Now().UnixNano()),
		PasswordHash: "hash",
		Role:         service.RoleUser, Status: service.StatusActive,
	})
	require.NoError(t, repo.UpdateUserAffCode(txCtx, uCode.ID, fmt.Sprintf("VIP%09d", time.Now().UnixNano()%1_000_000_000)))

	// User with only an exclusive rate — should appear.
	uRate := mustCreateUser(t, client, &service.User{
		Email:        fmt.Sprintf("affiliate-rateonly-%d@example.com", time.Now().UnixNano()),
		PasswordHash: "hash",
		Role:         service.RoleUser, Status: service.StatusActive,
	})
	r := 33.3
	require.NoError(t, repo.SetUserRebateRate(txCtx, uRate.ID, &r))

	entries, total, err := repo.ListUsersWithCustomSettings(txCtx, service.AffiliateAdminFilter{
		Page: 1, PageSize: 100,
	})
	require.NoError(t, err)

	// Build a quick lookup to assert per-user attributes (other tests may have
	// inserted custom rows in the same DB; we only care about our 3).
	byUserID := make(map[int64]service.AffiliateAdminEntry, len(entries))
	for _, e := range entries {
		byUserID[e.UserID] = e
	}

	require.NotContains(t, byUserID, uPlain.ID, "users without overrides must not appear")

	codeEntry, ok := byUserID[uCode.ID]
	require.True(t, ok, "custom-code user missing from list")
	require.True(t, codeEntry.AffCodeCustom)
	require.Nil(t, codeEntry.AffRebateRatePercent)

	rateEntry, ok := byUserID[uRate.ID]
	require.True(t, ok, "custom-rate user missing from list")
	require.False(t, rateEntry.AffCodeCustom)
	require.NotNil(t, rateEntry.AffRebateRatePercent)
	require.InDelta(t, 33.3, *rateEntry.AffRebateRatePercent, 1e-9)

	require.GreaterOrEqual(t, total, int64(2), "total must include at least our 2 custom rows")
}
|
||||
755
backend/internal/repository/channel_monitor_repo.go
Normal file
755
backend/internal/repository/channel_monitor_repo.go
Normal file
@@ -0,0 +1,755 @@
|
||||
package repository
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
dbent "github.com/Wei-Shaw/sub2api/ent"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
"github.com/lib/pq"
|
||||
)
|
||||
|
||||
// channelMonitorRepository implements service.ChannelMonitorRepository.
//
// Design notes:
//   - CRUD goes through ent, reusing the project's transaction-context support
//   - aggregate queries (latest per model / availability) use raw SQL, which
//     avoids ent boilerplate around GROUP BY and keeps the queries on the
//     intended indexes
type channelMonitorRepository struct {
	client *dbent.Client // ent client; write paths swap in a tx client from the context
	db     *sql.DB       // raw pool for the hand-written aggregate SQL
}
|
||||
|
||||
// NewChannelMonitorRepository 创建仓储实例。
|
||||
func NewChannelMonitorRepository(client *dbent.Client, db *sql.DB) service.ChannelMonitorRepository {
|
||||
return &channelMonitorRepository{client: client, db: db}
|
||||
}
|
||||
|
||||
// ---------- CRUD ----------

// Create inserts a new monitor row and, on success, copies the generated ID
// and created/updated timestamps back into m.
func (r *channelMonitorRepository) Create(ctx context.Context, m *service.ChannelMonitor) error {
	client := clientFromContext(ctx, r.client)
	builder := client.ChannelMonitor.Create().
		SetName(m.Name).
		SetProvider(channelmonitor.Provider(m.Provider)).
		SetEndpoint(m.Endpoint).
		SetAPIKeyEncrypted(m.APIKey). // the caller passes in ciphertext already
		SetPrimaryModel(m.PrimaryModel).
		SetExtraModels(emptySliceIfNil(m.ExtraModels)).
		SetGroupName(m.GroupName).
		SetEnabled(m.Enabled).
		SetIntervalSeconds(m.IntervalSeconds).
		SetCreatedBy(m.CreatedBy).
		SetExtraHeaders(emptyHeadersIfNilRepo(m.ExtraHeaders)).
		SetBodyOverrideMode(defaultBodyModeRepo(m.BodyOverrideMode))
	// Optional fields: set only when present so column defaults/NULL apply.
	if m.TemplateID != nil {
		builder = builder.SetTemplateID(*m.TemplateID)
	}
	if m.BodyOverride != nil {
		builder = builder.SetBodyOverride(m.BodyOverride)
	}

	created, err := builder.Save(ctx)
	if err != nil {
		return translatePersistenceError(err, service.ErrChannelMonitorNotFound, nil)
	}
	m.ID = created.ID
	m.CreatedAt = created.CreatedAt
	m.UpdatedAt = created.UpdatedAt
	return nil
}
|
||||
|
||||
func (r *channelMonitorRepository) GetByID(ctx context.Context, id int64) (*service.ChannelMonitor, error) {
|
||||
row, err := r.client.ChannelMonitor.Query().
|
||||
Where(channelmonitor.IDEQ(id)).
|
||||
Only(ctx)
|
||||
if err != nil {
|
||||
return nil, translatePersistenceError(err, service.ErrChannelMonitorNotFound, nil)
|
||||
}
|
||||
return entToServiceMonitor(row), nil
|
||||
}
|
||||
|
||||
// Update overwrites every mutable column of the monitor identified by m.ID.
// A nil TemplateID or BodyOverride clears the stored value rather than
// leaving it untouched; m.UpdatedAt is refreshed from the saved row.
func (r *channelMonitorRepository) Update(ctx context.Context, m *service.ChannelMonitor) error {
	client := clientFromContext(ctx, r.client)
	updater := client.ChannelMonitor.UpdateOneID(m.ID).
		SetName(m.Name).
		SetProvider(channelmonitor.Provider(m.Provider)).
		SetEndpoint(m.Endpoint).
		SetAPIKeyEncrypted(m.APIKey).
		SetPrimaryModel(m.PrimaryModel).
		SetExtraModels(emptySliceIfNil(m.ExtraModels)).
		SetGroupName(m.GroupName).
		SetEnabled(m.Enabled).
		SetIntervalSeconds(m.IntervalSeconds).
		SetExtraHeaders(emptyHeadersIfNilRepo(m.ExtraHeaders)).
		SetBodyOverrideMode(defaultBodyModeRepo(m.BodyOverrideMode))
	// nil means "remove the override", so clear instead of skipping.
	if m.TemplateID != nil {
		updater = updater.SetTemplateID(*m.TemplateID)
	} else {
		updater = updater.ClearTemplateID()
	}
	if m.BodyOverride != nil {
		updater = updater.SetBodyOverride(m.BodyOverride)
	} else {
		updater = updater.ClearBodyOverride()
	}

	updated, err := updater.Save(ctx)
	if err != nil {
		return translatePersistenceError(err, service.ErrChannelMonitorNotFound, nil)
	}
	m.UpdatedAt = updated.UpdatedAt
	return nil
}
|
||||
|
||||
func (r *channelMonitorRepository) Delete(ctx context.Context, id int64) error {
|
||||
client := clientFromContext(ctx, r.client)
|
||||
if err := client.ChannelMonitor.DeleteOneID(id).Exec(ctx); err != nil {
|
||||
return translatePersistenceError(err, service.ErrChannelMonitorNotFound, nil)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// List returns one page of monitors plus the total count matching the
// filters. Filters: exact provider, tri-state enabled (nil = no filter), and
// a case-insensitive substring search over name / group name / primary model.
// Page defaults to 1 and PageSize to 20 when non-positive; rows are ordered
// by ID descending (newest first).
func (r *channelMonitorRepository) List(ctx context.Context, params service.ChannelMonitorListParams) ([]*service.ChannelMonitor, int64, error) {
	q := r.client.ChannelMonitor.Query()
	if params.Provider != "" {
		q = q.Where(channelmonitor.ProviderEQ(channelmonitor.Provider(params.Provider)))
	}
	if params.Enabled != nil {
		q = q.Where(channelmonitor.EnabledEQ(*params.Enabled))
	}
	if s := strings.TrimSpace(params.Search); s != "" {
		q = q.Where(channelmonitor.Or(
			channelmonitor.NameContainsFold(s),
			channelmonitor.GroupNameContainsFold(s),
			channelmonitor.PrimaryModelContainsFold(s),
		))
	}

	// Count before limit/offset so total reflects all filtered rows.
	total, err := q.Count(ctx)
	if err != nil {
		return nil, 0, fmt.Errorf("count monitors: %w", err)
	}

	pageSize := params.PageSize
	if pageSize <= 0 {
		pageSize = 20
	}
	page := params.Page
	if page <= 0 {
		page = 1
	}

	rows, err := q.
		Order(dbent.Desc(channelmonitor.FieldID)).
		Offset((page - 1) * pageSize).
		Limit(pageSize).
		All(ctx)
	if err != nil {
		return nil, 0, fmt.Errorf("list monitors: %w", err)
	}

	out := make([]*service.ChannelMonitor, 0, len(rows))
	for _, row := range rows {
		out = append(out, entToServiceMonitor(row))
	}
	return out, int64(total), nil
}
|
||||
|
||||
// ---------- 调度器辅助 ----------
|
||||
|
||||
func (r *channelMonitorRepository) ListEnabled(ctx context.Context) ([]*service.ChannelMonitor, error) {
|
||||
rows, err := r.client.ChannelMonitor.Query().
|
||||
Where(channelmonitor.EnabledEQ(true)).
|
||||
All(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("list enabled monitors: %w", err)
|
||||
}
|
||||
out := make([]*service.ChannelMonitor, 0, len(rows))
|
||||
for _, row := range rows {
|
||||
out = append(out, entToServiceMonitor(row))
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (r *channelMonitorRepository) MarkChecked(ctx context.Context, id int64, checkedAt time.Time) error {
|
||||
client := clientFromContext(ctx, r.client)
|
||||
if err := client.ChannelMonitor.UpdateOneID(id).
|
||||
SetLastCheckedAt(checkedAt).
|
||||
Exec(ctx); err != nil {
|
||||
return translatePersistenceError(err, service.ErrChannelMonitorNotFound, nil)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *channelMonitorRepository) InsertHistoryBatch(ctx context.Context, rows []*service.ChannelMonitorHistoryRow) error {
|
||||
if len(rows) == 0 {
|
||||
return nil
|
||||
}
|
||||
client := clientFromContext(ctx, r.client)
|
||||
bulk := make([]*dbent.ChannelMonitorHistoryCreate, 0, len(rows))
|
||||
for _, row := range rows {
|
||||
c := client.ChannelMonitorHistory.Create().
|
||||
SetMonitorID(row.MonitorID).
|
||||
SetModel(row.Model).
|
||||
SetStatus(channelmonitorhistory.Status(row.Status)).
|
||||
SetMessage(row.Message).
|
||||
SetCheckedAt(row.CheckedAt)
|
||||
if row.LatencyMs != nil {
|
||||
c = c.SetLatencyMs(*row.LatencyMs)
|
||||
}
|
||||
if row.PingLatencyMs != nil {
|
||||
c = c.SetPingLatencyMs(*row.PingLatencyMs)
|
||||
}
|
||||
bulk = append(bulk, c)
|
||||
}
|
||||
if _, err := client.ChannelMonitorHistory.CreateBulk(bulk...).Save(ctx); err != nil {
|
||||
return fmt.Errorf("insert history bulk: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteHistoryBefore physically deletes detail rows with checked_at < before,
// in batches of channelMonitorPruneBatchSize rows per transaction, so a single
// huge delete cannot build up lock/WAL pressure. The (checked_at) index is
// used to locate each small batch of ids, which are then deleted by id.
func (r *channelMonitorRepository) DeleteHistoryBefore(ctx context.Context, before time.Time) (int64, error) {
	return deleteChannelMonitorBatched(ctx, r.db, channelMonitorPruneHistorySQL, before)
}
|
||||
|
||||
// ListHistory returns the most recent `limit` history entries of one monitor,
// ordered by checked_at descending. An empty model applies no model filter;
// a non-empty model restricts results to that model only.
func (r *channelMonitorRepository) ListHistory(ctx context.Context, monitorID int64, model string, limit int) ([]*service.ChannelMonitorHistoryEntry, error) {
	q := r.client.ChannelMonitorHistory.Query().
		Where(channelmonitorhistory.MonitorIDEQ(monitorID))
	if strings.TrimSpace(model) != "" {
		q = q.Where(channelmonitorhistory.ModelEQ(model))
	}
	rows, err := q.
		Order(dbent.Desc(channelmonitorhistory.FieldCheckedAt)).
		Limit(limit).
		All(ctx)
	if err != nil {
		return nil, fmt.Errorf("list history: %w", err)
	}
	out := make([]*service.ChannelMonitorHistoryEntry, 0, len(rows))
	for _, row := range rows {
		entry := &service.ChannelMonitorHistoryEntry{
			ID:            row.ID,
			Model:         row.Model,
			Status:        string(row.Status),
			LatencyMs:     row.LatencyMs,
			PingLatencyMs: row.PingLatencyMs,
			Message:       row.Message,
			CheckedAt:     row.CheckedAt,
		}
		out = append(out, entry)
	}
	return out, nil
}
|
||||
|
||||
// ---------- user-view aggregation (raw SQL) ----------

// ListLatestPerModel uses DISTINCT ON to fetch the most recent record per
// (monitor_id, model) for one monitor. The (monitor_id, model, checked_at
// DESC) index lets the query run as an index scan.
func (r *channelMonitorRepository) ListLatestPerModel(ctx context.Context, monitorID int64) ([]*service.ChannelMonitorLatest, error) {
	const q = `
SELECT DISTINCT ON (model)
       model, status, latency_ms, ping_latency_ms, checked_at
FROM channel_monitor_histories
WHERE monitor_id = $1
ORDER BY model, checked_at DESC
`
	rows, err := r.db.QueryContext(ctx, q, monitorID)
	if err != nil {
		return nil, fmt.Errorf("query latest per model: %w", err)
	}
	defer func() { _ = rows.Close() }()

	out := make([]*service.ChannelMonitorLatest, 0)
	for rows.Next() {
		l := &service.ChannelMonitorLatest{}
		var latency, ping sql.NullInt64
		if err := rows.Scan(&l.Model, &l.Status, &latency, &ping, &l.CheckedAt); err != nil {
			return nil, fmt.Errorf("scan latest row: %w", err)
		}
		// NULL latency columns stay nil pointers on the service struct.
		assignNullInt(&l.LatencyMs, latency)
		assignNullInt(&l.PingLatencyMs, ping)
		out = append(out, l)
	}
	return out, rows.Err()
}
|
||||
|
||||
// assignNullInt 把 sql.NullInt64 解包到 *int 指针目标(valid 才分配新 int)。
|
||||
// 集中实现避免 latency / ping 两处重复 if latency.Valid { v := int(...) ... } 模板。
|
||||
func assignNullInt(dst **int, n sql.NullInt64) {
|
||||
if !n.Valid {
|
||||
return
|
||||
}
|
||||
v := int(n.Int64)
|
||||
*dst = &v
|
||||
}
|
||||
|
||||
// ComputeAvailability computes per-model availability and average latency for
// one monitor within a windowDays window (default 7 when <= 0).
// "Available" = status IN (operational, degraded).
//
// NOTE(review): the original comment claimed both "details kept only 1 day,
// earlier days served from the rollup table" and "details kept 30 days
// (monitorHistoryRetentionDays), windows <= 30 days scan histories directly
// at second precision, avoiding the UTC day-boundary precision loss a UNION
// with the rollup table would bring". The SQL below scans only
// channel_monitor_histories, which matches the 30-day reading — confirm the
// actual retention policy.
func (r *channelMonitorRepository) ComputeAvailability(ctx context.Context, monitorID int64, windowDays int) ([]*service.ChannelMonitorAvailability, error) {
	if windowDays <= 0 {
		windowDays = 7
	}
	const q = `
SELECT model,
       COUNT(*) AS total,
       COUNT(*) FILTER (WHERE status IN ('operational','degraded')) AS ok,
       CASE WHEN COUNT(latency_ms) > 0
            THEN SUM(latency_ms) FILTER (WHERE latency_ms IS NOT NULL)::float8 / COUNT(latency_ms)
            ELSE NULL END AS avg_latency_ms
FROM channel_monitor_histories
WHERE monitor_id = $1
  AND checked_at >= NOW() - ($2::int || ' days')::interval
GROUP BY model
`
	rows, err := r.db.QueryContext(ctx, q, monitorID, windowDays)
	if err != nil {
		return nil, fmt.Errorf("query availability: %w", err)
	}
	defer func() { _ = rows.Close() }()

	out := make([]*service.ChannelMonitorAvailability, 0)
	for rows.Next() {
		row, err := scanAvailabilityRow(rows, windowDays)
		if err != nil {
			return nil, err
		}
		out = append(out, row)
	}
	return out, rows.Err()
}
|
||||
|
||||
// scanAvailabilityRow scans a single (model, total, ok, avg_latency) row into
// a ChannelMonitorAvailability. It serves only ComputeAvailability's 4-column
// result shape; the batch variant carries an extra leading monitor_id column
// and therefore inlines its scan, calling finalizeAvailabilityRow directly.
func scanAvailabilityRow(rows interface{ Scan(...any) error }, windowDays int) (*service.ChannelMonitorAvailability, error) {
	row := &service.ChannelMonitorAvailability{WindowDays: windowDays}
	var avgLatency sql.NullFloat64
	if err := rows.Scan(&row.Model, &row.TotalChecks, &row.OperationalChecks, &avgLatency); err != nil {
		return nil, fmt.Errorf("scan availability row: %w", err)
	}
	finalizeAvailabilityRow(row, avgLatency)
	return row, nil
}
|
||||
|
||||
// finalizeAvailabilityRow 根据 OperationalChecks/TotalChecks 算出可用率,
|
||||
// 并把 sql.NullFloat64 的平均延迟解包为 *int。两处复用避免维护漂移。
|
||||
func finalizeAvailabilityRow(row *service.ChannelMonitorAvailability, avgLatency sql.NullFloat64) {
|
||||
if row.TotalChecks > 0 {
|
||||
row.AvailabilityPct = float64(row.OperationalChecks) * 100.0 / float64(row.TotalChecks)
|
||||
}
|
||||
if avgLatency.Valid {
|
||||
v := int(avgLatency.Float64)
|
||||
row.AvgLatencyMs = &v
|
||||
}
|
||||
}
|
||||
|
||||
// ListLatestForMonitorIDs fetches, in a single query, the most recent record
// per (monitor_id, model) for many monitors at once. PG's DISTINCT ON plus
// the (monitor_id, model, checked_at DESC) index lets this run as an index
// scan. An empty ids slice returns an empty map without touching the DB.
func (r *channelMonitorRepository) ListLatestForMonitorIDs(ctx context.Context, ids []int64) (map[int64][]*service.ChannelMonitorLatest, error) {
	out := make(map[int64][]*service.ChannelMonitorLatest, len(ids))
	if len(ids) == 0 {
		return out, nil
	}
	const q = `
SELECT DISTINCT ON (monitor_id, model)
       monitor_id, model, status, latency_ms, ping_latency_ms, checked_at
FROM channel_monitor_histories
WHERE monitor_id = ANY($1)
ORDER BY monitor_id, model, checked_at DESC
`
	rows, err := r.db.QueryContext(ctx, q, pq.Array(ids))
	if err != nil {
		return nil, fmt.Errorf("query latest batch: %w", err)
	}
	defer func() { _ = rows.Close() }()

	for rows.Next() {
		var monitorID int64
		l := &service.ChannelMonitorLatest{}
		var latency, ping sql.NullInt64
		if err := rows.Scan(&monitorID, &l.Model, &l.Status, &latency, &ping, &l.CheckedAt); err != nil {
			return nil, fmt.Errorf("scan latest batch row: %w", err)
		}
		// NULL latency columns stay nil pointers on the service struct.
		assignNullInt(&l.LatencyMs, latency)
		assignNullInt(&l.PingLatencyMs, ping)
		out[monitorID] = append(out[monitorID], l)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return out, nil
}
|
||||
|
||||
// ListRecentHistoryForMonitors batch-fetches, for each monitor, the most
// recent N history entries of its designated model (checked_at DESC, newest
// first). primaryModels[monitorID] names the model each monitor is filtered
// on; monitors absent from primaryModels contribute no rows.
// A CTE unnesting two parallel arrays (int8/text) builds the
// (monitor_id, model) whitelist, then ROW_NUMBER() OVER (PARTITION BY
// monitor_id) keeps the top N per monitor.
//
// Returns map[monitorID] -> []*ChannelMonitorHistoryEntry (message omitted to
// trim payload size). Empty ids / empty primaryModels yield an empty map, not
// an error.
func (r *channelMonitorRepository) ListRecentHistoryForMonitors(
	ctx context.Context,
	ids []int64,
	primaryModels map[int64]string,
	perMonitorLimit int,
) (map[int64][]*service.ChannelMonitorHistoryEntry, error) {
	out := make(map[int64][]*service.ChannelMonitorHistoryEntry, len(ids))
	pairIDs, pairModels := buildMonitorModelPairs(ids, primaryModels)
	if len(pairIDs) == 0 {
		return out, nil
	}
	perMonitorLimit = clampTimelineLimit(perMonitorLimit)

	const q = `
WITH targets AS (
  SELECT unnest($1::bigint[]) AS monitor_id,
         unnest($2::text[])   AS model
),
ranked AS (
  SELECT h.monitor_id,
         h.status,
         h.latency_ms,
         h.ping_latency_ms,
         h.checked_at,
         ROW_NUMBER() OVER (PARTITION BY h.monitor_id ORDER BY h.checked_at DESC) AS rn
  FROM channel_monitor_histories h
  JOIN targets t
    ON t.monitor_id = h.monitor_id AND t.model = h.model
)
SELECT monitor_id, status, latency_ms, ping_latency_ms, checked_at
FROM ranked
WHERE rn <= $3
ORDER BY monitor_id, checked_at DESC
`
	rows, err := r.db.QueryContext(ctx, q, pq.Array(pairIDs), pq.Array(pairModels), perMonitorLimit)
	if err != nil {
		return nil, fmt.Errorf("query recent history batch: %w", err)
	}
	defer func() { _ = rows.Close() }()

	for rows.Next() {
		var monitorID int64
		entry := &service.ChannelMonitorHistoryEntry{}
		var latency, ping sql.NullInt64
		if err := rows.Scan(&monitorID, &entry.Status, &latency, &ping, &entry.CheckedAt); err != nil {
			return nil, fmt.Errorf("scan recent history row: %w", err)
		}
		// NULL latency columns stay nil pointers on the service struct.
		assignNullInt(&entry.LatencyMs, latency)
		assignNullInt(&entry.PingLatencyMs, ping)
		out[monitorID] = append(out[monitorID], entry)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return out, nil
}
|
||||
|
||||
// buildMonitorModelPairs filters ids down to valid (monitor_id, model) pairs,
// skipping monitors whose model is missing or blank. The two returned slices
// always have equal length with matching positions, ready for SQL unnest
// expansion.
func buildMonitorModelPairs(ids []int64, primaryModels map[int64]string) ([]int64, []string) {
	if len(ids) == 0 || len(primaryModels) == 0 {
		return nil, nil
	}
	monitorIDs := make([]int64, 0, len(ids))
	models := make([]string, 0, len(ids))
	for _, monitorID := range ids {
		// A missing key yields "", which the blank check below also rejects.
		model := primaryModels[monitorID]
		if strings.TrimSpace(model) == "" {
			continue
		}
		monitorIDs = append(monitorIDs, monitorID)
		models = append(models, model)
	}
	return monitorIDs, models
}
|
||||
|
||||
// timelineLimit* bound the perMonitorLimit accepted by the batch timeline
// query. The floor of 1 guarantees at least the latest entry; the ceiling of
// 200 caps the response size and the ROW_NUMBER window's memory footprint.
const (
	timelineLimitMin = 1
	timelineLimitMax = 200
)

// clampTimelineLimit forces perMonitorLimit into
// [timelineLimitMin, timelineLimitMax], neutralizing bogus or oversized input.
func clampTimelineLimit(n int) int {
	switch {
	case n < timelineLimitMin:
		return timelineLimitMin
	case n > timelineLimitMax:
		return timelineLimitMax
	default:
		return n
	}
}
|
||||
|
||||
// ComputeAvailabilityForMonitors computes, in one query, per-model
// availability and average latency for many monitors within the window
// (default 7 days when windowDays <= 0). Detail rows are retained for 30
// days, so windows <= 30 days scan histories directly with no rollup-table
// involvement.
func (r *channelMonitorRepository) ComputeAvailabilityForMonitors(ctx context.Context, ids []int64, windowDays int) (map[int64][]*service.ChannelMonitorAvailability, error) {
	out := make(map[int64][]*service.ChannelMonitorAvailability, len(ids))
	if len(ids) == 0 {
		return out, nil
	}
	if windowDays <= 0 {
		windowDays = 7
	}
	const q = `
SELECT monitor_id,
       model,
       COUNT(*) AS total,
       COUNT(*) FILTER (WHERE status IN ('operational','degraded')) AS ok,
       CASE WHEN COUNT(latency_ms) > 0
            THEN SUM(latency_ms) FILTER (WHERE latency_ms IS NOT NULL)::float8 / COUNT(latency_ms)
            ELSE NULL END AS avg_latency_ms
FROM channel_monitor_histories
WHERE monitor_id = ANY($1)
  AND checked_at >= NOW() - ($2::int || ' days')::interval
GROUP BY monitor_id, model
`
	rows, err := r.db.QueryContext(ctx, q, pq.Array(ids), windowDays)
	if err != nil {
		return nil, fmt.Errorf("query availability batch: %w", err)
	}
	defer func() { _ = rows.Close() }()

	for rows.Next() {
		var monitorID int64
		row := &service.ChannelMonitorAvailability{WindowDays: windowDays}
		var avgLatency sql.NullFloat64
		if err := rows.Scan(&monitorID, &row.Model, &row.TotalChecks, &row.OperationalChecks, &avgLatency); err != nil {
			return nil, fmt.Errorf("scan availability batch row: %w", err)
		}
		// The batch query adds a leading monitor_id column; the remaining
		// availability/average-latency math matches the single-monitor path,
		// so finalizeAvailabilityRow is shared to keep the two in lockstep.
		finalizeAvailabilityRow(row, avgLatency)
		out[monitorID] = append(out[monitorID], row)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return out, nil
}
|
||||
|
||||
// ---------- 聚合维护 ----------
|
||||
|
||||
// UpsertDailyRollupsFor 把 targetDate 当天([targetDate, targetDate+1d))的明细
|
||||
// 按 (monitor_id, model, bucket_date) 聚合写入 channel_monitor_daily_rollups。
|
||||
// - 用 ON CONFLICT (monitor_id, model, bucket_date) DO UPDATE 实现幂等回填,
|
||||
// 重复执行只会用最新统计覆盖;
|
||||
// - $1::date 让 PG 自动把入参 truncate 到 UTC 日期,调用方不需要预处理 targetDate。
|
||||
func (r *channelMonitorRepository) UpsertDailyRollupsFor(ctx context.Context, targetDate time.Time) (int64, error) {
|
||||
const q = `
|
||||
INSERT INTO channel_monitor_daily_rollups (
|
||||
monitor_id, model, bucket_date,
|
||||
total_checks, ok_count,
|
||||
operational_count, degraded_count, failed_count, error_count,
|
||||
sum_latency_ms, count_latency,
|
||||
sum_ping_latency_ms, count_ping_latency,
|
||||
computed_at
|
||||
)
|
||||
SELECT
|
||||
monitor_id,
|
||||
model,
|
||||
$1::date AS bucket_date,
|
||||
COUNT(*) AS total_checks,
|
||||
COUNT(*) FILTER (WHERE status IN ('operational','degraded')) AS ok_count,
|
||||
COUNT(*) FILTER (WHERE status = 'operational') AS operational_count,
|
||||
COUNT(*) FILTER (WHERE status = 'degraded') AS degraded_count,
|
||||
COUNT(*) FILTER (WHERE status = 'failed') AS failed_count,
|
||||
COUNT(*) FILTER (WHERE status = 'error') AS error_count,
|
||||
COALESCE(SUM(latency_ms) FILTER (WHERE latency_ms IS NOT NULL), 0) AS sum_latency_ms,
|
||||
COUNT(latency_ms) AS count_latency,
|
||||
COALESCE(SUM(ping_latency_ms) FILTER (WHERE ping_latency_ms IS NOT NULL), 0) AS sum_ping_latency_ms,
|
||||
COUNT(ping_latency_ms) AS count_ping_latency,
|
||||
NOW()
|
||||
FROM channel_monitor_histories
|
||||
WHERE checked_at >= $1::date
|
||||
AND checked_at < ($1::date + INTERVAL '1 day')
|
||||
GROUP BY monitor_id, model
|
||||
ON CONFLICT (monitor_id, model, bucket_date) DO UPDATE SET
|
||||
total_checks = EXCLUDED.total_checks,
|
||||
ok_count = EXCLUDED.ok_count,
|
||||
operational_count = EXCLUDED.operational_count,
|
||||
degraded_count = EXCLUDED.degraded_count,
|
||||
failed_count = EXCLUDED.failed_count,
|
||||
error_count = EXCLUDED.error_count,
|
||||
sum_latency_ms = EXCLUDED.sum_latency_ms,
|
||||
count_latency = EXCLUDED.count_latency,
|
||||
sum_ping_latency_ms = EXCLUDED.sum_ping_latency_ms,
|
||||
count_ping_latency = EXCLUDED.count_ping_latency,
|
||||
computed_at = NOW()
|
||||
`
|
||||
res, err := r.db.ExecContext(ctx, q, targetDate)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("upsert daily rollups for %s: %w", targetDate.Format("2006-01-02"), err)
|
||||
}
|
||||
n, err := res.RowsAffected()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("rows affected (upsert rollups): %w", err)
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// DeleteRollupsBefore 物理删 bucket_date < beforeDate 的聚合行,同样分批。
|
||||
func (r *channelMonitorRepository) DeleteRollupsBefore(ctx context.Context, beforeDate time.Time) (int64, error) {
|
||||
return deleteChannelMonitorBatched(ctx, r.db, channelMonitorPruneRollupSQL, beforeDate)
|
||||
}
|
||||
|
||||
// channelMonitorPruneBatchSize 单批删除上限。与 ops_cleanup_service 保持一致的 5000,
|
||||
// 在大表上按 id 小批删可以避免长事务和 WAL 堆积。
|
||||
const channelMonitorPruneBatchSize = 5000
|
||||
|
||||
// channelMonitorPruneHistorySQL 分批物理删明细表过期行。
|
||||
const channelMonitorPruneHistorySQL = `
|
||||
WITH batch AS (
|
||||
SELECT id FROM channel_monitor_histories
|
||||
WHERE checked_at < $1
|
||||
ORDER BY id
|
||||
LIMIT $2
|
||||
)
|
||||
DELETE FROM channel_monitor_histories
|
||||
WHERE id IN (SELECT id FROM batch)
|
||||
`
|
||||
|
||||
// channelMonitorPruneRollupSQL 分批物理删 rollup 表过期行。bucket_date 需要 ::date 转型
|
||||
// 保证与 DATE 列一致比较。
|
||||
const channelMonitorPruneRollupSQL = `
|
||||
WITH batch AS (
|
||||
SELECT id FROM channel_monitor_daily_rollups
|
||||
WHERE bucket_date < $1::date
|
||||
ORDER BY id
|
||||
LIMIT $2
|
||||
)
|
||||
DELETE FROM channel_monitor_daily_rollups
|
||||
WHERE id IN (SELECT id FROM batch)
|
||||
`
|
||||
|
||||
// deleteChannelMonitorBatched 循环执行分批 DELETE,直到影响行为 0。返回累计删除行数。
|
||||
// cutoff 由调用方按列类型传入(明细用 time.Time 对 TIMESTAMPTZ,rollup 用 time.Time SQL 侧 ::date 转型)。
|
||||
func deleteChannelMonitorBatched(ctx context.Context, db *sql.DB, query string, cutoff time.Time) (int64, error) {
|
||||
var total int64
|
||||
for {
|
||||
res, err := db.ExecContext(ctx, query, cutoff, channelMonitorPruneBatchSize)
|
||||
if err != nil {
|
||||
return total, fmt.Errorf("channel_monitor prune batch: %w", err)
|
||||
}
|
||||
affected, err := res.RowsAffected()
|
||||
if err != nil {
|
||||
return total, fmt.Errorf("channel_monitor prune rows affected: %w", err)
|
||||
}
|
||||
total += affected
|
||||
if affected == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return total, nil
|
||||
}
|
||||
|
||||
// LoadAggregationWatermark 读 watermark 表(id=1)。
|
||||
// watermark 表不是 ent schema(只有一行),直接走原生 SQL。
|
||||
// - 行不存在或 last_aggregated_date IS NULL:返回 (nil, nil),由调用方决定首次回填策略
|
||||
func (r *channelMonitorRepository) LoadAggregationWatermark(ctx context.Context) (*time.Time, error) {
|
||||
const q = `SELECT last_aggregated_date FROM channel_monitor_aggregation_watermark WHERE id = 1`
|
||||
var t sql.NullTime
|
||||
if err := r.db.QueryRowContext(ctx, q).Scan(&t); err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("load aggregation watermark: %w", err)
|
||||
}
|
||||
if !t.Valid {
|
||||
return nil, nil
|
||||
}
|
||||
return &t.Time, nil
|
||||
}
|
||||
|
||||
// UpdateAggregationWatermark 更新 watermark(UPSERT 到 id=1)。
|
||||
// $1::date 让 PG 把入参 truncate 到 UTC 日期,与 last_aggregated_date 列的 DATE 类型一致。
|
||||
func (r *channelMonitorRepository) UpdateAggregationWatermark(ctx context.Context, date time.Time) error {
|
||||
const q = `
|
||||
INSERT INTO channel_monitor_aggregation_watermark (id, last_aggregated_date, updated_at)
|
||||
VALUES (1, $1::date, NOW())
|
||||
ON CONFLICT (id) DO UPDATE SET
|
||||
last_aggregated_date = EXCLUDED.last_aggregated_date,
|
||||
updated_at = NOW()
|
||||
`
|
||||
if _, err := r.db.ExecContext(ctx, q, date); err != nil {
|
||||
return fmt.Errorf("update aggregation watermark: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ---------- helpers ----------
|
||||
|
||||
func entToServiceMonitor(row *dbent.ChannelMonitor) *service.ChannelMonitor {
|
||||
if row == nil {
|
||||
return nil
|
||||
}
|
||||
extras := row.ExtraModels
|
||||
if extras == nil {
|
||||
extras = []string{}
|
||||
}
|
||||
headers := row.ExtraHeaders
|
||||
if headers == nil {
|
||||
headers = map[string]string{}
|
||||
}
|
||||
out := &service.ChannelMonitor{
|
||||
ID: row.ID,
|
||||
Name: row.Name,
|
||||
Provider: string(row.Provider),
|
||||
Endpoint: row.Endpoint,
|
||||
APIKey: row.APIKeyEncrypted, // 仍为密文,service 层负责解密
|
||||
PrimaryModel: row.PrimaryModel,
|
||||
ExtraModels: extras,
|
||||
GroupName: row.GroupName,
|
||||
Enabled: row.Enabled,
|
||||
IntervalSeconds: row.IntervalSeconds,
|
||||
LastCheckedAt: row.LastCheckedAt,
|
||||
CreatedBy: row.CreatedBy,
|
||||
CreatedAt: row.CreatedAt,
|
||||
UpdatedAt: row.UpdatedAt,
|
||||
ExtraHeaders: headers,
|
||||
BodyOverrideMode: row.BodyOverrideMode,
|
||||
BodyOverride: row.BodyOverride,
|
||||
}
|
||||
if row.TemplateID != nil {
|
||||
id := *row.TemplateID
|
||||
out.TemplateID = &id
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// emptyHeadersIfNilRepo 与 service.emptyHeadersIfNil 功能一致,
|
||||
// repo 独立一份避免 import 循环。
|
||||
func emptyHeadersIfNilRepo(h map[string]string) map[string]string {
|
||||
if h == nil {
|
||||
return map[string]string{}
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
// defaultBodyModeRepo 空串归一为 off(同上不循环)。
|
||||
func defaultBodyModeRepo(mode string) string {
|
||||
if mode == "" {
|
||||
return "off"
|
||||
}
|
||||
return mode
|
||||
}
|
||||
|
||||
func emptySliceIfNil(in []string) []string {
|
||||
if in == nil {
|
||||
return []string{}
|
||||
}
|
||||
return in
|
||||
}
|
||||
195
backend/internal/repository/channel_monitor_template_repo.go
Normal file
195
backend/internal/repository/channel_monitor_template_repo.go
Normal file
@@ -0,0 +1,195 @@
|
||||
package repository
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
|
||||
dbent "github.com/Wei-Shaw/sub2api/ent"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorrequesttemplate"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
)
|
||||
|
||||
// channelMonitorRequestTemplateRepository 实现 service.ChannelMonitorRequestTemplateRepository。
|
||||
// 与 channelMonitorRepository 分开一个文件,职责清晰。
|
||||
type channelMonitorRequestTemplateRepository struct {
|
||||
client *dbent.Client
|
||||
db *sql.DB
|
||||
}
|
||||
|
||||
// NewChannelMonitorRequestTemplateRepository 创建模板仓储实例。
|
||||
func NewChannelMonitorRequestTemplateRepository(client *dbent.Client, db *sql.DB) service.ChannelMonitorRequestTemplateRepository {
|
||||
return &channelMonitorRequestTemplateRepository{client: client, db: db}
|
||||
}
|
||||
|
||||
// ---------- CRUD ----------
|
||||
|
||||
func (r *channelMonitorRequestTemplateRepository) Create(ctx context.Context, t *service.ChannelMonitorRequestTemplate) error {
|
||||
client := clientFromContext(ctx, r.client)
|
||||
builder := client.ChannelMonitorRequestTemplate.Create().
|
||||
SetName(t.Name).
|
||||
SetProvider(channelmonitorrequesttemplate.Provider(t.Provider)).
|
||||
SetDescription(t.Description).
|
||||
SetExtraHeaders(emptyHeadersIfNilRepo(t.ExtraHeaders)).
|
||||
SetBodyOverrideMode(defaultBodyModeRepo(t.BodyOverrideMode))
|
||||
if t.BodyOverride != nil {
|
||||
builder = builder.SetBodyOverride(t.BodyOverride)
|
||||
}
|
||||
|
||||
created, err := builder.Save(ctx)
|
||||
if err != nil {
|
||||
return translatePersistenceError(err, service.ErrChannelMonitorTemplateNotFound, nil)
|
||||
}
|
||||
t.ID = created.ID
|
||||
t.CreatedAt = created.CreatedAt
|
||||
t.UpdatedAt = created.UpdatedAt
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *channelMonitorRequestTemplateRepository) GetByID(ctx context.Context, id int64) (*service.ChannelMonitorRequestTemplate, error) {
|
||||
row, err := r.client.ChannelMonitorRequestTemplate.Query().
|
||||
Where(channelmonitorrequesttemplate.IDEQ(id)).
|
||||
Only(ctx)
|
||||
if err != nil {
|
||||
return nil, translatePersistenceError(err, service.ErrChannelMonitorTemplateNotFound, nil)
|
||||
}
|
||||
return entToServiceTemplate(row), nil
|
||||
}
|
||||
|
||||
func (r *channelMonitorRequestTemplateRepository) Update(ctx context.Context, t *service.ChannelMonitorRequestTemplate) error {
|
||||
client := clientFromContext(ctx, r.client)
|
||||
updater := client.ChannelMonitorRequestTemplate.UpdateOneID(t.ID).
|
||||
SetName(t.Name).
|
||||
SetDescription(t.Description).
|
||||
SetExtraHeaders(emptyHeadersIfNilRepo(t.ExtraHeaders)).
|
||||
SetBodyOverrideMode(defaultBodyModeRepo(t.BodyOverrideMode))
|
||||
if t.BodyOverride != nil {
|
||||
updater = updater.SetBodyOverride(t.BodyOverride)
|
||||
} else {
|
||||
updater = updater.ClearBodyOverride()
|
||||
}
|
||||
updated, err := updater.Save(ctx)
|
||||
if err != nil {
|
||||
return translatePersistenceError(err, service.ErrChannelMonitorTemplateNotFound, nil)
|
||||
}
|
||||
t.UpdatedAt = updated.UpdatedAt
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *channelMonitorRequestTemplateRepository) Delete(ctx context.Context, id int64) error {
|
||||
client := clientFromContext(ctx, r.client)
|
||||
if err := client.ChannelMonitorRequestTemplate.DeleteOneID(id).Exec(ctx); err != nil {
|
||||
return translatePersistenceError(err, service.ErrChannelMonitorTemplateNotFound, nil)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *channelMonitorRequestTemplateRepository) List(ctx context.Context, params service.ChannelMonitorRequestTemplateListParams) ([]*service.ChannelMonitorRequestTemplate, error) {
|
||||
q := r.client.ChannelMonitorRequestTemplate.Query()
|
||||
if params.Provider != "" {
|
||||
q = q.Where(channelmonitorrequesttemplate.ProviderEQ(channelmonitorrequesttemplate.Provider(params.Provider)))
|
||||
}
|
||||
rows, err := q.
|
||||
Order(dbent.Asc(channelmonitorrequesttemplate.FieldProvider), dbent.Asc(channelmonitorrequesttemplate.FieldName)).
|
||||
All(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("list monitor templates: %w", err)
|
||||
}
|
||||
out := make([]*service.ChannelMonitorRequestTemplate, 0, len(rows))
|
||||
for _, row := range rows {
|
||||
out = append(out, entToServiceTemplate(row))
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// ApplyToMonitors 把模板当前配置覆盖到 monitorIDs 列表里的关联监控。
|
||||
// WHERE 双重过滤:template_id = id AND id IN (monitorIDs),防止用户传了未关联本模板的 id
|
||||
// 就被覆盖。走 ent UpdateMany 保留 hooks。
|
||||
func (r *channelMonitorRequestTemplateRepository) ApplyToMonitors(ctx context.Context, id int64, monitorIDs []int64) (int64, error) {
|
||||
if len(monitorIDs) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
client := clientFromContext(ctx, r.client)
|
||||
tpl, err := client.ChannelMonitorRequestTemplate.Query().
|
||||
Where(channelmonitorrequesttemplate.IDEQ(id)).
|
||||
Only(ctx)
|
||||
if err != nil {
|
||||
return 0, translatePersistenceError(err, service.ErrChannelMonitorTemplateNotFound, nil)
|
||||
}
|
||||
|
||||
updater := client.ChannelMonitor.Update().
|
||||
Where(
|
||||
channelmonitor.TemplateIDEQ(id),
|
||||
channelmonitor.IDIn(monitorIDs...),
|
||||
).
|
||||
SetExtraHeaders(emptyHeadersIfNilRepo(tpl.ExtraHeaders)).
|
||||
SetBodyOverrideMode(defaultBodyModeRepo(tpl.BodyOverrideMode))
|
||||
if tpl.BodyOverride != nil {
|
||||
updater = updater.SetBodyOverride(tpl.BodyOverride)
|
||||
} else {
|
||||
updater = updater.ClearBodyOverride()
|
||||
}
|
||||
|
||||
affected, err := updater.Save(ctx)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("apply template to monitors: %w", err)
|
||||
}
|
||||
return int64(affected), nil
|
||||
}
|
||||
|
||||
// CountAssociatedMonitors 统计关联监控数(UI 展示「N 个配置」用)。
|
||||
func (r *channelMonitorRequestTemplateRepository) CountAssociatedMonitors(ctx context.Context, id int64) (int64, error) {
|
||||
count, err := r.client.ChannelMonitor.Query().
|
||||
Where(channelmonitor.TemplateIDEQ(id)).
|
||||
Count(ctx)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("count monitors for template %d: %w", id, err)
|
||||
}
|
||||
return int64(count), nil
|
||||
}
|
||||
|
||||
// ListAssociatedMonitors 列出模板关联的所有监控简略字段。
|
||||
// ORDER BY name 稳定输出方便前端展示。
|
||||
func (r *channelMonitorRequestTemplateRepository) ListAssociatedMonitors(ctx context.Context, id int64) ([]*service.AssociatedMonitorBrief, error) {
|
||||
rows, err := r.client.ChannelMonitor.Query().
|
||||
Where(channelmonitor.TemplateIDEQ(id)).
|
||||
Order(dbent.Asc(channelmonitor.FieldName)).
|
||||
All(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("list associated monitors for template %d: %w", id, err)
|
||||
}
|
||||
out := make([]*service.AssociatedMonitorBrief, 0, len(rows))
|
||||
for _, row := range rows {
|
||||
out = append(out, &service.AssociatedMonitorBrief{
|
||||
ID: row.ID,
|
||||
Name: row.Name,
|
||||
Provider: string(row.Provider),
|
||||
Enabled: row.Enabled,
|
||||
})
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// ---------- helpers ----------
|
||||
|
||||
func entToServiceTemplate(row *dbent.ChannelMonitorRequestTemplate) *service.ChannelMonitorRequestTemplate {
|
||||
if row == nil {
|
||||
return nil
|
||||
}
|
||||
headers := row.ExtraHeaders
|
||||
if headers == nil {
|
||||
headers = map[string]string{}
|
||||
}
|
||||
return &service.ChannelMonitorRequestTemplate{
|
||||
ID: row.ID,
|
||||
Name: row.Name,
|
||||
Provider: string(row.Provider),
|
||||
Description: row.Description,
|
||||
ExtraHeaders: headers,
|
||||
BodyOverrideMode: row.BodyOverrideMode,
|
||||
BodyOverride: row.BodyOverride,
|
||||
CreatedAt: row.CreatedAt,
|
||||
UpdatedAt: row.UpdatedAt,
|
||||
}
|
||||
}
|
||||
@@ -89,6 +89,9 @@ var ProviderSet = wire.NewSet(
|
||||
NewErrorPassthroughRepository,
|
||||
NewTLSFingerprintProfileRepository,
|
||||
NewChannelRepository,
|
||||
NewChannelMonitorRepository,
|
||||
NewChannelMonitorRequestTemplateRepository,
|
||||
NewAffiliateRepository,
|
||||
|
||||
// Cache implementations
|
||||
NewGatewayCache,
|
||||
|
||||
@@ -715,6 +715,10 @@ func TestAPIContracts(t *testing.T) {
|
||||
"force_email_on_third_party_signup": false,
|
||||
"default_concurrency": 5,
|
||||
"default_balance": 1.25,
|
||||
"affiliate_rebate_rate": 20,
|
||||
"affiliate_rebate_freeze_hours": 0,
|
||||
"affiliate_rebate_duration_days": 0,
|
||||
"affiliate_rebate_per_invitee_cap": 0,
|
||||
"default_user_rpm_limit": 0,
|
||||
"default_subscriptions": [],
|
||||
"enable_model_fallback": false,
|
||||
@@ -771,6 +775,10 @@ func TestAPIContracts(t *testing.T) {
|
||||
"balance_low_notify_threshold": 0,
|
||||
"balance_low_notify_recharge_url": "",
|
||||
"account_quota_notify_emails": [],
|
||||
"channel_monitor_enabled": true,
|
||||
"channel_monitor_default_interval_seconds": 60,
|
||||
"available_channels_enabled": false,
|
||||
"affiliate_enabled": false,
|
||||
"wechat_connect_enabled": false,
|
||||
"wechat_connect_app_id": "",
|
||||
"wechat_connect_app_secret_configured": false,
|
||||
@@ -892,6 +900,10 @@ func TestAPIContracts(t *testing.T) {
|
||||
"custom_endpoints": [],
|
||||
"default_concurrency": 0,
|
||||
"default_balance": 0,
|
||||
"affiliate_rebate_rate": 20,
|
||||
"affiliate_rebate_freeze_hours": 0,
|
||||
"affiliate_rebate_duration_days": 0,
|
||||
"affiliate_rebate_per_invitee_cap": 0,
|
||||
"default_user_rpm_limit": 0,
|
||||
"default_subscriptions": [],
|
||||
"enable_model_fallback": false,
|
||||
@@ -943,6 +955,10 @@ func TestAPIContracts(t *testing.T) {
|
||||
"balance_low_notify_threshold": 0,
|
||||
"balance_low_notify_recharge_url": "",
|
||||
"account_quota_notify_emails": [],
|
||||
"channel_monitor_enabled": true,
|
||||
"channel_monitor_default_interval_seconds": 60,
|
||||
"available_channels_enabled": false,
|
||||
"affiliate_enabled": false,
|
||||
"wechat_connect_enabled": true,
|
||||
"wechat_connect_app_id": "wx-open-config",
|
||||
"wechat_connect_app_secret_configured": true,
|
||||
|
||||
@@ -20,7 +20,7 @@ func TestAdminAuthJWTValidatesTokenVersion(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
cfg := &config.Config{JWT: config.JWTConfig{Secret: "test-secret", ExpireHour: 1}}
|
||||
authService := service.NewAuthService(nil, nil, nil, nil, cfg, nil, nil, nil, nil, nil, nil)
|
||||
authService := service.NewAuthService(nil, nil, nil, nil, cfg, nil, nil, nil, nil, nil, nil, nil)
|
||||
|
||||
admin := &service.User{
|
||||
ID: 1,
|
||||
|
||||
@@ -60,7 +60,7 @@ func newJWTTestEnv(users map[int64]*service.User) (*gin.Engine, *service.AuthSer
|
||||
cfg.JWT.AccessTokenExpireMinutes = 60
|
||||
|
||||
userRepo := &stubJWTUserRepo{users: users}
|
||||
authSvc := service.NewAuthService(nil, userRepo, nil, nil, cfg, nil, nil, nil, nil, nil, nil)
|
||||
authSvc := service.NewAuthService(nil, userRepo, nil, nil, cfg, nil, nil, nil, nil, nil, nil, nil)
|
||||
userSvc := service.NewUserService(userRepo, nil, nil, nil)
|
||||
mw := NewJWTAuthMiddleware(authSvc, userSvc)
|
||||
|
||||
@@ -143,7 +143,7 @@ func TestJWTAuth_ValidToken_TouchesLastActive(t *testing.T) {
|
||||
cfg.JWT.AccessTokenExpireMinutes = 60
|
||||
|
||||
userRepo := &stubJWTUserRepo{users: map[int64]*service.User{1: user}}
|
||||
authSvc := service.NewAuthService(nil, userRepo, nil, nil, cfg, nil, nil, nil, nil, nil, nil)
|
||||
authSvc := service.NewAuthService(nil, userRepo, nil, nil, cfg, nil, nil, nil, nil, nil, nil, nil)
|
||||
userSvc := service.NewUserService(userRepo, nil, nil, nil)
|
||||
toucher := &recordingActivityToucher{}
|
||||
|
||||
|
||||
@@ -88,6 +88,12 @@ func RegisterAdminRoutes(
|
||||
|
||||
// 渠道管理
|
||||
registerChannelRoutes(admin, h)
|
||||
|
||||
// 渠道监控
|
||||
registerChannelMonitorRoutes(admin, h)
|
||||
|
||||
// 邀请返利(专属用户管理)
|
||||
registerAffiliateRoutes(admin, h)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -567,3 +573,42 @@ func registerChannelRoutes(admin *gin.RouterGroup, h *handler.Handlers) {
|
||||
channels.DELETE("/:id", h.Admin.Channel.Delete)
|
||||
}
|
||||
}
|
||||
|
||||
func registerChannelMonitorRoutes(admin *gin.RouterGroup, h *handler.Handlers) {
|
||||
monitors := admin.Group("/channel-monitors")
|
||||
{
|
||||
monitors.GET("", h.Admin.ChannelMonitor.List)
|
||||
monitors.POST("", h.Admin.ChannelMonitor.Create)
|
||||
monitors.GET("/:id", h.Admin.ChannelMonitor.Get)
|
||||
monitors.PUT("/:id", h.Admin.ChannelMonitor.Update)
|
||||
monitors.DELETE("/:id", h.Admin.ChannelMonitor.Delete)
|
||||
monitors.POST("/:id/run", h.Admin.ChannelMonitor.Run)
|
||||
monitors.GET("/:id/history", h.Admin.ChannelMonitor.History)
|
||||
}
|
||||
|
||||
templates := admin.Group("/channel-monitor-templates")
|
||||
{
|
||||
templates.GET("", h.Admin.ChannelMonitorTemplate.List)
|
||||
templates.POST("", h.Admin.ChannelMonitorTemplate.Create)
|
||||
templates.GET("/:id", h.Admin.ChannelMonitorTemplate.Get)
|
||||
templates.PUT("/:id", h.Admin.ChannelMonitorTemplate.Update)
|
||||
templates.DELETE("/:id", h.Admin.ChannelMonitorTemplate.Delete)
|
||||
templates.GET("/:id/monitors", h.Admin.ChannelMonitorTemplate.AssociatedMonitors)
|
||||
templates.POST("/:id/apply", h.Admin.ChannelMonitorTemplate.Apply)
|
||||
}
|
||||
}
|
||||
|
||||
// registerAffiliateRoutes 注册邀请返利的管理端路由(专属用户配置)
|
||||
func registerAffiliateRoutes(admin *gin.RouterGroup, h *handler.Handlers) {
|
||||
affiliates := admin.Group("/affiliates")
|
||||
{
|
||||
users := affiliates.Group("/users")
|
||||
{
|
||||
users.GET("", h.Admin.Affiliate.ListUsers)
|
||||
users.GET("/lookup", h.Admin.Affiliate.LookupUsers)
|
||||
users.POST("/batch-rate", h.Admin.Affiliate.BatchSetRate)
|
||||
users.PUT("/:user_id", h.Admin.Affiliate.UpdateUserSettings)
|
||||
users.DELETE("/:user_id", h.Admin.Affiliate.ClearUserSettings)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -140,6 +140,13 @@ func RegisterGatewayRoutes(
|
||||
r.POST("/responses", bodyLimit, clientRequestID, opsErrorLogger, endpointNorm, gin.HandlerFunc(apiKeyAuth), requireGroupAnthropic, responsesHandler)
|
||||
r.POST("/responses/*subpath", bodyLimit, clientRequestID, opsErrorLogger, endpointNorm, gin.HandlerFunc(apiKeyAuth), requireGroupAnthropic, responsesHandler)
|
||||
r.GET("/responses", bodyLimit, clientRequestID, opsErrorLogger, endpointNorm, gin.HandlerFunc(apiKeyAuth), requireGroupAnthropic, h.OpenAIGateway.ResponsesWebSocket)
|
||||
codexDirect := r.Group("/backend-api/codex")
|
||||
codexDirect.Use(bodyLimit, clientRequestID, opsErrorLogger, endpointNorm, gin.HandlerFunc(apiKeyAuth), requireGroupAnthropic)
|
||||
{
|
||||
codexDirect.POST("/responses", responsesHandler)
|
||||
codexDirect.POST("/responses/*subpath", responsesHandler)
|
||||
codexDirect.GET("/responses", h.OpenAIGateway.ResponsesWebSocket)
|
||||
}
|
||||
// OpenAI Chat Completions API(不带v1前缀的别名)— auto-route based on group platform
|
||||
r.POST("/chat/completions", bodyLimit, clientRequestID, opsErrorLogger, endpointNorm, gin.HandlerFunc(apiKeyAuth), requireGroupAnthropic, func(c *gin.Context) {
|
||||
if getGroupPlatform(c) == service.PlatformOpenAI {
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user