feat: 数据库定时备份与恢复(S3 兼容存储,支持 Cloudflare R2)

新增管理员专属的数据库备份与恢复功能:
- 全量 PostgreSQL 备份(pg_dump),gzip 压缩后上传到 S3 兼容存储
- 支持手动备份和 cron 定时备份
- 支持从备份恢复(psql --single-transaction)
- 备份文件自动过期清理(默认 14 天)
- 前端完整管理页面(S3 配置、定时配置、备份列表、恢复/下载/删除)
- 内置 Cloudflare R2 配置教程弹窗
- Dockerfile 从 postgres 镜像多阶段复制 pg_dump/psql,确保版本一致

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Rose Ding
2026-03-13 10:38:19 +08:00
parent ecea13757b
commit 53ad1645cf
17 changed files with 2029 additions and 5 deletions

View File

@@ -9,6 +9,7 @@
ARG NODE_IMAGE=node:24-alpine
ARG GOLANG_IMAGE=golang:1.26.1-alpine
ARG ALPINE_IMAGE=alpine:3.21
ARG POSTGRES_IMAGE=postgres:18-alpine
ARG GOPROXY=https://goproxy.cn,direct
ARG GOSUMDB=sum.golang.google.cn
@@ -73,7 +74,12 @@ RUN VERSION_VALUE="${VERSION}" && \
./cmd/server
# -----------------------------------------------------------------------------
# Stage 3: Final Runtime Image
# Stage 3: PostgreSQL Client (version-matched with docker-compose)
# -----------------------------------------------------------------------------
FROM ${POSTGRES_IMAGE} AS pg-client
# -----------------------------------------------------------------------------
# Stage 4: Final Runtime Image
# -----------------------------------------------------------------------------
FROM ${ALPINE_IMAGE}
@@ -86,8 +92,20 @@ LABEL org.opencontainers.image.source="https://github.com/Wei-Shaw/sub2api"
RUN apk add --no-cache \
ca-certificates \
tzdata \
libpq \
zstd-libs \
lz4-libs \
krb5-libs \
libldap \
libedit \
&& rm -rf /var/cache/apk/*
# Copy pg_dump and psql from the same postgres image used in docker-compose
# This ensures version consistency between backup tools and the database server
COPY --from=pg-client /usr/local/bin/pg_dump /usr/local/bin/pg_dump
COPY --from=pg-client /usr/local/bin/psql /usr/local/bin/psql
COPY --from=pg-client /usr/local/lib/libpq.so.5* /usr/local/lib/
# Create non-root user
RUN addgroup -g 1000 sub2api && \
adduser -u 1000 -G sub2api -s /bin/sh -D sub2api

View File

@@ -94,6 +94,7 @@ func provideCleanup(
antigravityOAuth *service.AntigravityOAuthService,
openAIGateway *service.OpenAIGatewayService,
scheduledTestRunner *service.ScheduledTestRunnerService,
backupSvc *service.BackupService,
) func() {
return func() {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
@@ -230,6 +231,12 @@ func provideCleanup(
}
return nil
}},
{"BackupService", func() error {
if backupSvc != nil {
backupSvc.Stop()
}
return nil
}},
}
infraSteps := []cleanupStep{

View File

@@ -145,6 +145,8 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
adminAnnouncementHandler := admin.NewAnnouncementHandler(announcementService)
dataManagementService := service.NewDataManagementService()
dataManagementHandler := admin.NewDataManagementHandler(dataManagementService)
backupService := service.ProvideBackupService(settingRepository, configConfig)
backupHandler := admin.NewBackupHandler(backupService)
oAuthHandler := admin.NewOAuthHandler(oAuthService)
openAIOAuthHandler := admin.NewOpenAIOAuthHandler(openAIOAuthService, adminService)
geminiOAuthHandler := admin.NewGeminiOAuthHandler(geminiOAuthService)
@@ -200,7 +202,7 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
scheduledTestResultRepository := repository.NewScheduledTestResultRepository(db)
scheduledTestService := service.ProvideScheduledTestService(scheduledTestPlanRepository, scheduledTestResultRepository)
scheduledTestHandler := admin.NewScheduledTestHandler(scheduledTestService)
adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, adminAnnouncementHandler, dataManagementHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, antigravityOAuthHandler, proxyHandler, adminRedeemHandler, promoHandler, settingHandler, opsHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler, userAttributeHandler, errorPassthroughHandler, adminAPIKeyHandler, scheduledTestHandler)
adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, adminAnnouncementHandler, dataManagementHandler, backupHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, antigravityOAuthHandler, proxyHandler, adminRedeemHandler, promoHandler, settingHandler, opsHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler, userAttributeHandler, errorPassthroughHandler, adminAPIKeyHandler, scheduledTestHandler)
usageRecordWorkerPool := service.NewUsageRecordWorkerPool(configConfig)
userMsgQueueCache := repository.NewUserMsgQueueCache(redisClient)
userMessageQueueService := service.ProvideUserMessageQueueService(userMsgQueueCache, rpmCache, configConfig)
@@ -231,7 +233,7 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
accountExpiryService := service.ProvideAccountExpiryService(accountRepository)
subscriptionExpiryService := service.ProvideSubscriptionExpiryService(userSubscriptionRepository)
scheduledTestRunnerService := service.ProvideScheduledTestRunnerService(scheduledTestPlanRepository, scheduledTestService, accountTestService, rateLimitService, configConfig)
v := provideCleanup(client, redisClient, opsMetricsCollector, opsAggregationService, opsAlertEvaluatorService, opsCleanupService, opsScheduledReportService, opsSystemLogSink, soraMediaCleanupService, schedulerSnapshotService, tokenRefreshService, accountExpiryService, subscriptionExpiryService, usageCleanupService, idempotencyCleanupService, pricingService, emailQueueService, billingCacheService, usageRecordWorkerPool, subscriptionService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, openAIGatewayService, scheduledTestRunnerService)
v := provideCleanup(client, redisClient, opsMetricsCollector, opsAggregationService, opsAlertEvaluatorService, opsCleanupService, opsScheduledReportService, opsSystemLogSink, soraMediaCleanupService, schedulerSnapshotService, tokenRefreshService, accountExpiryService, subscriptionExpiryService, usageCleanupService, idempotencyCleanupService, pricingService, emailQueueService, billingCacheService, usageRecordWorkerPool, subscriptionService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, openAIGatewayService, scheduledTestRunnerService, backupService)
application := &Application{
Server: httpServer,
Cleanup: v,
@@ -284,6 +286,7 @@ func provideCleanup(
antigravityOAuth *service.AntigravityOAuthService,
openAIGateway *service.OpenAIGatewayService,
scheduledTestRunner *service.ScheduledTestRunnerService,
backupSvc *service.BackupService,
) func() {
return func() {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
@@ -419,6 +422,12 @@ func provideCleanup(
}
return nil
}},
{"BackupService", func() error {
if backupSvc != nil {
backupSvc.Stop()
}
return nil
}},
}
infraSteps := []cleanupStep{

View File

@@ -0,0 +1,170 @@
package admin
import (
"github.com/Wei-Shaw/sub2api/internal/pkg/response"
"github.com/Wei-Shaw/sub2api/internal/service"
"github.com/gin-gonic/gin"
)
// BackupHandler exposes the admin HTTP endpoints for database backup and
// restore: S3 storage configuration, schedule management, and the backup
// lifecycle (create / list / delete / download / restore).
type BackupHandler struct {
	backupService *service.BackupService
}

// NewBackupHandler creates a BackupHandler backed by the given BackupService.
func NewBackupHandler(backupService *service.BackupService) *BackupHandler {
	return &BackupHandler{backupService: backupService}
}
// ─── S3 配置 ───
// GetS3Config returns the stored S3 backup configuration; the service layer
// redacts the secret key before it reaches the response.
func (h *BackupHandler) GetS3Config(c *gin.Context) {
	ctx := c.Request.Context()
	cfg, err := h.backupService.GetS3Config(ctx)
	if err == nil {
		response.Success(c, cfg)
		return
	}
	response.ErrorFrom(c, err)
}
// UpdateS3Config binds the request body and persists the S3 configuration,
// returning the saved (secret-redacted) config.
func (h *BackupHandler) UpdateS3Config(c *gin.Context) {
	var payload service.BackupS3Config
	if bindErr := c.ShouldBindJSON(&payload); bindErr != nil {
		response.BadRequest(c, "Invalid request: "+bindErr.Error())
		return
	}
	saved, err := h.backupService.UpdateS3Config(c.Request.Context(), payload)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Success(c, saved)
}
// TestS3Connection runs a connectivity check against the supplied S3 config.
// Connection failures are reported as a 200 response with ok=false so the UI
// can show the message inline rather than as an error page.
func (h *BackupHandler) TestS3Connection(c *gin.Context) {
	var payload service.BackupS3Config
	if bindErr := c.ShouldBindJSON(&payload); bindErr != nil {
		response.BadRequest(c, "Invalid request: "+bindErr.Error())
		return
	}
	if testErr := h.backupService.TestS3Connection(c.Request.Context(), payload); testErr != nil {
		response.Success(c, gin.H{"ok": false, "message": testErr.Error()})
		return
	}
	response.Success(c, gin.H{"ok": true, "message": "connection successful"})
}
// ─── 定时备份 ───
// GetSchedule returns the current scheduled-backup configuration.
func (h *BackupHandler) GetSchedule(c *gin.Context) {
	schedule, err := h.backupService.GetSchedule(c.Request.Context())
	if err == nil {
		response.Success(c, schedule)
		return
	}
	response.ErrorFrom(c, err)
}
// UpdateSchedule binds, validates, and saves the scheduled-backup config.
func (h *BackupHandler) UpdateSchedule(c *gin.Context) {
	var payload service.BackupScheduleConfig
	if bindErr := c.ShouldBindJSON(&payload); bindErr != nil {
		response.BadRequest(c, "Invalid request: "+bindErr.Error())
		return
	}
	saved, err := h.backupService.UpdateSchedule(c.Request.Context(), payload)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Success(c, saved)
}
// ─── 备份操作 ───
// CreateBackupRequest is the optional JSON body for CreateBackup.
type CreateBackupRequest struct {
	// ExpireDays controls backup expiry: nil uses the default (14 days),
	// 0 means the backup never expires.
	ExpireDays *int `json:"expire_days"`
}

// CreateBackup triggers a manual full database backup.
func (h *BackupHandler) CreateBackup(c *gin.Context) {
	var req CreateBackupRequest
	// The body is optional; ignore bind errors so an empty body works.
	_ = c.ShouldBindJSON(&req)

	expireDays := 14 // default retention in days
	if req.ExpireDays != nil {
		// Fix: a negative value previously slipped through and silently
		// behaved like 0 (never expire), creating permanent backups by
		// accident. Reject it explicitly instead.
		if *req.ExpireDays < 0 {
			response.BadRequest(c, "expire_days must be >= 0")
			return
		}
		expireDays = *req.ExpireDays
	}
	record, err := h.backupService.CreateBackup(c.Request.Context(), "manual", expireDays)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Success(c, record)
}
// ListBackups returns every backup record. A nil slice is replaced by an
// empty one so the JSON payload is [] rather than null.
func (h *BackupHandler) ListBackups(c *gin.Context) {
	records, err := h.backupService.ListBackups(c.Request.Context())
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	items := records
	if items == nil {
		items = []service.BackupRecord{}
	}
	response.Success(c, gin.H{"items": items})
}
// GetBackup returns a single backup record identified by the path ID.
func (h *BackupHandler) GetBackup(c *gin.Context) {
	id := c.Param("id")
	if id == "" {
		response.BadRequest(c, "backup ID is required")
		return
	}
	record, err := h.backupService.GetBackupRecord(c.Request.Context(), id)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Success(c, record)
}
// DeleteBackup removes a backup record and (best-effort) its S3 object.
func (h *BackupHandler) DeleteBackup(c *gin.Context) {
	id := c.Param("id")
	if id == "" {
		response.BadRequest(c, "backup ID is required")
		return
	}
	if err := h.backupService.DeleteBackup(c.Request.Context(), id); err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Success(c, gin.H{"deleted": true})
}
// GetDownloadURL returns a presigned S3 download URL for a completed backup.
func (h *BackupHandler) GetDownloadURL(c *gin.Context) {
	id := c.Param("id")
	if id == "" {
		response.BadRequest(c, "backup ID is required")
		return
	}
	url, err := h.backupService.GetBackupDownloadURL(c.Request.Context(), id)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Success(c, gin.H{"url": url})
}
// ─── 恢复操作 ───
// RestoreBackup restores the database from the given completed backup.
func (h *BackupHandler) RestoreBackup(c *gin.Context) {
	id := c.Param("id")
	if id == "" {
		response.BadRequest(c, "backup ID is required")
		return
	}
	if err := h.backupService.RestoreBackup(c.Request.Context(), id); err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Success(c, gin.H{"restored": true})
}

View File

@@ -12,6 +12,7 @@ type AdminHandlers struct {
Account *admin.AccountHandler
Announcement *admin.AnnouncementHandler
DataManagement *admin.DataManagementHandler
Backup *admin.BackupHandler
OAuth *admin.OAuthHandler
OpenAIOAuth *admin.OpenAIOAuthHandler
GeminiOAuth *admin.GeminiOAuthHandler

View File

@@ -15,6 +15,7 @@ func ProvideAdminHandlers(
accountHandler *admin.AccountHandler,
announcementHandler *admin.AnnouncementHandler,
dataManagementHandler *admin.DataManagementHandler,
backupHandler *admin.BackupHandler,
oauthHandler *admin.OAuthHandler,
openaiOAuthHandler *admin.OpenAIOAuthHandler,
geminiOAuthHandler *admin.GeminiOAuthHandler,
@@ -39,6 +40,7 @@ func ProvideAdminHandlers(
Account: accountHandler,
Announcement: announcementHandler,
DataManagement: dataManagementHandler,
Backup: backupHandler,
OAuth: oauthHandler,
OpenAIOAuth: openaiOAuthHandler,
GeminiOAuth: geminiOAuthHandler,
@@ -128,6 +130,7 @@ var ProviderSet = wire.NewSet(
admin.NewAccountHandler,
admin.NewAnnouncementHandler,
admin.NewDataManagementHandler,
admin.NewBackupHandler,
admin.NewOAuthHandler,
admin.NewOpenAIOAuthHandler,
admin.NewGeminiOAuthHandler,

View File

@@ -58,6 +58,9 @@ func RegisterAdminRoutes(
// 数据管理
registerDataManagementRoutes(admin, h)
// 数据库备份恢复
registerBackupRoutes(admin, h)
// 运维监控Ops
registerOpsRoutes(admin, h)
@@ -436,6 +439,30 @@ func registerDataManagementRoutes(admin *gin.RouterGroup, h *handler.Handlers) {
}
}
func registerBackupRoutes(admin *gin.RouterGroup, h *handler.Handlers) {
backup := admin.Group("/backups")
{
// S3 存储配置
backup.GET("/s3-config", h.Admin.Backup.GetS3Config)
backup.PUT("/s3-config", h.Admin.Backup.UpdateS3Config)
backup.POST("/s3-config/test", h.Admin.Backup.TestS3Connection)
// 定时备份配置
backup.GET("/schedule", h.Admin.Backup.GetSchedule)
backup.PUT("/schedule", h.Admin.Backup.UpdateSchedule)
// 备份操作
backup.POST("", h.Admin.Backup.CreateBackup)
backup.GET("", h.Admin.Backup.ListBackups)
backup.GET("/:id", h.Admin.Backup.GetBackup)
backup.DELETE("/:id", h.Admin.Backup.DeleteBackup)
backup.GET("/:id/download-url", h.Admin.Backup.GetDownloadURL)
// 恢复操作
backup.POST("/:id/restore", h.Admin.Backup.RestoreBackup)
}
}
func registerSystemRoutes(admin *gin.RouterGroup, h *handler.Handlers) {
system := admin.Group("/system")
{

View File

@@ -0,0 +1,814 @@
package service
import (
"bytes"
"compress/gzip"
"context"
"encoding/json"
"fmt"
"io"
"os/exec"
"sort"
"strings"
"sync"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
awsconfig "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/google/uuid"
"github.com/robfig/cron/v3"
"github.com/Wei-Shaw/sub2api/internal/config"
infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors"
"github.com/Wei-Shaw/sub2api/internal/pkg/logger"
)
// Setting-store keys under which backup state is persisted, plus the cap on
// how many backup records the history keeps.
const (
	settingKeyBackupS3Config = "backup_s3_config"
	settingKeyBackupSchedule = "backup_schedule"
	settingKeyBackupRecords  = "backup_records"
	maxBackupRecords         = 100
)

// Sentinel errors returned by BackupService operations; handlers map them to
// HTTP status codes via the infraerrors helpers.
var (
	ErrBackupS3NotConfigured = infraerrors.BadRequest("BACKUP_S3_NOT_CONFIGURED", "backup S3 storage is not configured")
	ErrBackupNotFound        = infraerrors.NotFound("BACKUP_NOT_FOUND", "backup record not found")
	ErrBackupInProgress      = infraerrors.Conflict("BACKUP_IN_PROGRESS", "a backup is already in progress")
	ErrRestoreInProgress     = infraerrors.Conflict("RESTORE_IN_PROGRESS", "a restore is already in progress")
)
// BackupS3Config holds the S3-compatible storage settings used for backups
// (works with Cloudflare R2).
type BackupS3Config struct {
	Endpoint        string `json:"endpoint"` // e.g. https://<account_id>.r2.cloudflarestorage.com
	Region          string `json:"region"`   // "auto" for R2
	Bucket          string `json:"bucket"`
	AccessKeyID     string `json:"access_key_id"`
	SecretAccessKey string `json:"secret_access_key,omitempty"`
	Prefix          string `json:"prefix"` // S3 key prefix, e.g. "backups/"
	ForcePathStyle  bool   `json:"force_path_style"`
}

// IsConfigured reports whether the mandatory fields (bucket and both
// credential halves) are all present.
func (c *BackupS3Config) IsConfigured() bool {
	switch {
	case c.Bucket == "":
		return false
	case c.AccessKeyID == "":
		return false
	case c.SecretAccessKey == "":
		return false
	default:
		return true
	}
}
// BackupScheduleConfig controls automatic scheduled backups.
type BackupScheduleConfig struct {
	Enabled     bool   `json:"enabled"`
	CronExpr    string `json:"cron_expr"`    // standard 5-field cron expression, e.g. "0 2 * * *" (2 AM daily)
	RetainDays  int    `json:"retain_days"`  // expiry in days (default 14); 0 = no automatic cleanup
	RetainCount int    `json:"retain_count"` // max backups to keep; 0 = unlimited
}
// BackupRecord describes one backup attempt and its outcome.
// All timestamps are RFC3339-formatted strings.
type BackupRecord struct {
	ID          string `json:"id"`
	Status      string `json:"status"`      // pending, running, completed, failed
	BackupType  string `json:"backup_type"` // postgres
	FileName    string `json:"file_name"`
	S3Key       string `json:"s3_key"`
	SizeBytes   int64  `json:"size_bytes"`
	TriggeredBy string `json:"triggered_by"` // manual, scheduled
	ErrorMsg    string `json:"error_message,omitempty"`
	StartedAt   string `json:"started_at"`
	FinishedAt  string `json:"finished_at,omitempty"`
	ExpiresAt   string `json:"expires_at,omitempty"` // expiry time; empty = never expires
}
// BackupService implements database backup and restore:
// pg_dump -> gzip -> S3 upload, optionally driven by a cron schedule, with
// records persisted in the settings store.
type BackupService struct {
	settingRepo SettingRepository      // settings store for config and record history
	dbCfg       *config.DatabaseConfig // connection parameters passed to pg_dump/psql

	mu        sync.Mutex      // guards s3Client, s3Cfg, backingUp, restoring
	s3Client  *s3.Client      // cached client; cleared whenever the config is updated
	s3Cfg     *BackupS3Config // config the cached client was built from
	backingUp bool            // single-flight flag for CreateBackup
	restoring bool            // single-flight flag for RestoreBackup

	cronMu      sync.Mutex   // guards the scheduler state below
	cronSched   *cron.Cron   // created in Start, stopped in Stop
	cronEntryID cron.EntryID // currently registered schedule entry; 0 = none
}
// NewBackupService constructs a BackupService. The cron scheduler is not
// started here; call Start to enable scheduled backups.
func NewBackupService(settingRepo SettingRepository, cfg *config.Config) *BackupService {
	return &BackupService{
		settingRepo: settingRepo,
		dbCfg:       &cfg.Database,
	}
}
// Start launches the cron scheduler and applies any persisted schedule
// configuration. Intended to be called once at application startup.
func (s *BackupService) Start() {
	sched := cron.New()
	sched.Start()

	// Fix: publish the scheduler under cronMu so that Stop or
	// applyCronSchedule running concurrently observe a consistent value
	// (the original assigned s.cronSched without holding the lock).
	s.cronMu.Lock()
	s.cronSched = sched
	s.cronMu.Unlock()

	// Load any previously saved schedule configuration.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	schedule, err := s.GetSchedule(ctx)
	if err != nil {
		logger.LegacyPrintf("service.backup", "[Backup] 加载定时备份配置失败: %v", err)
		return
	}
	if schedule.Enabled && schedule.CronExpr != "" {
		if err := s.applyCronSchedule(schedule); err != nil {
			logger.LegacyPrintf("service.backup", "[Backup] 应用定时备份配置失败: %v", err)
		}
	}
}
// Stop halts the cron scheduler; safe to call even if Start never ran.
func (s *BackupService) Stop() {
	s.cronMu.Lock()
	defer s.cronMu.Unlock()
	if s.cronSched == nil {
		return
	}
	s.cronSched.Stop()
}
// ─── S3 配置管理 ───
// GetS3Config returns the persisted S3 config with the secret redacted.
// Missing or unreadable settings yield an empty config rather than an error.
func (s *BackupService) GetS3Config(ctx context.Context) (*BackupS3Config, error) {
	var cfg BackupS3Config
	raw, err := s.settingRepo.GetValue(ctx, settingKeyBackupS3Config)
	if err == nil && raw != "" {
		if jsonErr := json.Unmarshal([]byte(raw), &cfg); jsonErr != nil {
			cfg = BackupS3Config{}
		}
	}
	cfg.SecretAccessKey = "" // never expose the secret to callers
	return &cfg, nil
}
// UpdateS3Config persists the S3 config. An empty incoming secret keeps the
// previously stored one; the cached S3 client is invalidated so subsequent
// operations pick up the new settings.
func (s *BackupService) UpdateS3Config(ctx context.Context, cfg BackupS3Config) (*BackupS3Config, error) {
	if cfg.SecretAccessKey == "" {
		if stored, _ := s.loadS3Config(ctx); stored != nil {
			cfg.SecretAccessKey = stored.SecretAccessKey
		}
	}
	encoded, err := json.Marshal(cfg)
	if err != nil {
		return nil, fmt.Errorf("marshal s3 config: %w", err)
	}
	if err := s.settingRepo.Set(ctx, settingKeyBackupS3Config, string(encoded)); err != nil {
		return nil, fmt.Errorf("save s3 config: %w", err)
	}

	// Drop the cached client so the next operation rebuilds it.
	s.mu.Lock()
	s.s3Client = nil
	s.s3Cfg = nil
	s.mu.Unlock()

	cfg.SecretAccessKey = "" // redact in the response
	return &cfg, nil
}
// TestS3Connection verifies connectivity by issuing HeadBucket with the
// supplied config. An empty secret falls back to the stored one.
func (s *BackupService) TestS3Connection(ctx context.Context, cfg BackupS3Config) error {
	if cfg.SecretAccessKey == "" {
		if stored, _ := s.loadS3Config(ctx); stored != nil {
			cfg.SecretAccessKey = stored.SecretAccessKey
		}
	}
	if cfg.Bucket == "" || cfg.AccessKeyID == "" || cfg.SecretAccessKey == "" {
		return fmt.Errorf("incomplete S3 config: bucket, access_key_id, secret_access_key are required")
	}
	client, err := s.buildS3Client(ctx, &cfg)
	if err != nil {
		return err
	}
	if _, headErr := client.HeadBucket(ctx, &s3.HeadBucketInput{Bucket: &cfg.Bucket}); headErr != nil {
		return fmt.Errorf("S3 HeadBucket failed: %w", headErr)
	}
	return nil
}
// ─── 定时备份管理 ───
// GetSchedule returns the stored schedule config; missing or unparsable
// settings yield the zero-value config rather than an error.
func (s *BackupService) GetSchedule(ctx context.Context) (*BackupScheduleConfig, error) {
	var cfg BackupScheduleConfig
	raw, err := s.settingRepo.GetValue(ctx, settingKeyBackupSchedule)
	if err != nil || raw == "" {
		return &cfg, nil
	}
	if unmarshalErr := json.Unmarshal([]byte(raw), &cfg); unmarshalErr != nil {
		return &BackupScheduleConfig{}, nil
	}
	return &cfg, nil
}
// UpdateSchedule validates, persists, and then applies (or removes) the
// scheduled-backup configuration.
func (s *BackupService) UpdateSchedule(ctx context.Context, cfg BackupScheduleConfig) (*BackupScheduleConfig, error) {
	if cfg.Enabled && cfg.CronExpr == "" {
		return nil, infraerrors.BadRequest("INVALID_CRON", "cron expression is required when schedule is enabled")
	}
	if cfg.CronExpr != "" {
		// Standard 5-field cron (minute..day-of-week), matching cron.New().
		parser := cron.NewParser(cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow)
		if _, parseErr := parser.Parse(cfg.CronExpr); parseErr != nil {
			return nil, infraerrors.BadRequest("INVALID_CRON", fmt.Sprintf("invalid cron expression: %v", parseErr))
		}
	}
	encoded, err := json.Marshal(cfg)
	if err != nil {
		return nil, fmt.Errorf("marshal schedule config: %w", err)
	}
	if err := s.settingRepo.Set(ctx, settingKeyBackupSchedule, string(encoded)); err != nil {
		return nil, fmt.Errorf("save schedule config: %w", err)
	}
	if !cfg.Enabled {
		s.removeCronSchedule()
		return &cfg, nil
	}
	if err := s.applyCronSchedule(&cfg); err != nil {
		return nil, err
	}
	return &cfg, nil
}
// applyCronSchedule (re)registers the scheduled-backup job for cfg.CronExpr,
// replacing any previously registered entry so at most one schedule is ever
// active. The caller must NOT hold cronMu. Returns an INVALID_CRON error if
// the scheduler rejects the expression.
func (s *BackupService) applyCronSchedule(cfg *BackupScheduleConfig) error {
	s.cronMu.Lock()
	defer s.cronMu.Unlock()
	if s.cronSched == nil {
		return fmt.Errorf("cron scheduler not initialized")
	}
	// Remove the previous entry before adding the new one.
	if s.cronEntryID != 0 {
		s.cronSched.Remove(s.cronEntryID)
		s.cronEntryID = 0
	}
	entryID, err := s.cronSched.AddFunc(cfg.CronExpr, func() {
		s.runScheduledBackup()
	})
	if err != nil {
		return infraerrors.BadRequest("INVALID_CRON", fmt.Sprintf("failed to schedule: %v", err))
	}
	s.cronEntryID = entryID
	logger.LegacyPrintf("service.backup", "[Backup] 定时备份已启用: %s", cfg.CronExpr)
	return nil
}
// removeCronSchedule unregisters the active scheduled-backup entry, if any.
func (s *BackupService) removeCronSchedule() {
	s.cronMu.Lock()
	defer s.cronMu.Unlock()
	if s.cronSched == nil || s.cronEntryID == 0 {
		return
	}
	s.cronSched.Remove(s.cronEntryID)
	s.cronEntryID = 0
	logger.LegacyPrintf("service.backup", "[Backup] 定时备份已停用")
}
// runScheduledBackup is the cron callback: it runs a single backup using the
// schedule's retention settings, then prunes expired backups.
func (s *BackupService) runScheduledBackup() {
	// Cap one full dump + upload cycle at 30 minutes.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute)
	defer cancel()
	// Read the expiry (in days) from the schedule config. GetSchedule in
	// this file never returns a non-nil error (it falls back to an empty
	// config), so ignoring the error here is safe.
	schedule, _ := s.GetSchedule(ctx)
	expireDays := 14 // default: expire after 14 days
	if schedule != nil && schedule.RetainDays > 0 {
		expireDays = schedule.RetainDays
	}
	logger.LegacyPrintf("service.backup", "[Backup] 开始执行定时备份, 过期天数: %d", expireDays)
	record, err := s.CreateBackup(ctx, "scheduled", expireDays)
	if err != nil {
		logger.LegacyPrintf("service.backup", "[Backup] 定时备份失败: %v", err)
		return
	}
	logger.LegacyPrintf("service.backup", "[Backup] 定时备份完成: id=%s size=%d", record.ID, record.SizeBytes)
	// Prune expired backups, reusing the schedule loaded above.
	if schedule == nil {
		return
	}
	if err := s.cleanupOldBackups(ctx, schedule); err != nil {
		logger.LegacyPrintf("service.backup", "[Backup] 清理过期备份失败: %v", err)
	}
}
// ─── 备份/恢复核心 ───
// CreateBackup performs a full pg_dump of the configured database, gzips the
// output in memory, uploads it to the configured S3 bucket, and persists a
// BackupRecord describing the outcome.
//
// triggeredBy is recorded verbatim ("manual" or "scheduled").
// expireDays: 0 = never expire; >0 sets ExpiresAt that many days from now.
// Only one backup may run at a time; concurrent calls get ErrBackupInProgress.
func (s *BackupService) CreateBackup(ctx context.Context, triggeredBy string, expireDays int) (*BackupRecord, error) {
	// Single-flight guard.
	s.mu.Lock()
	if s.backingUp {
		s.mu.Unlock()
		return nil, ErrBackupInProgress
	}
	s.backingUp = true
	s.mu.Unlock()
	defer func() {
		s.mu.Lock()
		s.backingUp = false
		s.mu.Unlock()
	}()

	s3Cfg, err := s.loadS3Config(ctx)
	if err != nil {
		return nil, err
	}
	if s3Cfg == nil || !s3Cfg.IsConfigured() {
		return nil, ErrBackupS3NotConfigured
	}
	client, err := s.getOrCreateS3Client(ctx, s3Cfg)
	if err != nil {
		return nil, fmt.Errorf("init S3 client: %w", err)
	}

	now := time.Now()
	backupID := uuid.New().String()[:8]
	fileName := fmt.Sprintf("%s_%s.sql.gz", s.dbCfg.DBName, now.Format("20060102_150405"))
	s3Key := s.buildS3Key(s3Cfg, fileName)

	var expiresAt string
	if expireDays > 0 {
		expiresAt = now.AddDate(0, 0, expireDays).Format(time.RFC3339)
	}
	record := &BackupRecord{
		ID:          backupID,
		Status:      "running",
		BackupType:  "postgres",
		FileName:    fileName,
		S3Key:       s3Key,
		TriggeredBy: triggeredBy,
		StartedAt:   now.Format(time.RFC3339),
		ExpiresAt:   expiresAt,
	}

	// markFailed stamps the record as failed and persists it best-effort so
	// the failed attempt stays visible in the backup history.
	markFailed := func(msg string) {
		record.Status = "failed"
		record.ErrorMsg = msg
		record.FinishedAt = time.Now().Format(time.RFC3339)
		_ = s.saveRecord(ctx, record)
	}

	// Full-database dump.
	dumpData, err := s.pgDump(ctx)
	if err != nil {
		markFailed(fmt.Sprintf("pg_dump failed: %v", err))
		return record, fmt.Errorf("pg_dump: %w", err)
	}

	// gzip-compress in memory before upload.
	var compressed bytes.Buffer
	gzWriter := gzip.NewWriter(&compressed)
	if _, err := gzWriter.Write(dumpData); err != nil {
		markFailed(fmt.Sprintf("gzip failed: %v", err))
		return record, fmt.Errorf("gzip: %w", err)
	}
	if err := gzWriter.Close(); err != nil {
		// Fix: this path previously returned (nil, err) without persisting
		// any record, so a failed attempt silently vanished from the backup
		// history while every sibling failure path recorded one.
		markFailed(fmt.Sprintf("gzip close failed: %v", err))
		return record, fmt.Errorf("gzip close: %w", err)
	}
	record.SizeBytes = int64(compressed.Len())

	// Upload to S3-compatible storage.
	contentType := "application/gzip"
	if _, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:      &s3Cfg.Bucket,
		Key:         &s3Key,
		Body:        bytes.NewReader(compressed.Bytes()),
		ContentType: &contentType,
	}); err != nil {
		markFailed(fmt.Sprintf("S3 upload failed: %v", err))
		return record, fmt.Errorf("s3 upload: %w", err)
	}

	record.Status = "completed"
	record.FinishedAt = time.Now().Format(time.RFC3339)
	if err := s.saveRecord(ctx, record); err != nil {
		logger.LegacyPrintf("service.backup", "[Backup] 保存备份记录失败: %v", err)
	}
	return record, nil
}
// RestoreBackup downloads the given completed backup from S3, gunzips it,
// and replays the SQL into the database via psql --single-transaction.
// Only one restore may run at a time; concurrent calls get
// ErrRestoreInProgress.
func (s *BackupService) RestoreBackup(ctx context.Context, backupID string) error {
	// Single-flight guard.
	s.mu.Lock()
	if s.restoring {
		s.mu.Unlock()
		return ErrRestoreInProgress
	}
	s.restoring = true
	s.mu.Unlock()
	defer func() {
		s.mu.Lock()
		s.restoring = false
		s.mu.Unlock()
	}()

	record, err := s.GetBackupRecord(ctx, backupID)
	if err != nil {
		return err
	}
	if record.Status != "completed" {
		return infraerrors.BadRequest("BACKUP_NOT_COMPLETED", "can only restore from a completed backup")
	}

	s3Cfg, err := s.loadS3Config(ctx)
	if err != nil {
		return err
	}
	// Fix: guard explicitly against a missing/incomplete S3 config; the
	// original passed a possibly-nil config straight to the client factory,
	// yielding a less specific error (or reusing a stale cached client).
	if s3Cfg == nil || !s3Cfg.IsConfigured() {
		return ErrBackupS3NotConfigured
	}
	client, err := s.getOrCreateS3Client(ctx, s3Cfg)
	if err != nil {
		return fmt.Errorf("init S3 client: %w", err)
	}

	// Download the backup object from S3.
	result, err := client.GetObject(ctx, &s3.GetObjectInput{
		Bucket: &s3Cfg.Bucket,
		Key:    &record.S3Key,
	})
	if err != nil {
		return fmt.Errorf("S3 download failed: %w", err)
	}
	defer result.Body.Close()

	// Decompress the gzip stream.
	gzReader, err := gzip.NewReader(result.Body)
	if err != nil {
		return fmt.Errorf("gzip reader: %w", err)
	}
	defer gzReader.Close()
	sqlData, err := io.ReadAll(gzReader)
	if err != nil {
		return fmt.Errorf("read backup data: %w", err)
	}

	// Replay the dump through psql.
	if err := s.pgRestore(ctx, sqlData); err != nil {
		return fmt.Errorf("pg restore: %w", err)
	}
	return nil
}
// ─── 备份记录管理 ───
// ListBackups returns all backup records sorted newest-first by StartedAt.
func (s *BackupService) ListBackups(ctx context.Context) ([]BackupRecord, error) {
	records, err := s.loadRecords(ctx)
	if err != nil {
		return nil, err
	}
	sort.Slice(records, func(a, b int) bool {
		return records[a].StartedAt > records[b].StartedAt
	})
	return records, nil
}
// GetBackupRecord looks up a single record by ID, returning
// ErrBackupNotFound when no record matches.
func (s *BackupService) GetBackupRecord(ctx context.Context, backupID string) (*BackupRecord, error) {
	records, err := s.loadRecords(ctx)
	if err != nil {
		return nil, err
	}
	for idx := range records {
		if records[idx].ID != backupID {
			continue
		}
		return &records[idx], nil
	}
	return nil, ErrBackupNotFound
}
// DeleteBackup removes a backup record from the persisted list and, for
// completed backups, best-effort deletes the S3 object first. S3 deletion
// errors are deliberately ignored — the record is removed either way so a
// broken S3 config cannot strand records.
func (s *BackupService) DeleteBackup(ctx context.Context, backupID string) error {
	records, err := s.loadRecords(ctx)
	if err != nil {
		return err
	}
	// Partition into the record to delete and everything else.
	var found *BackupRecord
	var remaining []BackupRecord
	for i := range records {
		if records[i].ID == backupID {
			found = &records[i]
		} else {
			remaining = append(remaining, records[i])
		}
	}
	if found == nil {
		return ErrBackupNotFound
	}
	// Delete the object from S3 (best-effort, completed backups only —
	// failed backups never uploaded an object).
	if found.S3Key != "" && found.Status == "completed" {
		s3Cfg, err := s.loadS3Config(ctx)
		if err == nil && s3Cfg != nil && s3Cfg.IsConfigured() {
			client, err := s.getOrCreateS3Client(ctx, s3Cfg)
			if err == nil {
				_, _ = client.DeleteObject(ctx, &s3.DeleteObjectInput{
					Bucket: &s3Cfg.Bucket,
					Key:    &found.S3Key,
				})
			}
		}
	}
	return s.saveRecords(ctx, remaining)
}
// GetBackupDownloadURL returns a presigned GET URL (1 hour expiry) for a
// completed backup's S3 object.
func (s *BackupService) GetBackupDownloadURL(ctx context.Context, backupID string) (string, error) {
	record, err := s.GetBackupRecord(ctx, backupID)
	if err != nil {
		return "", err
	}
	if record.Status != "completed" {
		return "", infraerrors.BadRequest("BACKUP_NOT_COMPLETED", "backup is not completed")
	}
	s3Cfg, err := s.loadS3Config(ctx)
	if err != nil {
		return "", err
	}
	client, err := s.getOrCreateS3Client(ctx, s3Cfg)
	if err != nil {
		return "", err
	}
	presigned, err := s3.NewPresignClient(client).PresignGetObject(ctx, &s3.GetObjectInput{
		Bucket: &s3Cfg.Bucket,
		Key:    &record.S3Key,
	}, s3.WithPresignExpires(1*time.Hour))
	if err != nil {
		return "", fmt.Errorf("presign url: %w", err)
	}
	return presigned.URL, nil
}
// ─── 内部方法 ───
// loadS3Config reads the raw S3 config (secret included) from the settings
// store. Missing or unparsable data returns (nil, nil) — callers treat nil
// as "not configured"; parse errors are deliberately swallowed so corrupt
// settings degrade to unconfigured rather than blocking everything.
func (s *BackupService) loadS3Config(ctx context.Context) (*BackupS3Config, error) {
	raw, err := s.settingRepo.GetValue(ctx, settingKeyBackupS3Config)
	if err != nil || raw == "" {
		return nil, nil
	}
	var cfg BackupS3Config
	if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
		return nil, nil
	}
	return &cfg, nil
}
// buildS3Client constructs an S3 client for cfg using static credentials.
// An empty region defaults to "auto" (Cloudflare R2's convention).
func (s *BackupService) buildS3Client(ctx context.Context, cfg *BackupS3Config) (*s3.Client, error) {
	region := cfg.Region
	if region == "" {
		region = "auto" // Cloudflare R2 default region
	}
	awsCfg, err := awsconfig.LoadDefaultConfig(ctx,
		awsconfig.WithRegion(region),
		awsconfig.WithCredentialsProvider(
			credentials.NewStaticCredentialsProvider(cfg.AccessKeyID, cfg.SecretAccessKey, ""),
		),
	)
	if err != nil {
		return nil, fmt.Errorf("load aws config: %w", err)
	}
	client := s3.NewFromConfig(awsCfg, func(o *s3.Options) {
		if cfg.Endpoint != "" {
			o.BaseEndpoint = &cfg.Endpoint
		}
		if cfg.ForcePathStyle {
			o.UsePathStyle = true
		}
		// Unsigned payloads + checksums only when required — presumably for
		// compatibility with R2/other S3-compatible stores that reject the
		// SDK's default payload hashing; confirm against the SDK/R2 docs.
		o.APIOptions = append(o.APIOptions, v4.SwapComputePayloadSHA256ForUnsignedPayloadMiddleware)
		o.RequestChecksumCalculation = aws.RequestChecksumCalculationWhenRequired
	})
	return client, nil
}
// getOrCreateS3Client returns the cached S3 client, or builds and caches one
// from cfg. The cache is cleared by UpdateS3Config; note it is keyed only on
// presence (not on cfg contents), so callers must pass the current config.
func (s *BackupService) getOrCreateS3Client(ctx context.Context, cfg *BackupS3Config) (*s3.Client, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.s3Client != nil && s.s3Cfg != nil {
		return s.s3Client, nil
	}
	if cfg == nil {
		return nil, ErrBackupS3NotConfigured
	}
	client, err := s.buildS3Client(ctx, cfg)
	if err != nil {
		return nil, err
	}
	s.s3Client = client
	s.s3Cfg = cfg
	return client, nil
}
// buildS3Key composes the object key as <prefix>/<yyyy/mm/dd>/<fileName>.
// An empty prefix defaults to "backups"; trailing slashes are trimmed.
// NOTE(review): the date folder uses time.Now() independently of the
// caller's timestamp, so it could differ from the filename's date across a
// midnight boundary — confirm whether that matters.
func (s *BackupService) buildS3Key(cfg *BackupS3Config, fileName string) string {
	prefix := strings.TrimRight(cfg.Prefix, "/")
	if prefix == "" {
		prefix = "backups"
	}
	datePath := time.Now().Format("2006/01/02")
	return prefix + "/" + datePath + "/" + fileName
}
// pgDump runs a full-database pg_dump and returns the plain-SQL output.
// --clean/--if-exists make the dump drop objects before recreating them;
// --no-owner/--no-acl avoid role-dependent statements so the dump restores
// under a different database user.
func (s *BackupService) pgDump(ctx context.Context) ([]byte, error) {
	args := []string{
		"-h", s.dbCfg.Host,
		"-p", fmt.Sprintf("%d", s.dbCfg.Port),
		"-U", s.dbCfg.User,
		"-d", s.dbCfg.DBName,
		"--no-owner",
		"--no-acl",
		"--clean",
		"--if-exists",
	}
	cmd := exec.CommandContext(ctx, "pg_dump", args...)

	// Fix: build the environment once instead of calling cmd.Environ()
	// twice and reassigning cmd.Env per variable (the original relied on
	// Environ() returning the already-assigned Env to accumulate entries).
	env := cmd.Environ()
	if s.dbCfg.Password != "" {
		env = append(env, "PGPASSWORD="+s.dbCfg.Password)
	}
	if s.dbCfg.SSLMode != "" {
		env = append(env, "PGSSLMODE="+s.dbCfg.SSLMode)
	}
	cmd.Env = env

	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		// Include stderr: pg_dump reports the actual cause there.
		return nil, fmt.Errorf("%v: %s", err, stderr.String())
	}
	return stdout.Bytes(), nil
}
// pgRestore pipes sqlData into psql. --single-transaction wraps the whole
// restore in one transaction so a failure rolls back cleanly.
func (s *BackupService) pgRestore(ctx context.Context, sqlData []byte) error {
	args := []string{
		"-h", s.dbCfg.Host,
		"-p", fmt.Sprintf("%d", s.dbCfg.Port),
		"-U", s.dbCfg.User,
		"-d", s.dbCfg.DBName,
		"--single-transaction",
	}
	cmd := exec.CommandContext(ctx, "psql", args...)

	// Fix: build the environment once (mirrors the pgDump cleanup) instead
	// of the original double cmd.Environ() append-and-reassign.
	env := cmd.Environ()
	if s.dbCfg.Password != "" {
		env = append(env, "PGPASSWORD="+s.dbCfg.Password)
	}
	if s.dbCfg.SSLMode != "" {
		env = append(env, "PGSSLMODE="+s.dbCfg.SSLMode)
	}
	cmd.Env = env

	cmd.Stdin = bytes.NewReader(sqlData)
	var stderr bytes.Buffer
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		// Include stderr: psql reports the failing statement there.
		return fmt.Errorf("%v: %s", err, stderr.String())
	}
	return nil
}
// loadRecords reads the persisted backup record list. Missing, unreadable,
// or corrupt data is treated as "no records" (nil, nil) rather than an
// error, so a damaged setting never blocks backup operations. The error
// return is kept for interface symmetry; this implementation never sets it.
func (s *BackupService) loadRecords(ctx context.Context) ([]BackupRecord, error) {
	raw, err := s.settingRepo.GetValue(ctx, settingKeyBackupRecords)
	if err != nil || raw == "" {
		return nil, nil
	}
	var records []BackupRecord
	if err := json.Unmarshal([]byte(raw), &records); err != nil {
		return nil, nil
	}
	return records, nil
}
// saveRecords serializes the full record list into the settings store.
func (s *BackupService) saveRecords(ctx context.Context, records []BackupRecord) error {
	payload, err := json.Marshal(records)
	if err != nil {
		return err
	}
	return s.settingRepo.Set(ctx, settingKeyBackupRecords, string(payload))
}
// saveRecord upserts one record into the persisted list, capping the list
// at maxBackupRecords (entries at the front — the oldest by append order —
// are dropped first).
// NOTE(review): this is an unsynchronized read-modify-write of the records
// setting; a concurrent DeleteBackup or saveRecord could lose an update —
// confirm whether callers serialize access.
func (s *BackupService) saveRecord(ctx context.Context, record *BackupRecord) error {
	// Best-effort load: corrupt/missing data yields an empty list.
	records, _ := s.loadRecords(ctx)
	// Update the existing entry by ID, or append a new one.
	found := false
	for i := range records {
		if records[i].ID == record.ID {
			records[i] = *record
			found = true
			break
		}
	}
	if !found {
		records = append(records, *record)
	}
	// Enforce the history cap, keeping the most recently appended entries.
	if len(records) > maxBackupRecords {
		records = records[len(records)-maxBackupRecords:]
	}
	return s.saveRecords(ctx, records)
}
// cleanupOldBackups prunes backups that exceed the schedule's retention
// count or age. S3 objects are deleted best-effort before the record list
// is rewritten; the record list is only rewritten if something was pruned.
// NOTE(review): only records with Status == "completed" are ever pruned
// here, so failed entries accumulate until the maxBackupRecords cap —
// confirm this is intended.
func (s *BackupService) cleanupOldBackups(ctx context.Context, schedule *BackupScheduleConfig) error {
	if schedule == nil {
		return nil
	}
	records, err := s.loadRecords(ctx)
	if err != nil {
		return err
	}
	// Sort newest-first so an index equals "how many newer backups exist".
	sort.Slice(records, func(i, j int) bool {
		return records[i].StartedAt > records[j].StartedAt
	})
	var toDelete []BackupRecord
	var toKeep []BackupRecord
	for i, r := range records {
		shouldDelete := false
		// Retention by count: everything beyond the newest RetainCount.
		if schedule.RetainCount > 0 && i >= schedule.RetainCount {
			shouldDelete = true
		}
		// Retention by age (unparsable StartedAt is left untouched).
		if schedule.RetainDays > 0 && r.StartedAt != "" {
			startedAt, err := time.Parse(time.RFC3339, r.StartedAt)
			if err == nil && time.Since(startedAt) > time.Duration(schedule.RetainDays)*24*time.Hour {
				shouldDelete = true
			}
		}
		if shouldDelete && r.Status == "completed" {
			toDelete = append(toDelete, r)
		} else {
			toKeep = append(toKeep, r)
		}
	}
	// Best-effort deletion of the S3 objects.
	for _, r := range toDelete {
		if r.S3Key != "" {
			_ = s.deleteS3Object(ctx, r.S3Key)
		}
	}
	if len(toDelete) > 0 {
		logger.LegacyPrintf("service.backup", "[Backup] 自动清理了 %d 个过期备份", len(toDelete))
		return s.saveRecords(ctx, toKeep)
	}
	return nil
}
// deleteS3Object removes one object from the configured bucket. A missing or
// unreadable S3 configuration is treated as a no-op (nothing to delete).
func (s *BackupService) deleteS3Object(ctx context.Context, key string) error {
	cfg, err := s.loadS3Config(ctx)
	if err != nil || cfg == nil {
		return nil
	}
	client, err := s.getOrCreateS3Client(ctx, cfg)
	if err != nil {
		return err
	}
	input := &s3.DeleteObjectInput{
		Bucket: &cfg.Bucket,
		Key:    &key,
	}
	_, err = client.DeleteObject(ctx, input)
	return err
}

View File

@@ -322,6 +322,13 @@ func ProvideAPIKeyAuthCacheInvalidator(apiKeyService *APIKeyService) APIKeyAuthC
return apiKeyService
}
// ProvideBackupService creates and starts BackupService.
// NOTE(review): Start() is called here with no matching Stop wired into the
// provider set — presumably it launches the backup scheduler; confirm shutdown
// handling is covered elsewhere in the application lifecycle.
func ProvideBackupService(settingRepo SettingRepository, cfg *config.Config) *BackupService {
	svc := NewBackupService(settingRepo, cfg)
	svc.Start()
	return svc
}
// ProvideSettingService wires SettingService with group reader for default subscription validation.
func ProvideSettingService(settingRepo SettingRepository, groupRepo GroupRepository, cfg *config.Config) *SettingService {
svc := NewSettingService(settingRepo, cfg)
@@ -373,6 +380,7 @@ var ProviderSet = wire.NewSet(
NewAccountTestService,
ProvideSettingService,
NewDataManagementService,
ProvideBackupService,
ProvideOpsSystemLogSink,
NewOpsService,
ProvideOpsMetricsCollector,

View File

@@ -0,0 +1,105 @@
# =============================================================================
# Sub2API Docker Compose - Local Development Build
# =============================================================================
# Build from local source code for testing changes.
#
# Usage:
# cd deploy
# docker compose -f docker-compose.dev.yml up --build
# =============================================================================
services:
  sub2api:
    build:
      context: ..
      dockerfile: Dockerfile
    container_name: sub2api-dev
    restart: unless-stopped
    ports:
      # Bound to loopback by default; set BIND_HOST=0.0.0.0 to expose externally.
      - "${BIND_HOST:-127.0.0.1}:${SERVER_PORT:-8080}:8080"
    volumes:
      - ./data:/app/data
    environment:
      - AUTO_SETUP=true
      - SERVER_HOST=0.0.0.0
      - SERVER_PORT=8080
      # debug mode is intentional: this compose file is for local development.
      - SERVER_MODE=debug
      - RUN_MODE=${RUN_MODE:-standard}
      - DATABASE_HOST=postgres
      - DATABASE_PORT=5432
      - DATABASE_USER=${POSTGRES_USER:-sub2api}
      # ":?" makes compose fail fast when the password is not provided.
      - DATABASE_PASSWORD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD is required}
      - DATABASE_DBNAME=${POSTGRES_DB:-sub2api}
      - DATABASE_SSLMODE=disable
      - REDIS_HOST=redis
      - REDIS_PORT=6379
      - REDIS_PASSWORD=${REDIS_PASSWORD:-}
      - REDIS_DB=${REDIS_DB:-0}
      - ADMIN_EMAIL=${ADMIN_EMAIL:-admin@sub2api.local}
      - ADMIN_PASSWORD=${ADMIN_PASSWORD:-}
      - JWT_SECRET=${JWT_SECRET:-}
      - TOTP_ENCRYPTION_KEY=${TOTP_ENCRYPTION_KEY:-}
      - TZ=${TZ:-Asia/Shanghai}
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    networks:
      - sub2api-network
    healthcheck:
      test: ["CMD", "wget", "-q", "-T", "5", "-O", "/dev/null", "http://localhost:8080/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s
  postgres:
    # Keep this image in lockstep with POSTGRES_IMAGE in the Dockerfile so the
    # pg_dump/psql binaries baked into the app image match the server version.
    image: postgres:18-alpine
    container_name: sub2api-postgres-dev
    restart: unless-stopped
    volumes:
      - ./postgres_data:/var/lib/postgresql/data
    environment:
      - POSTGRES_USER=${POSTGRES_USER:-sub2api}
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD is required}
      - POSTGRES_DB=${POSTGRES_DB:-sub2api}
      # NOTE(review): PGDATA points at the bind-mount root; postgres:18 images
      # changed the default data-directory layout — confirm this explicit
      # PGDATA matches the intended on-disk path before relying on it.
      - PGDATA=/var/lib/postgresql/data
      - TZ=${TZ:-Asia/Shanghai}
    networks:
      - sub2api-network
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-sub2api} -d ${POSTGRES_DB:-sub2api}"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 10s
  redis:
    image: redis:8-alpine
    container_name: sub2api-redis-dev
    restart: unless-stopped
    volumes:
      - ./redis_data:/data
    # --requirepass is appended only when REDIS_PASSWORD is non-empty
    # (compose-level ${VAR:+...} interpolation, resolved before sh runs).
    command: >
      sh -c '
      redis-server
      --save 60 1
      --appendonly yes
      --appendfsync everysec
      ${REDIS_PASSWORD:+--requirepass "$REDIS_PASSWORD"}'
    environment:
      - TZ=${TZ:-Asia/Shanghai}
      # Lets the redis-cli healthcheck authenticate when a password is set.
      - REDISCLI_AUTH=${REDIS_PASSWORD:-}
    networks:
      - sub2api-network
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 5s
networks:
  sub2api-network:
    driver: bridge

View File

@@ -0,0 +1,114 @@
import { apiClient } from '../client'

/** S3-compatible storage target settings (e.g. Cloudflare R2). */
export interface BackupS3Config {
  endpoint: string
  region: string
  bucket: string
  access_key_id: string
  /** Write-only: leave empty on update to keep the previously stored secret. */
  secret_access_key?: string
  prefix: string
  force_path_style: boolean
}

/** Cron-driven automatic backup settings. */
export interface BackupScheduleConfig {
  enabled: boolean
  cron_expr: string
  /** Days before a backup expires; 0 = never expire. */
  retain_days: number
  /** Maximum number of backups to keep; 0 = unlimited. */
  retain_count: number
}

/** One backup run as tracked by the server. */
export interface BackupRecord {
  id: string
  status: 'pending' | 'running' | 'completed' | 'failed'
  backup_type: string
  file_name: string
  s3_key: string
  size_bytes: number
  // e.g. 'scheduled'; anything else is rendered as "manual" by the admin UI.
  triggered_by: string
  error_message?: string
  started_at: string
  finished_at?: string
  expires_at?: string
}

export interface CreateBackupRequest {
  /** Days until the new backup expires; 0/undefined = never expire. */
  expire_days?: number
}

export interface TestS3Response {
  ok: boolean
  message: string
}

// S3 Config

/** Fetch the stored S3 config (the secret is not echoed back). */
export async function getS3Config(): Promise<BackupS3Config> {
  const { data } = await apiClient.get<BackupS3Config>('/admin/backups/s3-config')
  return data
}

/** Persist the S3 config; an empty secret keeps the stored one. */
export async function updateS3Config(config: BackupS3Config): Promise<BackupS3Config> {
  const { data } = await apiClient.put<BackupS3Config>('/admin/backups/s3-config', config)
  return data
}

/** Test connectivity using the supplied (possibly unsaved) config. */
export async function testS3Connection(config: BackupS3Config): Promise<TestS3Response> {
  const { data } = await apiClient.post<TestS3Response>('/admin/backups/s3-config/test', config)
  return data
}

// Schedule

/** Fetch the scheduled-backup configuration. */
export async function getSchedule(): Promise<BackupScheduleConfig> {
  const { data } = await apiClient.get<BackupScheduleConfig>('/admin/backups/schedule')
  return data
}

/** Persist the scheduled-backup configuration. */
export async function updateSchedule(config: BackupScheduleConfig): Promise<BackupScheduleConfig> {
  const { data } = await apiClient.put<BackupScheduleConfig>('/admin/backups/schedule', config)
  return data
}

// Backup operations

/** Start a manual backup; 10-minute timeout because dump + upload can be slow. */
export async function createBackup(req?: CreateBackupRequest): Promise<BackupRecord> {
  const { data } = await apiClient.post<BackupRecord>('/admin/backups', req || {}, { timeout: 600000 })
  return data
}

/** List all backup records. */
export async function listBackups(): Promise<{ items: BackupRecord[] }> {
  const { data } = await apiClient.get<{ items: BackupRecord[] }>('/admin/backups')
  return data
}

/** Fetch a single backup record by id. */
export async function getBackup(id: string): Promise<BackupRecord> {
  const { data } = await apiClient.get<BackupRecord>(`/admin/backups/${id}`)
  return data
}

/** Delete a backup record (and its remote object). */
export async function deleteBackup(id: string): Promise<void> {
  await apiClient.delete(`/admin/backups/${id}`)
}

/** Get a short-lived URL to download the backup file directly. */
export async function getDownloadURL(id: string): Promise<{ url: string }> {
  const { data } = await apiClient.get<{ url: string }>(`/admin/backups/${id}/download-url`)
  return data
}

// Restore

/** Restore the database from a backup; 10-minute timeout, overwrites current data. */
export async function restoreBackup(id: string): Promise<void> {
  await apiClient.post(`/admin/backups/${id}/restore`, {}, { timeout: 600000 })
}

/** Grouped export mirroring the other admin API modules. */
export const backupAPI = {
  getS3Config,
  updateS3Config,
  testS3Connection,
  getSchedule,
  updateSchedule,
  createBackup,
  listBackups,
  getBackup,
  deleteBackup,
  getDownloadURL,
  restoreBackup,
}

export default backupAPI

View File

@@ -23,6 +23,7 @@ import errorPassthroughAPI from './errorPassthrough'
import dataManagementAPI from './dataManagement'
import apiKeysAPI from './apiKeys'
import scheduledTestsAPI from './scheduledTests'
import backupAPI from './backup'
/**
* Unified admin API object for convenient access
@@ -47,7 +48,8 @@ export const adminAPI = {
errorPassthrough: errorPassthroughAPI,
dataManagement: dataManagementAPI,
apiKeys: apiKeysAPI,
scheduledTests: scheduledTestsAPI
scheduledTests: scheduledTestsAPI,
backup: backupAPI
}
export {
@@ -70,7 +72,8 @@ export {
errorPassthroughAPI,
dataManagementAPI,
apiKeysAPI,
scheduledTestsAPI
scheduledTestsAPI,
backupAPI
}
export default adminAPI

View File

@@ -387,6 +387,21 @@ const DatabaseIcon = {
)
}
// Heroicons-style "cloud with up arrow" outline icon, used for the
// database-backup entry in the admin navigation.
const CloudArrowUpIcon = {
  render: () =>
    h(
      'svg',
      { fill: 'none', viewBox: '0 0 24 24', stroke: 'currentColor', 'stroke-width': '1.5' },
      [
        h('path', {
          'stroke-linecap': 'round',
          'stroke-linejoin': 'round',
          d: 'M12 16.5V9.75m0 0l3 3m-3-3l-3 3M6.75 19.5a4.5 4.5 0 01-1.41-8.775 5.25 5.25 0 0110.233-2.33 3 3 0 013.758 3.848A3.752 3.752 0 0118 19.5H6.75z'
        })
      ]
    )
}
const BellIcon = {
render: () =>
h(
@@ -611,6 +626,7 @@ const adminNavItems = computed((): NavItem[] => {
if (authStore.isSimpleMode) {
const filtered = baseItems.filter(item => !item.hideInSimpleMode)
filtered.push({ path: '/keys', label: t('nav.apiKeys'), icon: KeyIcon })
filtered.push({ path: '/admin/backup', label: t('nav.backup'), icon: CloudArrowUpIcon })
filtered.push({ path: '/admin/data-management', label: t('nav.dataManagement'), icon: DatabaseIcon })
filtered.push({ path: '/admin/settings', label: t('nav.settings'), icon: CogIcon })
// Add admin custom menu items after settings
@@ -620,6 +636,7 @@ const adminNavItems = computed((): NavItem[] => {
return filtered
}
baseItems.push({ path: '/admin/backup', label: t('nav.backup'), icon: CloudArrowUpIcon })
baseItems.push({ path: '/admin/data-management', label: t('nav.dataManagement'), icon: DatabaseIcon })
baseItems.push({ path: '/admin/settings', label: t('nav.settings'), icon: CogIcon })
// Add admin custom menu items after settings

View File

@@ -340,6 +340,7 @@ export default {
redeemCodes: 'Redeem Codes',
ops: 'Ops',
promoCodes: 'Promo Codes',
backup: 'DB Backup',
dataManagement: 'Data Management',
settings: 'Settings',
myAccount: 'My Account',
@@ -966,6 +967,110 @@ export default {
failedToLoad: 'Failed to load dashboard statistics'
},
backup: {
title: 'Database Backup',
description: 'Full database backup to S3-compatible storage with scheduled backup and restore',
s3: {
title: 'S3 Storage Configuration',
description: 'Configure S3-compatible storage (supports Cloudflare R2)',
descriptionPrefix: 'Configure S3-compatible storage (supports',
descriptionSuffix: ')',
enabled: 'Enable S3 Storage',
endpoint: 'Endpoint',
region: 'Region',
bucket: 'Bucket',
prefix: 'Key Prefix',
accessKeyId: 'Access Key ID',
secretAccessKey: 'Secret Access Key',
secretConfigured: 'Already configured, leave empty to keep',
forcePathStyle: 'Force Path Style',
testConnection: 'Test Connection',
testSuccess: 'S3 connection test successful',
testFailed: 'S3 connection test failed',
saved: 'S3 configuration saved'
},
schedule: {
title: 'Scheduled Backup',
description: 'Configure automatic scheduled backups',
enabled: 'Enable Scheduled Backup',
cronExpr: 'Cron Expression',
cronHint: 'e.g. "0 2 * * *" means every day at 2:00 AM',
retainDays: 'Backup Expire Days',
retainDaysHint: 'Backup files auto-delete after this many days, 0 = never expire',
retainCount: 'Max Retain Count',
retainCountHint: 'Maximum number of backups to keep, 0 = unlimited',
saved: 'Schedule configuration saved'
},
operations: {
title: 'Backup Records',
description: 'Create manual backups and manage existing backup records',
createBackup: 'Create Backup',
backing: 'Backing up...',
backupCreated: 'Backup created successfully',
expireDays: 'Expire Days'
},
columns: {
status: 'Status',
fileName: 'File Name',
size: 'Size',
expiresAt: 'Expires At',
triggeredBy: 'Triggered By',
startedAt: 'Started At',
actions: 'Actions'
},
status: {
pending: 'Pending',
running: 'Running',
completed: 'Completed',
failed: 'Failed'
},
trigger: {
manual: 'Manual',
scheduled: 'Scheduled'
},
neverExpire: 'Never',
empty: 'No backup records',
actions: {
download: 'Download',
restore: 'Restore',
restoreConfirm: 'Are you sure you want to restore from this backup? This will overwrite the current database!',
restoreSuccess: 'Database restored successfully',
deleteConfirm: 'Are you sure you want to delete this backup?',
deleted: 'Backup deleted'
},
r2Guide: {
title: 'Cloudflare R2 Setup Guide',
intro: 'Cloudflare R2 provides S3-compatible object storage with a free tier of 10GB storage + 1M Class A requests/month, ideal for database backups.',
step1: {
title: 'Create an R2 Bucket',
line1: 'Log in to the Cloudflare Dashboard (dash.cloudflare.com), select "R2 Object Storage" from the sidebar',
line2: 'Click "Create bucket", enter a name (e.g. sub2api-backups), choose a region',
line3: 'Click create to finish'
},
step2: {
title: 'Create an API Token',
line1: 'On the R2 page, click "Manage R2 API Tokens" in the top right',
line2: 'Click "Create API token", set permission to "Object Read & Write"',
line3: 'Recommended: restrict to specific bucket for better security',
line4: 'After creation, you will see the Access Key ID and Secret Access Key',
warning: 'The Secret Access Key is only shown once — copy and save it immediately!'
},
step3: {
title: 'Get the S3 Endpoint',
desc: 'Find your Account ID on the R2 overview page (in the URL or the right panel). The endpoint format is:',
accountId: 'your_account_id'
},
step4: {
title: 'Fill in the Configuration',
checkEnabled: 'Checked',
bucketValue: 'Your bucket name',
fromStep2: 'Value from Step 2',
unchecked: 'Unchecked'
},
freeTier: 'R2 Free Tier: 10GB storage + 1M Class A requests + 10M Class B requests per month — more than enough for database backups.'
}
},
dataManagement: {
title: 'Data Management',
description: 'Manage data management agent status, object storage settings, and backup jobs in one place',

View File

@@ -340,6 +340,7 @@ export default {
redeemCodes: '兑换码',
ops: '运维监控',
promoCodes: '优惠码',
backup: '数据库备份',
dataManagement: '数据管理',
settings: '系统设置',
myAccount: '我的账户',
@@ -988,6 +989,110 @@ export default {
failedToLoad: '加载仪表盘数据失败'
},
backup: {
title: '数据库备份',
description: '全量数据库备份到 S3 兼容存储,支持定时备份与恢复',
s3: {
title: 'S3 存储配置',
      description: '配置 S3 兼容存储(支持 Cloudflare R2)',
descriptionPrefix: '配置 S3 兼容存储(支持',
      descriptionSuffix: ')',
enabled: '启用 S3 存储',
endpoint: '端点地址',
region: '区域',
bucket: '存储桶',
prefix: 'Key 前缀',
accessKeyId: 'Access Key ID',
secretAccessKey: 'Secret Access Key',
secretConfigured: '已配置,留空保持不变',
forcePathStyle: '强制路径风格',
testConnection: '测试连接',
testSuccess: 'S3 连接测试成功',
testFailed: 'S3 连接测试失败',
saved: 'S3 配置已保存'
},
schedule: {
title: '定时备份',
description: '配置自动定时备份',
enabled: '启用定时备份',
cronExpr: 'Cron 表达式',
cronHint: '例如 "0 2 * * *" 表示每天凌晨 2 点',
retainDays: '备份过期天数',
      retainDaysHint: '备份文件超过此天数后自动删除,0 = 永不过期',
retainCount: '最大保留份数',
      retainCountHint: '最多保留的备份数量,0 = 不限制',
saved: '定时备份配置已保存'
},
operations: {
title: '备份记录',
description: '创建手动备份和管理已有备份记录',
createBackup: '创建备份',
backing: '备份中...',
backupCreated: '备份创建成功',
expireDays: '过期天数'
},
columns: {
status: '状态',
fileName: '文件名',
size: '大小',
expiresAt: '过期时间',
triggeredBy: '触发方式',
startedAt: '开始时间',
actions: '操作'
},
status: {
pending: '等待中',
running: '执行中',
completed: '已完成',
failed: '失败'
},
trigger: {
manual: '手动',
scheduled: '定时'
},
neverExpire: '永不过期',
empty: '暂无备份记录',
actions: {
download: '下载',
restore: '恢复',
restoreConfirm: '确定要从此备份恢复吗?这将覆盖当前数据库!',
restoreSuccess: '数据库恢复成功',
deleteConfirm: '确定要删除此备份吗?',
deleted: '备份已删除'
},
r2Guide: {
title: 'Cloudflare R2 配置教程',
intro: 'Cloudflare R2 提供 S3 兼容的对象存储,免费额度为 10GB 存储 + 每月 100 万次 A 类请求,非常适合数据库备份。',
step1: {
title: '创建 R2 存储桶',
        line1: '登录 Cloudflare Dashboard (dash.cloudflare.com),左侧菜单选择「R2 对象存储」',
        line2: '点击「创建存储桶」,输入名称(如 sub2api-backups),选择区域',
line3: '点击创建完成'
},
step2: {
title: '创建 API 令牌',
line1: '在 R2 页面,点击右上角「管理 R2 API 令牌」',
line2: '点击「创建 API 令牌」,权限选择「对象读和写」',
line3: '建议指定存储桶范围(仅允许访问备份桶,更安全)',
line4: '创建后会显示 Access Key ID 和 Secret Access Key',
warning: 'Secret Access Key 只会显示一次,请立即复制保存!'
},
step3: {
title: '获取 S3 端点地址',
desc: '在 R2 概览页面找到你的账户 ID在 URL 或右侧面板中),端点格式为:',
accountId: '你的账户 ID'
},
step4: {
title: '填写以下配置',
checkEnabled: '勾选',
bucketValue: '你创建的存储桶名称',
fromStep2: '第 2 步获取的值',
unchecked: '不勾选'
},
freeTier: 'R2 免费额度10GB 存储 + 每月 100 万次 A 类请求 + 1000 万次 B 类请求,对数据库备份完全够用。'
}
},
dataManagement: {
title: '数据管理',
description: '统一管理数据管理代理状态、对象存储配置和备份任务',

View File

@@ -350,6 +350,18 @@ const routes: RouteRecordRaw[] = [
descriptionKey: 'admin.promo.description'
}
},
{
path: '/admin/backup',
name: 'AdminBackup',
component: () => import('@/views/admin/BackupView.vue'),
meta: {
requiresAuth: true,
requiresAdmin: true,
title: 'Database Backup',
titleKey: 'admin.backup.title',
descriptionKey: 'admin.backup.description'
}
},
{
path: '/admin/data-management',
name: 'AdminDataManagement',

View File

@@ -0,0 +1,506 @@
<template>
<AppLayout>
<div class="space-y-6">
<!-- S3 Storage Config -->
<div class="card p-6">
<div class="mb-4 flex flex-wrap items-center justify-between gap-3">
<div>
<h3 class="text-base font-semibold text-gray-900 dark:text-white">
{{ t('admin.backup.s3.title') }}
</h3>
<p class="mt-1 text-sm text-gray-500 dark:text-gray-400">
{{ t('admin.backup.s3.descriptionPrefix') }}
<button type="button" class="text-primary-600 underline hover:text-primary-700 dark:text-primary-400 dark:hover:text-primary-300" @click="showR2Guide = true">Cloudflare R2</button>
{{ t('admin.backup.s3.descriptionSuffix') }}
</p>
</div>
</div>
<div class="grid grid-cols-1 gap-3 md:grid-cols-2">
<div>
<label class="mb-1 block text-xs font-medium text-gray-600 dark:text-gray-400">{{ t('admin.backup.s3.endpoint') }}</label>
<input v-model="s3Form.endpoint" class="input w-full" placeholder="https://<account_id>.r2.cloudflarestorage.com" />
</div>
<div>
<label class="mb-1 block text-xs font-medium text-gray-600 dark:text-gray-400">{{ t('admin.backup.s3.region') }}</label>
<input v-model="s3Form.region" class="input w-full" placeholder="auto" />
</div>
<div>
<label class="mb-1 block text-xs font-medium text-gray-600 dark:text-gray-400">{{ t('admin.backup.s3.bucket') }}</label>
<input v-model="s3Form.bucket" class="input w-full" />
</div>
<div>
<label class="mb-1 block text-xs font-medium text-gray-600 dark:text-gray-400">{{ t('admin.backup.s3.prefix') }}</label>
<input v-model="s3Form.prefix" class="input w-full" placeholder="backups/" />
</div>
<div>
<label class="mb-1 block text-xs font-medium text-gray-600 dark:text-gray-400">{{ t('admin.backup.s3.accessKeyId') }}</label>
<input v-model="s3Form.access_key_id" class="input w-full" />
</div>
<div>
<label class="mb-1 block text-xs font-medium text-gray-600 dark:text-gray-400">{{ t('admin.backup.s3.secretAccessKey') }}</label>
<input v-model="s3Form.secret_access_key" type="password" class="input w-full" :placeholder="s3SecretConfigured ? t('admin.backup.s3.secretConfigured') : ''" />
</div>
<label class="inline-flex items-center gap-2 text-sm text-gray-700 dark:text-gray-300 md:col-span-2">
<input v-model="s3Form.force_path_style" type="checkbox" />
<span>{{ t('admin.backup.s3.forcePathStyle') }}</span>
</label>
</div>
<div class="mt-4 flex flex-wrap gap-2">
<button type="button" class="btn btn-secondary btn-sm" :disabled="testingS3" @click="testS3">
{{ testingS3 ? t('common.loading') : t('admin.backup.s3.testConnection') }}
</button>
<button type="button" class="btn btn-primary btn-sm" :disabled="savingS3" @click="saveS3Config">
{{ savingS3 ? t('common.loading') : t('common.save') }}
</button>
</div>
</div>
<!-- Schedule Config -->
<div class="card p-6">
<div class="mb-4">
<h3 class="text-base font-semibold text-gray-900 dark:text-white">
{{ t('admin.backup.schedule.title') }}
</h3>
<p class="mt-1 text-sm text-gray-500 dark:text-gray-400">
{{ t('admin.backup.schedule.description') }}
</p>
</div>
<div class="grid grid-cols-1 gap-3 md:grid-cols-2">
<label class="inline-flex items-center gap-2 text-sm text-gray-700 dark:text-gray-300 md:col-span-2">
<input v-model="scheduleForm.enabled" type="checkbox" />
<span>{{ t('admin.backup.schedule.enabled') }}</span>
</label>
<div>
<label class="mb-1 block text-xs font-medium text-gray-600 dark:text-gray-400">{{ t('admin.backup.schedule.cronExpr') }}</label>
<input v-model="scheduleForm.cron_expr" class="input w-full" placeholder="0 2 * * *" />
<p class="mt-1 text-xs text-gray-500 dark:text-gray-400">{{ t('admin.backup.schedule.cronHint') }}</p>
</div>
<div>
<label class="mb-1 block text-xs font-medium text-gray-600 dark:text-gray-400">{{ t('admin.backup.schedule.retainDays') }}</label>
<input v-model.number="scheduleForm.retain_days" type="number" min="0" class="input w-full" />
<p class="mt-1 text-xs text-gray-500 dark:text-gray-400">{{ t('admin.backup.schedule.retainDaysHint') }}</p>
</div>
<div>
<label class="mb-1 block text-xs font-medium text-gray-600 dark:text-gray-400">{{ t('admin.backup.schedule.retainCount') }}</label>
<input v-model.number="scheduleForm.retain_count" type="number" min="0" class="input w-full" />
<p class="mt-1 text-xs text-gray-500 dark:text-gray-400">{{ t('admin.backup.schedule.retainCountHint') }}</p>
</div>
</div>
<div class="mt-4">
<button type="button" class="btn btn-primary btn-sm" :disabled="savingSchedule" @click="saveSchedule">
{{ savingSchedule ? t('common.loading') : t('common.save') }}
</button>
</div>
</div>
<!-- Backup Operations -->
<div class="card p-6">
<div class="mb-4 flex flex-wrap items-center justify-between gap-3">
<div>
<h3 class="text-base font-semibold text-gray-900 dark:text-white">
{{ t('admin.backup.operations.title') }}
</h3>
<p class="mt-1 text-sm text-gray-500 dark:text-gray-400">
{{ t('admin.backup.operations.description') }}
</p>
</div>
<div class="flex flex-wrap items-center gap-2">
<div class="flex items-center gap-1">
<label class="text-xs text-gray-600 dark:text-gray-400">{{ t('admin.backup.operations.expireDays') }}</label>
<input v-model.number="manualExpireDays" type="number" min="0" class="input w-20 text-xs" />
</div>
<button type="button" class="btn btn-primary btn-sm" :disabled="creatingBackup" @click="createBackup">
{{ creatingBackup ? t('admin.backup.operations.backing') : t('admin.backup.operations.createBackup') }}
</button>
<button type="button" class="btn btn-secondary btn-sm" :disabled="loadingBackups" @click="loadBackups">
{{ loadingBackups ? t('common.loading') : t('common.refresh') }}
</button>
</div>
</div>
<div class="overflow-x-auto">
<table class="w-full min-w-[800px] text-sm">
<thead>
<tr class="border-b border-gray-200 text-left text-xs uppercase tracking-wide text-gray-500 dark:border-dark-700 dark:text-gray-400">
<th class="py-2 pr-4">ID</th>
<th class="py-2 pr-4">{{ t('admin.backup.columns.status') }}</th>
<th class="py-2 pr-4">{{ t('admin.backup.columns.fileName') }}</th>
<th class="py-2 pr-4">{{ t('admin.backup.columns.size') }}</th>
<th class="py-2 pr-4">{{ t('admin.backup.columns.expiresAt') }}</th>
<th class="py-2 pr-4">{{ t('admin.backup.columns.triggeredBy') }}</th>
<th class="py-2 pr-4">{{ t('admin.backup.columns.startedAt') }}</th>
<th class="py-2">{{ t('admin.backup.columns.actions') }}</th>
</tr>
</thead>
<tbody>
<tr v-for="record in backups" :key="record.id" class="border-b border-gray-100 align-top dark:border-dark-800">
<td class="py-3 pr-4 font-mono text-xs">{{ record.id }}</td>
<td class="py-3 pr-4">
<span
class="rounded px-2 py-0.5 text-xs"
:class="statusClass(record.status)"
>
{{ t(`admin.backup.status.${record.status}`) }}
</span>
</td>
<td class="py-3 pr-4 text-xs">{{ record.file_name }}</td>
<td class="py-3 pr-4 text-xs">{{ formatSize(record.size_bytes) }}</td>
<td class="py-3 pr-4 text-xs">
{{ record.expires_at ? formatDate(record.expires_at) : t('admin.backup.neverExpire') }}
</td>
<td class="py-3 pr-4 text-xs">
{{ record.triggered_by === 'scheduled' ? t('admin.backup.trigger.scheduled') : t('admin.backup.trigger.manual') }}
</td>
<td class="py-3 pr-4 text-xs">{{ formatDate(record.started_at) }}</td>
<td class="py-3 text-xs">
<div class="flex flex-wrap gap-1">
<button
v-if="record.status === 'completed'"
type="button"
class="btn btn-secondary btn-xs"
@click="downloadBackup(record.id)"
>
{{ t('admin.backup.actions.download') }}
</button>
<button
v-if="record.status === 'completed'"
type="button"
class="btn btn-secondary btn-xs"
:disabled="restoringId === record.id"
@click="restoreBackup(record.id)"
>
{{ restoringId === record.id ? t('common.loading') : t('admin.backup.actions.restore') }}
</button>
<button
type="button"
class="btn btn-danger btn-xs"
@click="removeBackup(record.id)"
>
{{ t('common.delete') }}
</button>
</div>
</td>
</tr>
<tr v-if="backups.length === 0">
<td colspan="8" class="py-6 text-center text-sm text-gray-500 dark:text-gray-400">
{{ t('admin.backup.empty') }}
</td>
</tr>
</tbody>
</table>
</div>
</div>
</div>
<!-- Cloudflare R2 Setup Guide Modal -->
<teleport to="body">
<transition name="modal">
<div v-if="showR2Guide" class="fixed inset-0 z-50 flex items-center justify-center p-4" @mousedown.self="showR2Guide = false">
<div class="fixed inset-0 bg-black/50" @click="showR2Guide = false"></div>
<div class="relative max-h-[85vh] w-full max-w-2xl overflow-y-auto rounded-xl bg-white p-6 shadow-2xl dark:bg-dark-800">
<button type="button" class="absolute right-4 top-4 text-gray-400 hover:text-gray-600 dark:hover:text-gray-200" @click="showR2Guide = false">
<svg class="h-5 w-5" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2"><path stroke-linecap="round" stroke-linejoin="round" d="M6 18L18 6M6 6l12 12" /></svg>
</button>
<h2 class="mb-4 text-lg font-bold text-gray-900 dark:text-white">{{ t('admin.backup.r2Guide.title') }}</h2>
<p class="mb-4 text-sm text-gray-500 dark:text-gray-400">{{ t('admin.backup.r2Guide.intro') }}</p>
<!-- Step 1 -->
<div class="mb-5">
<h3 class="mb-2 flex items-center gap-2 text-sm font-semibold text-gray-900 dark:text-white">
<span class="flex h-6 w-6 items-center justify-center rounded-full bg-primary-100 text-xs font-bold text-primary-700 dark:bg-primary-900/40 dark:text-primary-300">1</span>
{{ t('admin.backup.r2Guide.step1.title') }}
</h3>
<ol class="ml-8 list-decimal space-y-1 text-sm text-gray-600 dark:text-gray-300">
<li>{{ t('admin.backup.r2Guide.step1.line1') }}</li>
<li>{{ t('admin.backup.r2Guide.step1.line2') }}</li>
<li>{{ t('admin.backup.r2Guide.step1.line3') }}</li>
</ol>
</div>
<!-- Step 2 -->
<div class="mb-5">
<h3 class="mb-2 flex items-center gap-2 text-sm font-semibold text-gray-900 dark:text-white">
<span class="flex h-6 w-6 items-center justify-center rounded-full bg-primary-100 text-xs font-bold text-primary-700 dark:bg-primary-900/40 dark:text-primary-300">2</span>
{{ t('admin.backup.r2Guide.step2.title') }}
</h3>
<ol class="ml-8 list-decimal space-y-1 text-sm text-gray-600 dark:text-gray-300">
<li>{{ t('admin.backup.r2Guide.step2.line1') }}</li>
<li>{{ t('admin.backup.r2Guide.step2.line2') }}</li>
<li>{{ t('admin.backup.r2Guide.step2.line3') }}</li>
<li>{{ t('admin.backup.r2Guide.step2.line4') }}</li>
</ol>
<div class="mt-2 rounded-lg bg-amber-50 p-3 text-xs text-amber-700 dark:bg-amber-900/20 dark:text-amber-300">
{{ t('admin.backup.r2Guide.step2.warning') }}
</div>
</div>
<!-- Step 3 -->
<div class="mb-5">
<h3 class="mb-2 flex items-center gap-2 text-sm font-semibold text-gray-900 dark:text-white">
<span class="flex h-6 w-6 items-center justify-center rounded-full bg-primary-100 text-xs font-bold text-primary-700 dark:bg-primary-900/40 dark:text-primary-300">3</span>
{{ t('admin.backup.r2Guide.step3.title') }}
</h3>
<p class="ml-8 text-sm text-gray-600 dark:text-gray-300">{{ t('admin.backup.r2Guide.step3.desc') }}</p>
<code class="ml-8 mt-1 block rounded bg-gray-100 px-3 py-2 text-xs text-gray-800 dark:bg-dark-700 dark:text-gray-200">https://&lt;{{ t('admin.backup.r2Guide.step3.accountId') }}&gt;.r2.cloudflarestorage.com</code>
</div>
<!-- Step 4: Fill form -->
<div class="mb-5">
<h3 class="mb-2 flex items-center gap-2 text-sm font-semibold text-gray-900 dark:text-white">
<span class="flex h-6 w-6 items-center justify-center rounded-full bg-primary-100 text-xs font-bold text-primary-700 dark:bg-primary-900/40 dark:text-primary-300">4</span>
{{ t('admin.backup.r2Guide.step4.title') }}
</h3>
<div class="ml-8 overflow-hidden rounded-lg border border-gray-200 dark:border-dark-600">
<table class="w-full text-sm">
<tbody>
<tr v-for="(row, i) in r2ConfigRows" :key="i" class="border-b border-gray-100 dark:border-dark-700 last:border-0">
<td class="whitespace-nowrap bg-gray-50 px-3 py-2 font-medium text-gray-700 dark:bg-dark-700 dark:text-gray-300">{{ row.field }}</td>
<td class="px-3 py-2 text-gray-600 dark:text-gray-400"><code class="text-xs">{{ row.value }}</code></td>
</tr>
</tbody>
</table>
</div>
</div>
<!-- Free tier note -->
<div class="rounded-lg bg-green-50 p-3 text-xs text-green-700 dark:bg-green-900/20 dark:text-green-300">
{{ t('admin.backup.r2Guide.freeTier') }}
</div>
<div class="mt-4 text-right">
<button type="button" class="btn btn-primary btn-sm" @click="showR2Guide = false">{{ t('common.close') }}</button>
</div>
</div>
</div>
</transition>
</teleport>
</AppLayout>
</template>
<script setup lang="ts">
import { computed, onMounted, ref } from 'vue'
import { useI18n } from 'vue-i18n'
import AppLayout from '@/components/layout/AppLayout.vue'
import { adminAPI } from '@/api'
import { useAppStore } from '@/stores'
import type { BackupS3Config, BackupScheduleConfig, BackupRecord } from '@/api/admin/backup'
const { t } = useI18n()
const appStore = useAppStore()
// S3 config form state. secret_access_key is write-only: the API does not echo
// it back, so the form keeps it empty unless the user types a new one.
const s3Form = ref<BackupS3Config>({
  endpoint: '',
  region: 'auto',
  bucket: '',
  access_key_id: '',
  secret_access_key: '',
  prefix: 'backups/',
  force_path_style: false,
})
// Drives the "already configured, leave empty to keep" placeholder.
const s3SecretConfigured = ref(false)
const savingS3 = ref(false)
const testingS3 = ref(false)
// Schedule config form state (defaults mirror the manual-backup defaults below).
const scheduleForm = ref<BackupScheduleConfig>({
  enabled: false,
  cron_expr: '0 2 * * *',
  retain_days: 14,
  retain_count: 10,
})
const savingSchedule = ref(false)
// Backup record list and per-action busy flags.
const backups = ref<BackupRecord[]>([])
const loadingBackups = ref(false)
const creatingBackup = ref(false)
const restoringId = ref('') // id of the record currently being restored ('' = none)
const manualExpireDays = ref(14) // expire_days sent with a manual backup
// Cloudflare R2 guide modal state, plus the config table rendered in step 4.
const showR2Guide = ref(false)
const r2ConfigRows = computed(() => [
  { field: t('admin.backup.s3.endpoint'), value: 'https://<account_id>.r2.cloudflarestorage.com' },
  { field: t('admin.backup.s3.region'), value: 'auto' },
  { field: t('admin.backup.s3.bucket'), value: t('admin.backup.r2Guide.step4.bucketValue') },
  { field: t('admin.backup.s3.prefix'), value: 'backups/' },
  { field: 'Access Key ID', value: t('admin.backup.r2Guide.step4.fromStep2') },
  { field: 'Secret Access Key', value: t('admin.backup.r2Guide.step4.fromStep2') },
  { field: t('admin.backup.s3.forcePathStyle'), value: t('admin.backup.r2Guide.step4.unchecked') },
])
// Fetch the stored S3 config into the form. The secret is never echoed back,
// so the field is reset to empty. "Configured" state is inferred from
// access_key_id being present — NOTE(review): this is a proxy; the API does
// not report whether a secret is actually stored, confirm the heuristic.
async function loadS3Config() {
  try {
    const cfg = await adminAPI.backup.getS3Config()
    s3Form.value = {
      endpoint: cfg.endpoint || '',
      region: cfg.region || 'auto',
      bucket: cfg.bucket || '',
      access_key_id: cfg.access_key_id || '',
      secret_access_key: '',
      prefix: cfg.prefix || 'backups/',
      force_path_style: cfg.force_path_style,
    }
    s3SecretConfigured.value = Boolean(cfg.access_key_id)
  } catch (error) {
    appStore.showError((error as { message?: string })?.message || t('errors.networkError'))
  }
}
// Persist the S3 form, then re-fetch so the masked-secret state stays accurate.
async function saveS3Config() {
  savingS3.value = true
  try {
    await adminAPI.backup.updateS3Config(s3Form.value)
    appStore.showSuccess(t('admin.backup.s3.saved'))
    await loadS3Config()
  } catch (error) {
    const message = (error as { message?: string })?.message
    appStore.showError(message || t('errors.networkError'))
  } finally {
    savingS3.value = false
  }
}
// Test S3 connectivity with the values currently in the form (including an
// unsaved secret); shows the backend-provided message on success or failure.
async function testS3() {
  testingS3.value = true
  try {
    const result = await adminAPI.backup.testS3Connection(s3Form.value)
    if (result.ok) {
      appStore.showSuccess(result.message || t('admin.backup.s3.testSuccess'))
    } else {
      appStore.showError(result.message || t('admin.backup.s3.testFailed'))
    }
  } catch (error) {
    appStore.showError((error as { message?: string })?.message || t('errors.networkError'))
  } finally {
    testingS3.value = false
  }
}
// Load the scheduled-backup settings into the form.
// Uses ?? (not ||) for the numeric fields: 0 is a meaningful value here
// (retain_days 0 = never expire, retain_count 0 = unlimited, per the field
// hints) and must not be silently replaced by the defaults on reload.
async function loadSchedule() {
  try {
    const cfg = await adminAPI.backup.getSchedule()
    scheduleForm.value = {
      enabled: cfg.enabled,
      // An empty cron expression is never valid, so || is correct here.
      cron_expr: cfg.cron_expr || '0 2 * * *',
      retain_days: cfg.retain_days ?? 14,
      retain_count: cfg.retain_count ?? 10,
    }
  } catch (error) {
    appStore.showError((error as { message?: string })?.message || t('errors.networkError'))
  }
}
// Persist the schedule form. No reload afterwards — unlike the S3 config,
// nothing in this form is masked by the server.
async function saveSchedule() {
  savingSchedule.value = true
  try {
    await adminAPI.backup.updateSchedule(scheduleForm.value)
    appStore.showSuccess(t('admin.backup.schedule.saved'))
  } catch (err) {
    const msg = (err as { message?: string })?.message
    appStore.showError(msg || t('errors.networkError'))
  } finally {
    savingSchedule.value = false
  }
}
// Refresh the backup list; an absent items array is treated as empty.
async function loadBackups() {
  loadingBackups.value = true
  try {
    const page = await adminAPI.backup.listBackups()
    backups.value = page.items ?? []
  } catch (err) {
    const msg = (err as { message?: string })?.message
    appStore.showError(msg || t('errors.networkError'))
  } finally {
    loadingBackups.value = false
  }
}
// Trigger a manual backup with the chosen expiry, then refresh the list
// so the new entry (and its status) shows up immediately.
async function createBackup() {
  creatingBackup.value = true
  try {
    await adminAPI.backup.createBackup({ expire_days: manualExpireDays.value })
    appStore.showSuccess(t('admin.backup.operations.backupCreated'))
    await loadBackups()
  } catch (err) {
    const msg = (err as { message?: string })?.message
    appStore.showError(msg || t('errors.networkError'))
  } finally {
    creatingBackup.value = false
  }
}
// Ask the backend for a (presumably presigned) download URL for the backup
// object and open it in a new tab.
async function downloadBackup(id: string) {
  try {
    const { url } = await adminAPI.backup.getDownloadURL(id)
    window.open(url, '_blank')
  } catch (err) {
    const msg = (err as { message?: string })?.message
    appStore.showError(msg || t('errors.networkError'))
  }
}
// Restore the database from a backup after an explicit confirmation —
// this is destructive. restoringId drives the per-row spinner state.
async function restoreBackup(id: string) {
  const confirmed = window.confirm(t('admin.backup.actions.restoreConfirm'))
  if (!confirmed) return
  restoringId.value = id
  try {
    await adminAPI.backup.restoreBackup(id)
    appStore.showSuccess(t('admin.backup.actions.restoreSuccess'))
  } catch (err) {
    const msg = (err as { message?: string })?.message
    appStore.showError(msg || t('errors.networkError'))
  } finally {
    restoringId.value = ''
  }
}
// Delete a backup after confirmation, then refresh the list.
async function removeBackup(id: string) {
  const confirmed = window.confirm(t('admin.backup.actions.deleteConfirm'))
  if (!confirmed) return
  try {
    await adminAPI.backup.deleteBackup(id)
    appStore.showSuccess(t('admin.backup.actions.deleted'))
    await loadBackups()
  } catch (err) {
    const msg = (err as { message?: string })?.message
    appStore.showError(msg || t('errors.networkError'))
  }
}
function statusClass(status: string): string {
switch (status) {
case 'completed':
return 'bg-green-100 text-green-700 dark:bg-green-900/30 dark:text-green-300'
case 'running':
return 'bg-blue-100 text-blue-700 dark:bg-blue-900/30 dark:text-blue-300'
case 'failed':
return 'bg-red-100 text-red-700 dark:bg-red-900/30 dark:text-red-300'
default:
return 'bg-gray-100 text-gray-700 dark:bg-dark-800 dark:text-gray-300'
}
}
/**
 * Format a byte count for display in the backup list.
 *
 * Returns '-' for missing or non-positive values, exact bytes below 1 KiB,
 * and one decimal place for KB / MB / GB. The GB tier matters here: full
 * pg_dump archives can exceed 1 GiB, and the previous MB cap rendered them
 * as e.g. "2048.0 MB".
 */
function formatSize(bytes: number): string {
  if (!bytes || bytes <= 0) return '-'
  const KB = 1024
  const MB = 1024 * KB
  const GB = 1024 * MB
  if (bytes < KB) return `${bytes} B`
  if (bytes < MB) return `${(bytes / KB).toFixed(1)} KB`
  if (bytes < GB) return `${(bytes / MB).toFixed(1)} MB`
  return `${(bytes / GB).toFixed(1)} GB`
}
// Render a timestamp string in the viewer's locale.
// Returns '-' when the value is absent and the raw string when unparseable.
function formatDate(value?: string): string {
  if (!value) return '-'
  const parsed = new Date(value)
  return Number.isNaN(parsed.getTime()) ? value : parsed.toLocaleString()
}
// On mount, populate all three panels (S3 config, schedule, backup list)
// concurrently; each loader handles its own errors.
onMounted(async () => {
  await Promise.all([loadS3Config(), loadSchedule(), loadBackups()])
})
</script>
<style scoped>
/* Fade transition for the R2 guide modal (Vue <Transition name="modal">). */
.modal-enter-active,
.modal-leave-active {
  transition: opacity 0.2s ease;
}
.modal-enter-from,
.modal-leave-to {
  opacity: 0;
}
</style>