package internal

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"slices"
	"strings"
	"time"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

var (
	// MinioClient is the S3 client used for the current backup run.
	MinioClient *minio.Client

	backupBuckets = []string{}
	backupRoot    string
	s3Host        string
	s3Secure      bool
	s3Region      string
	s3User        string
	s3Pass        string
	backupsToKeep int64
)

// init reads the backup configuration from the environment. GetEnvWithDefault
// and ParseIntDefault are helpers defined elsewhere in this package.
func init() {
	// BUCKET_NAMES is a comma-separated list of buckets to back up.
	// Empty entries (e.g. from a trailing comma or an unset variable) are skipped.
	for _, bucket := range strings.Split(GetEnvWithDefault("BUCKET_NAMES", ""), ",") {
		if bucket = strings.TrimSpace(bucket); bucket != "" {
			backupBuckets = append(backupBuckets, bucket)
		}
	}
	backupRoot = GetEnvWithDefault("BACKUP_ROOT", "/backup")
	s3Host = GetEnvWithDefault("S3_HOST", "localhost:9000")
	s3Secure = GetEnvWithDefault("S3_USE_SSL", "") == "true"
	s3Region = GetEnvWithDefault("S3_REGION", "us-east-1")
	s3User = GetEnvWithDefault("S3_ACCESS_KEY_ID", "")
	s3Pass = GetEnvWithDefault("S3_ACCESS_KEY_SECRET", "")
	// With one backup per bucket per day, the number of archives kept
	// corresponds to the number of days of history (BACKUP_KEEP_DAYS).
	backupsToKeep = ParseIntDefault(GetEnvWithDefault("BACKUP_KEEP_DAYS", "0"), 0)
}

// Backup downloads every object from each configured bucket, archives each
// bucket into a dated .tar.gz under backupRoot, and, when backupsToKeep is
// set, prunes the oldest archives. Individual object download failures are
// collected and reported at the end rather than aborting the run.
func Backup(ctx context.Context) error {
	logger.Info("Starting backup")

	var err error
	getErrors := []string{}

	MinioClient, err = minio.New(s3Host, &minio.Options{
		Creds:  credentials.NewStaticV4(s3User, s3Pass, ""),
		Secure: s3Secure,
		Region: s3Region,
	})
	if err != nil {
		logger.Error("Error connecting to S3", "error", err)
		return err
	}
	logger.Debug("Connected to S3", "host", s3Host)

	for _, bucket := range backupBuckets {
		bucketLogger := logger.With("bucket", bucket)
		bucketLogger.Debug("Downloading bucket objects")

		downloadRoot, err := os.MkdirTemp("", "*")
		if err != nil {
			bucketLogger.Error("Could not create temporary download directory", "error", err)
			return err
		}
		// Deferred inside the loop, so temporary directories are removed when
		// Backup returns, not at the end of each iteration.
		defer os.RemoveAll(downloadRoot)

		// Download every object in the bucket to downloadRoot/<bucket>/<key>.
		objectCount := 0
		for object := range MinioClient.ListObjects(ctx, bucket, minio.ListObjectsOptions{Recursive: true}) {
			if object.Err != nil {
				bucketLogger.Error("Error listing objects", "error", object.Err)
				getErrors = append(getErrors, object.Err.Error())
				continue
			}
			getError := MinioClient.FGetObject(ctx, bucket, object.Key, fmt.Sprintf("%s/%s/%s", downloadRoot, bucket, object.Key), minio.GetObjectOptions{})
			if getError != nil {
				bucketLogger.Error("Error getting object", "error", getError)
				getErrors = append(getErrors, getError.Error())
				continue
			}
			objectCount++
		}

		// Archive and compress the downloaded objects into
		// backupRoot/YYYY-MM-DD-<bucket>.tar.gz. Tar and Gzip are helpers
		// defined elsewhere in this package.
		bucketLogger.Debug("Compressing objects", "count", objectCount)
		now := time.Now().Format("2006-01-02")
		tarFile := fmt.Sprintf("%s/%s-%s.tar", downloadRoot, now, bucket)
		err = Tar(fmt.Sprintf("%s/%s", downloadRoot, bucket), tarFile)
		if err != nil {
			bucketLogger.Error("Error archiving directory", "error", err)
			return err
		}
		gzFile := fmt.Sprintf("%s/%s-%s.tar.gz", backupRoot, now, bucket)
		err = Gzip(tarFile, gzFile)
		if err != nil {
			bucketLogger.Error("Error compressing archive", "error", err)
			return err
		}

		// Retention: keep only the newest backupsToKeep archives for this
		// bucket. The date prefix sorts lexicographically in chronological
		// order, so the oldest archives come first after sorting.
		if backupsToKeep > 0 {
			bucketLogger.Debug("Cleaning up old backups")
			matches, _ := filepath.Glob(fmt.Sprintf("%s/*-%s.tar.gz", backupRoot, bucket))
			if len(matches) > int(backupsToKeep) {
				slices.Sort(matches)
				backupsToDelete := len(matches) - int(backupsToKeep)
				for i, match := range matches {
					if i >= backupsToDelete {
						break
					}
					bucketLogger.Debug("Deleting old backup", "filename", match)
					os.Remove(match)
				}
			}
		}
	}

	if len(getErrors) > 0 {
		err = fmt.Errorf("%d errors retrieving objects: %s", len(getErrors), strings.Join(getErrors, ", "))
	}
	return err
}
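
// Example usage (illustrative sketch, not part of the original source): Backup
// could be driven from a small main package that runs once per day. The module
// path "example.com/s3backup" below is an assumption; note that a Go "internal"
// package is only importable from within its own module.
//
//	package main
//
//	import (
//		"context"
//		"log"
//		"time"
//
//		"example.com/s3backup/internal"
//	)
//
//	func main() {
//		for {
//			if err := internal.Backup(context.Background()); err != nil {
//				log.Printf("backup failed: %v", err)
//			}
//			time.Sleep(24 * time.Hour) // wait a day before the next run
//		}
//	}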