docker-s3-backup/internal/backup.go

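// Package internal contains the S3 backup logic: it downloads bucket
// contents, archives them, and prunes old backup archives.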
package internal

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"slices"
	"strings"
	"time"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

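// MinioClient is the shared S3 client created in Backup; the remaining
// values are populated from environment variables in init.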
var (
	MinioClient   *minio.Client
	backupBuckets = []string{}
	backupRoot    string
	s3Host        string
	s3Secure      bool
	s3Region      string
	s3User        string
	s3Pass        string
	backupsToKeep int64
)

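// init loads the backup configuration from the environment:
// BUCKET_NAMES (comma-separated), BACKUP_ROOT, S3_HOST, S3_USE_SSL,
// S3_REGION, S3_ACCESS_KEY_ID, S3_ACCESS_KEY_SECRET and BACKUP_KEEP_DAYS.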
func init() {
	// Skip blank entries so an unset BUCKET_NAMES or a trailing comma does
	// not register an empty bucket name.
	for _, bucket := range strings.Split(GetEnvWithDefault("BUCKET_NAMES", ""), ",") {
		if name := strings.TrimSpace(bucket); name != "" {
			backupBuckets = append(backupBuckets, name)
		}
	}
	backupRoot = GetEnvWithDefault("BACKUP_ROOT", "/backup")
	s3Host = GetEnvWithDefault("S3_HOST", "localhost:9000")
	s3Secure = GetEnvWithDefault("S3_USE_SSL", "") == "true"
	s3Region = GetEnvWithDefault("S3_REGION", "us-east-1")
	s3User = GetEnvWithDefault("S3_ACCESS_KEY_ID", "")
	s3Pass = GetEnvWithDefault("S3_ACCESS_KEY_SECRET", "")
	backupsToKeep = ParseIntDefault(GetEnvWithDefault("BACKUP_KEEP_DAYS", "0"), 0)
}

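// Backup connects to the configured S3 endpoint, downloads every object in
// each configured bucket into a temporary directory, writes a dated
// .tar.gz archive per bucket under backupRoot, and optionally prunes old
// archives. Individual object failures are collected and reported as a
// single aggregated error; connection, archiving, and compression failures
// abort the run immediately.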
func Backup(ctx context.Context) error {
	logger.Info("Starting backup")

	var err error
	var getErrors = []string{}

	MinioClient, err = minio.New(s3Host, &minio.Options{
		Creds:  credentials.NewStaticV4(s3User, s3Pass, ""),
		Secure: s3Secure,
		Region: s3Region,
	})
	if err != nil {
		logger.Error("Error connecting to S3", "error", err)
		return err
	}
	logger.Debug("Connected to S3", "host", s3Host)

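	// Mirror each bucket into its own temporary directory before archiving.
	// The deferred RemoveAll calls run when Backup returns, so the temporary
	// directories for all buckets are cleaned up together at the end of the run.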
	for _, bucket := range backupBuckets {
		bucketLogger := logger.With("bucket", bucket)
		bucketLogger.Debug("Downloading bucket objects")

		downloadRoot, err := os.MkdirTemp("", "*")
		if err != nil {
			bucketLogger.Error("Could not create temporary download directory", "error", err)
			return err
		}
		defer os.RemoveAll(downloadRoot)

		objectCount := 0
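		// ListObjects streams results over a channel; download each object to
		// downloadRoot/<bucket>/<key>, collecting failures instead of aborting.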
		for object := range MinioClient.ListObjects(ctx, bucket, minio.ListObjectsOptions{Recursive: true}) {
			// Listing errors are delivered as entries with Err set.
			if object.Err != nil {
				bucketLogger.Error("Error listing objects", "error", object.Err)
				getErrors = append(getErrors, object.Err.Error())
				continue
			}
			getError := MinioClient.FGetObject(ctx, bucket, object.Key, fmt.Sprintf("%s/%s/%s", downloadRoot, bucket, object.Key), minio.GetObjectOptions{})
			if getError != nil {
				bucketLogger.Error("Error getting object", "error", getError)
				getErrors = append(getErrors, getError.Error())
				continue
			}
			objectCount += 1
		}

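		// Archive the downloaded tree to a tar file in the temporary
		// directory, then gzip it into backupRoot as <date>-<bucket>.tar.gz.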
bucketLogger.Debug("Compressing objects", "count", objectCount)
now := time.Now().Format("2006-01-02")
tarFile := fmt.Sprintf("%s/%s-%s.tar", downloadRoot, now, bucket)
err = Tar(fmt.Sprintf("%s/%s", downloadRoot, bucket), tarFile)
if err != nil {
bucketLogger.Error("Error archiving directory", "error", err)
return err
}
gzFile := fmt.Sprintf("%s/%s-%s.tar.gz", backupRoot, now, bucket)
err = Gzip(tarFile, gzFile)
if err != nil {
bucketLogger.Error("Error compressing archive", "error", err)
return err
}
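		// BACKUP_KEEP_DAYS is treated as the number of archives to retain per
		// bucket: the dated filenames sort lexicographically, so the oldest
		// archives are deleted first.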
		if backupsToKeep > 0 {
			bucketLogger.Debug("Cleaning up old backups")
			matches, _ := filepath.Glob(fmt.Sprintf("%s/*-%s.tar.gz", backupRoot, bucket))
			if len(matches) > int(backupsToKeep) {
				slices.Sort(matches)
				backupsToDelete := len(matches) - int(backupsToKeep)
				for i, match := range matches {
					if i >= backupsToDelete {
						break
					}
					bucketLogger.Debug("Deleting old backup", "filename", match)
					os.Remove(match)
				}
			}
		}
	}

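	// Surface per-object listing and download failures as one aggregated error.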
	if len(getErrors) > 0 {
		err = fmt.Errorf("%d errors retrieving objects: %s", len(getErrors), strings.Join(getErrors, ", "))
	}
	return err
}