Compression, cleanup

parent cebb453536
commit 00afbade89
@@ -0,0 +1,6 @@
+BUCKET_NAMES=bucket_one,bucket_two
+S3_HOST=192.168.0.1:9000
+S3_ACCESS_KEY_ID=backup
+S3_ACCESS_KEY_SECRET=supersecret
+S3_USE_SSL=false
+BACKUP_KEEP_DAYS=5
@@ -0,0 +1,2 @@
+.env
+.DS_Store
@@ -16,11 +16,5 @@ tasks:
   tag:
     prompt: This will push a git tag and docker image versioned {{.TAG}}... Do you want to continue?
    cmds:
-      - task: build
-        vars:
-          NAME: "{{.NAME}}"
-      - task: push
-        vars:
-          NAME: "{{.NAME}}"
       - git tag "{{.TAG}}"
       - git push origin "{{.TAG}}"
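For reference, this task would presumably be invoked as `task tag TAG=v1.2.3` (the TAG value is made up); Task shows the prompt above and waits for confirmation before tagging and pushing.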
@@ -0,0 +1,11 @@
+version: '3.9'
+
+services:
+  s3-backup:
+    build: .
+    image: docker-s3-backup:dev
+    command: backup
+    volumes:
+      - ./backup:/backup
+    env_file:
+      - ./.env
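A one-off run with this compose file would presumably look like `docker compose run --rm s3-backup`, picking up credentials from ./.env and writing archives to ./backup on the host.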
@@ -3,7 +3,11 @@ package internal

 import (
     "context"
     "fmt"
+    "os"
+    "path/filepath"
+    "slices"
     "strings"
+    "time"

     "github.com/minio/minio-go/v7"
     "github.com/minio/minio-go/v7/pkg/credentials"
@@ -18,6 +22,7 @@ var (
     s3Region      string
     s3User        string
     s3Pass        string
+    backupsToKeep int64
 )

 func init() {
@@ -30,6 +35,7 @@ func init() {
     s3Region = GetEnvWithDefault("S3_REGION", "us-east-1")
     s3User = GetEnvWithDefault("S3_ACCESS_KEY_ID", "")
     s3Pass = GetEnvWithDefault("S3_ACCESS_KEY_SECRET", "")
+    backupsToKeep = ParseIntDefault(GetEnvWithDefault("BACKUP_KEEP_DAYS", "0"), 0)
 }

 func Backup(ctx context.Context) error {
@@ -49,12 +55,58 @@ func Backup(ctx context.Context) error {
     logger.Debug("Connected to S3", "host", s3Host)

     for _, bucket := range backupBuckets {
-        logger.Debug("Backing up bucket", "bucket", bucket)
+        bucketLogger := logger.With("bucket", bucket)
+        bucketLogger.Debug("Downloading bucket objects")
+
+        downloadRoot, err := os.MkdirTemp("", "*")
+        if err != nil {
+            bucketLogger.Error("Could not create temporary download directory", "error", err)
+            return err
+        }
+        defer os.RemoveAll(downloadRoot)
+
+        objectCount := 0
         for object := range MinioClient.ListObjects(ctx, bucket, minio.ListObjectsOptions{Recursive: true}) {
-            getError := MinioClient.FGetObject(ctx, bucket, object.Key, fmt.Sprintf("%s/%s/%s", backupRoot, bucket, object.Key), minio.GetObjectOptions{})
+            getError := MinioClient.FGetObject(ctx, bucket, object.Key, fmt.Sprintf("%s/%s/%s", downloadRoot, bucket, object.Key), minio.GetObjectOptions{})
             if getError != nil {
-                logger.Error("Error getting object", "bucket", bucket, "error", getError)
+                bucketLogger.Error("Error getting object", "error", getError)
                 getErrors = append(getErrors, getError.Error())
                 continue
             }
+            objectCount += 1
         }
+
+        bucketLogger.Debug("Compressing objects", "count", objectCount)
+        now := time.Now().Format("2006-01-02")
+        tarFile := fmt.Sprintf("%s/%s-%s.tar", downloadRoot, now, bucket)
+        err = Tar(fmt.Sprintf("%s/%s", downloadRoot, bucket), tarFile)
+        if err != nil {
+            bucketLogger.Error("Error archiving directory", "error", err)
+            return err
+        }
+
+        gzFile := fmt.Sprintf("%s/%s-%s.tar.gz", backupRoot, now, bucket)
+        err = Gzip(tarFile, gzFile)
+        if err != nil {
+            bucketLogger.Error("Error compressing archive", "error", err)
+            return err
+        }
+
+        if backupsToKeep > 0 {
+            bucketLogger.Debug("Cleaning up old backups")
+            matches, _ := filepath.Glob(fmt.Sprintf("%s/*-%s.tar.gz", backupRoot, bucket))
+
+            if len(matches) > int(backupsToKeep) {
+                slices.Sort(matches)
+                backupsToDelete := len(matches) - int(backupsToKeep)
+
+                for i, match := range matches {
+                    if i >= backupsToDelete {
+                        break
+                    }
+                    bucketLogger.Debug("Deleting old backup", "filename", match)
+                    os.Remove(match)
+                }
+            }
+        }
+    }
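Why the cleanup above can get away with a plain lexical sort: the archive names embed the date as YYYY-MM-DD, so lexicographic order is chronological order and the oldest backups land at the front of the slice. A minimal standalone sketch of that retention logic (file names invented):

package main

import (
    "fmt"
    "slices"
)

func main() {
    // Date-prefixed names sort lexicographically into chronological order.
    matches := []string{
        "/backup/2024-01-03-bucket_one.tar.gz",
        "/backup/2024-01-01-bucket_one.tar.gz",
        "/backup/2024-01-02-bucket_one.tar.gz",
    }
    backupsToKeep := 2

    if len(matches) > backupsToKeep {
        slices.Sort(matches)
        // The oldest len(matches)-backupsToKeep entries are pruned.
        for _, match := range matches[:len(matches)-backupsToKeep] {
            fmt.Println("would delete:", match) // the real code calls os.Remove(match)
        }
    }
}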
@@ -0,0 +1,148 @@
+package internal
+
+import (
+    "archive/tar"
+    "compress/gzip"
+    "io"
+    "os"
+    "path/filepath"
+    "strings"
+)
+
+func Tar(source, target string) error {
+    // filename := filepath.Base(source)
+    // target = filepath.Join(target, fmt.Sprintf("%s.tar", filename))
+    tarfile, err := os.Create(target)
+    if err != nil {
+        return err
+    }
+    defer tarfile.Close()
+
+    tarball := tar.NewWriter(tarfile)
+    defer tarball.Close()
+
+    info, err := os.Stat(source)
+    if err != nil {
+        return err
+    }
+
+    var baseDir string
+    if info.IsDir() {
+        baseDir = filepath.Base(source)
+    }
+
+    return filepath.Walk(source,
+        func(path string, info os.FileInfo, err error) error {
+            if err != nil {
+                return err
+            }
+            header, err := tar.FileInfoHeader(info, info.Name())
+            if err != nil {
+                return err
+            }
+
+            if baseDir != "" {
+                header.Name = filepath.Join(baseDir, strings.TrimPrefix(path, source))
+            }
+
+            if err := tarball.WriteHeader(header); err != nil {
+                return err
+            }
+
+            if info.IsDir() {
+                return nil
+            }
+
+            file, err := os.Open(path)
+            if err != nil {
+                return err
+            }
+            defer file.Close()
+            _, err = io.Copy(tarball, file)
+            return err
+        })
+}
+
+func Untar(tarball, target string) error {
+    reader, err := os.Open(tarball)
+    if err != nil {
+        return err
+    }
+    defer reader.Close()
+    tarReader := tar.NewReader(reader)
+
+    for {
+        header, err := tarReader.Next()
+        if err == io.EOF {
+            break
+        } else if err != nil {
+            return err
+        }
+
+        path := filepath.Join(target, header.Name)
+        info := header.FileInfo()
+        if info.IsDir() {
+            if err = os.MkdirAll(path, info.Mode()); err != nil {
+                return err
+            }
+            continue
+        }
+
+        file, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, info.Mode())
+        if err != nil {
+            return err
+        }
+        // Close explicitly per file; a defer inside the loop would keep
+        // every extracted file open until Untar returns.
+        _, err = io.Copy(file, tarReader)
+        file.Close()
+        if err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+func Gzip(source, target string) error {
+    reader, err := os.Open(source)
+    if err != nil {
+        return err
+    }
+    defer reader.Close()
+
+    filename := filepath.Base(source)
+    // target = filepath.Join(target, fmt.Sprintf("%s.gz", filename))
+    writer, err := os.Create(target)
+    if err != nil {
+        return err
+    }
+    defer writer.Close()
+
+    archiver := gzip.NewWriter(writer)
+    archiver.Name = filename
+    defer archiver.Close()
+
+    _, err = io.Copy(archiver, reader)
+    return err
+}
+
+func Gunzip(source, target string) error {
+    reader, err := os.Open(source)
+    if err != nil {
+        return err
+    }
+    defer reader.Close()
+
+    archive, err := gzip.NewReader(reader)
+    if err != nil {
+        return err
+    }
+    defer archive.Close()
+
+    target = filepath.Join(target, archive.Name)
+    writer, err := os.Create(target)
+    if err != nil {
+        return err
+    }
+    defer writer.Close()
+
+    _, err = io.Copy(writer, archive)
+    return err
+}
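A quick way to sanity-check these helpers is a round trip through all four. A minimal sketch as a test in the same package (file layout and names assumed, not part of the commit):

package internal

import (
    "os"
    "path/filepath"
    "testing"
)

func TestArchiveRoundTrip(t *testing.T) {
    dir := t.TempDir()
    src := filepath.Join(dir, "data")
    if err := os.MkdirAll(src, 0o755); err != nil {
        t.Fatal(err)
    }
    if err := os.WriteFile(filepath.Join(src, "hello.txt"), []byte("hi"), 0o644); err != nil {
        t.Fatal(err)
    }

    tarPath := filepath.Join(dir, "data.tar")
    gzPath := filepath.Join(dir, "data.tar.gz")

    // tar -> gzip, then gunzip -> untar, should reproduce the file.
    if err := Tar(src, tarPath); err != nil {
        t.Fatal(err)
    }
    if err := Gzip(tarPath, gzPath); err != nil {
        t.Fatal(err)
    }

    out := filepath.Join(dir, "out")
    if err := os.MkdirAll(out, 0o755); err != nil {
        t.Fatal(err)
    }
    if err := Gunzip(gzPath, out); err != nil {
        t.Fatal(err)
    }
    // Gzip stored the original name ("data.tar") in the gzip header,
    // so Gunzip recreates it under out.
    if err := Untar(filepath.Join(out, "data.tar"), out); err != nil {
        t.Fatal(err)
    }

    if _, err := os.Stat(filepath.Join(out, "data", "hello.txt")); err != nil {
        t.Fatalf("round trip lost hello.txt: %v", err)
    }
}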
@@ -2,6 +2,7 @@ package internal

 import (
     "os"
+    "strconv"
 )

 func GetEnvWithDefault(name string, defaultValue string) string {
@@ -11,3 +12,11 @@ func GetEnvWithDefault(name string, defaultValue string) string {
     }
     return defaultValue
 }
+
+func ParseIntDefault(value string, defaultValue int64) int64 {
+    ret, err := strconv.ParseInt(value, 10, 64)
+    if err != nil {
+        return defaultValue
+    }
+    return ret
+}
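ParseIntDefault swallows the parse error and falls back to the default, so a missing or malformed BACKUP_KEEP_DAYS simply disables retention: e.g. ParseIntDefault("5", 0) returns 5 while ParseIntDefault("five", 0) returns 0 (values here are illustrative).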