Compare commits

..

No commits in common. "main" and "v0.1.0-beta.3" have entirely different histories.

11 changed files with 15 additions and 364 deletions

View File

@ -1,65 +0,0 @@
kind: pipeline
type: docker
name: ci
steps:
- name: build
image: code.jhot.me/drone-users/drone-builder-image:main
volumes:
- name: docker
path: /var/lib/docker
- name: dockersocket
path: /var/run/docker.sock
commands:
- task setup-buildx BUILDER_NAME=builder${DRONE_COMMIT_SHA}${DRONE_BUILD_NUMBER}
- tag=$${DRONE_TAG:-$DRONE_BRANCH} # set tag to tag name (if applicable) or branch name
- task build TAG=$tag # build the docker image
- name: publish tag
image: code.jhot.me/drone-users/drone-builder-image:main
depends_on:
- build
environment:
PASSWORD:
from_secret: FORGEJO_PASSWORD
volumes:
- name: docker
path: /var/lib/docker
- name: dockersocket
path: /var/run/docker.sock
commands:
- docker login code.jhot.me -u jhot -p $PASSWORD
- tag=$${DRONE_TAG:-$DRONE_BRANCH} # set tag to tag name (if applicable) or branch name
- task setup-buildx BUILDER_NAME=builder${DRONE_COMMIT_SHA}${DRONE_BUILD_NUMBER}
- task push TAG=$tag
when:
event:
- tag
- name: publish main
image: code.jhot.me/drone-users/drone-builder-image:main
depends_on:
- build
environment:
PASSWORD:
from_secret: FORGEJO_PASSWORD
volumes:
- name: docker
path: /var/lib/docker
- name: dockersocket
path: /var/run/docker.sock
commands:
- docker login code.jhot.me -u jhot -p $PASSWORD
- task setup-buildx BUILDER_NAME=builder${DRONE_COMMIT_SHA}${DRONE_BUILD_NUMBER}
- task push TAG=main
when:
branch:
- main
volumes:
- name: dockersocket
host:
path: /var/run/docker.sock
- name: docker
temp: {}
image_pull_secrets:
- dockerconfig

View File

@ -1,6 +0,0 @@
BUCKET_NAMES=bucket_one,bucket_two
S3_HOST=192.168.0.1:9000
S3_ACCESS_KEY_ID=backup
S3_ACCESS_KEY_SECRET=supersecret
S3_USE_SSL=false
BACKUP_KEEP_DAYS=5

2
.gitignore vendored
View File

@ -1,2 +0,0 @@
.env
.DS_Store

View File

@ -1,28 +1,9 @@
version: '3'
vars:
NAME: code.jhot.me/jhot/docker-s3-backup
tasks:
build:
cmds:
- docker buildx build --platform linux/amd64,linux/arm64 -t "{{.NAME}}:{{.TAG}}" .
- docker buildx build -t {{.NAME}}:{{.TAG}} .
vars:
TAG: '{{.TAG | default "latest"}}'
push:
cmds:
- docker buildx build --push --platform linux/amd64,linux/arm64 -t "{{.NAME}}:{{.TAG}}" .
vars:
TAG: '{{.TAG | default "latest"}}'
tag:
prompt: This will push a git tag and docker image versioned {{.TAG}}... Do you want to continue?
cmds:
- git tag "{{.TAG}}"
- git push origin "{{.TAG}}"
vars:
TAG: '{{.TAG | default "latest"}}'
setup-buildx:
cmds:
- docker buildx create --name {{.BUILDER_NAME}} --use
vars:
BUILDER_NAME: '{{.BUILDER_NAME | default "mybuilder"}}'
NAME: docker-s3-backup
TAG: latest

View File

@ -1,11 +0,0 @@
version: '3.9'
services:
s3-backup:
build: .
image: docker-s3-backup:dev
command: backup
volumes:
- ./backup:/backup
env_file:
- ./.env

2
go.mod
View File

@ -1,6 +1,6 @@
module code.jhot.me/jhot/docker-s3-backup
go 1.21
go 1.20
require (
github.com/go-co-op/gocron v1.33.1

View File

@ -3,11 +3,7 @@ package internal
import (
"context"
"fmt"
"os"
"path/filepath"
"slices"
"strings"
"time"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
@ -15,98 +11,29 @@ import (
var (
MinioClient *minio.Client
backupBuckets = []string{}
backupRoot string
s3Host string
s3Secure bool
s3Region string
s3User string
s3Pass string
backupsToKeep int64
)
func init() {
for _, bucket := range strings.Split(GetEnvWithDefault("BUCKET_NAMES", ""), ",") {
backupBuckets = append(backupBuckets, strings.TrimSpace(bucket))
}
backupRoot = GetEnvWithDefault("BACKUP_ROOT", "/backup")
s3Host = GetEnvWithDefault("S3_HOST", "localhost:9000")
s3Secure = GetEnvWithDefault("S3_USE_SSL", "") == "true"
s3Region = GetEnvWithDefault("S3_REGION", "us-east-1")
s3User = GetEnvWithDefault("S3_ACCESS_KEY_ID", "")
s3Pass = GetEnvWithDefault("S3_ACCESS_KEY_SECRET", "")
backupsToKeep = ParseIntDefault(GetEnvWithDefault("BACKUP_KEEP_DAYS", "0"), 0)
}
func Backup(ctx context.Context) error {
logger.Info("Starting backup")
var err error
var getErrors = []string{}
bucketNames := GetEnvWithDefault("BUCKET_NAMES", "")
backupRoot := GetEnvWithDefault("BACKUP_ROOT", "/backup")
MinioClient, err = minio.New(s3Host, &minio.Options{
Creds: credentials.NewStaticV4(s3User, s3Pass, ""),
Secure: s3Secure,
Region: s3Region,
MinioClient, err = minio.New(GetEnvWithDefault("S3_HOST", "localhost"), &minio.Options{
Creds: credentials.NewStaticV4(GetEnvWithDefault("S3_ACCESS_KEY_ID", ""), GetEnvWithDefault("S3_ACCESS_KEY_SECRET", ""), ""),
Secure: GetEnvWithDefault("S3_USE_SSL", "") == "true",
Region: GetEnvWithDefault("S3_REGION", "us-east-1"),
})
if err != nil {
logger.Error("Error connecting to S3", "error", err)
return err
}
logger.Debug("Connected to S3", "host", s3Host)
for _, bucket := range backupBuckets {
bucketLogger := logger.With("bucket", bucket)
bucketLogger.Debug("Downloading bucket objects")
downloadRoot, err := os.MkdirTemp("", "*")
if err != nil {
bucketLogger.Error("Could not create temporary download directory", "error", err)
return err
}
defer os.RemoveAll(downloadRoot)
objectCount := 0
for _, bucket := range strings.Split(bucketNames, ",") {
bucket = strings.TrimSpace(bucket)
for object := range MinioClient.ListObjects(ctx, bucket, minio.ListObjectsOptions{Recursive: true}) {
getError := MinioClient.FGetObject(ctx, bucket, object.Key, fmt.Sprintf("%s/%s/%s", downloadRoot, bucket, object.Key), minio.GetObjectOptions{})
getError := MinioClient.FGetObject(ctx, bucket, object.Key, fmt.Sprintf("%s/%s/%s", backupRoot, bucket, object.Key), minio.GetObjectOptions{})
if getError != nil {
bucketLogger.Error("Error getting object", "error", getError)
getErrors = append(getErrors, getError.Error())
continue
}
objectCount += 1
}
bucketLogger.Debug("Compressing objects", "count", objectCount)
now := time.Now().Format("2006-01-02")
tarFile := fmt.Sprintf("%s/%s-%s.tar", downloadRoot, now, bucket)
err = Tar(fmt.Sprintf("%s/%s", downloadRoot, bucket), tarFile)
if err != nil {
bucketLogger.Error("Error archiving directory", "error", err)
return err
}
gzFile := fmt.Sprintf("%s/%s-%s.tar.gz", backupRoot, now, bucket)
err = Gzip(tarFile, gzFile)
if err != nil {
bucketLogger.Error("Error compressing archive", "error", err)
return err
}
if backupsToKeep > 0 {
bucketLogger.Debug("Cleaning up old backups")
matches, _ := filepath.Glob(fmt.Sprintf("%s/*-%s.tar.gz", backupRoot, bucket))
if len(matches) > int(backupsToKeep) {
slices.Sort(matches)
backupsToDelete := len(matches) - int(backupsToKeep)
for i, match := range matches {
if i >= backupsToDelete {
break
}
bucketLogger.Debug("Deleting old backup", "filename", match)
os.Remove(match)
}
}
}
}

View File

@ -1,148 +0,0 @@
package internal
import (
	"archive/tar"
	"compress/gzip"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
)
// Tar archives the file or directory tree rooted at source into a new
// tar file created at target. When source is a directory, every entry in
// the archive is prefixed with the directory's base name, so extraction
// recreates the tree under that name.
func Tar(source, target string) error {
	tarfile, err := os.Create(target)
	if err != nil {
		return err
	}
	defer tarfile.Close()

	tarball := tar.NewWriter(tarfile)
	defer tarball.Close()

	info, err := os.Stat(source)
	if err != nil {
		// Bug fix: this previously returned nil, silently producing an
		// empty archive when the source did not exist.
		return err
	}

	// For a directory source, prefix entries with its base name; for a
	// single file, the entry keeps its own name via FileInfoHeader below.
	var baseDir string
	if info.IsDir() {
		baseDir = filepath.Base(source)
	}

	return filepath.Walk(source,
		func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			header, err := tar.FileInfoHeader(info, info.Name())
			if err != nil {
				return err
			}
			if baseDir != "" {
				header.Name = filepath.Join(baseDir, strings.TrimPrefix(path, source))
			}
			if err := tarball.WriteHeader(header); err != nil {
				return err
			}
			if info.IsDir() {
				return nil
			}
			file, err := os.Open(path)
			if err != nil {
				return err
			}
			defer file.Close()
			_, err = io.Copy(tarball, file)
			return err
		})
}
// Untar extracts the tar archive at tarball into the target directory.
// Directories are created with their recorded modes; regular files are
// written with their recorded modes, creating missing parent directories
// as needed. Entries whose names would escape target (path traversal,
// e.g. "../evil") are rejected with an error.
func Untar(tarball, target string) error {
	reader, err := os.Open(tarball)
	if err != nil {
		return err
	}
	defer reader.Close()

	tarReader := tar.NewReader(reader)
	for {
		header, err := tarReader.Next()
		if err == io.EOF {
			break
		} else if err != nil {
			return err
		}

		path := filepath.Join(target, header.Name)
		// Guard against "zip slip": the cleaned path must stay inside target.
		if rel, relErr := filepath.Rel(target, path); relErr != nil || rel == ".." || strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
			return fmt.Errorf("tar entry %q escapes target directory", header.Name)
		}

		info := header.FileInfo()
		if info.IsDir() {
			if err = os.MkdirAll(path, info.Mode()); err != nil {
				return err
			}
			continue
		}

		// Some archives carry no explicit directory entries; make sure the
		// parent exists before creating the file.
		if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
			return err
		}

		if err := untarFile(path, info.Mode(), tarReader); err != nil {
			return err
		}
	}
	return nil
}

// untarFile writes a single regular file from the tar stream, closing it
// promptly. (The original deferred every Close until Untar returned,
// accumulating open descriptors across large archives.)
func untarFile(path string, mode os.FileMode, r io.Reader) error {
	file, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, mode)
	if err != nil {
		return err
	}
	defer file.Close()
	_, err = io.Copy(file, r)
	return err
}
// Gzip compresses the file at source into a new gzip file created at
// target. The source file's base name is recorded in the gzip header so
// Gunzip can restore it.
func Gzip(source, target string) error {
	reader, err := os.Open(source)
	if err != nil {
		return err
	}
	// Bug fix: the source file was never closed (descriptor leak).
	defer reader.Close()

	filename := filepath.Base(source)

	writer, err := os.Create(target)
	if err != nil {
		return err
	}
	defer writer.Close()

	archiver := gzip.NewWriter(writer)
	archiver.Name = filename
	if _, err = io.Copy(archiver, reader); err != nil {
		archiver.Close()
		return err
	}
	// Close flushes the remaining compressed data; ignoring its error
	// (as a bare defer did) could report success on a truncated archive.
	return archiver.Close()
}
// Gunzip decompresses the gzip file at source into the target directory.
// The output file name is taken from the name stored in the gzip header.
func Gunzip(source, target string) error {
	in, err := os.Open(source)
	if err != nil {
		return err
	}
	defer in.Close()

	zr, err := gzip.NewReader(in)
	if err != nil {
		return err
	}
	defer zr.Close()

	out, err := os.Create(filepath.Join(target, zr.Name))
	if err != nil {
		return err
	}
	defer out.Close()

	_, err = io.Copy(out, zr)
	return err
}

View File

@ -2,7 +2,6 @@ package internal
import (
"os"
"strconv"
)
func GetEnvWithDefault(name string, defaultValue string) string {
@ -12,11 +11,3 @@ func GetEnvWithDefault(name string, defaultValue string) string {
}
return defaultValue
}
// ParseIntDefault parses value as a base-10 64-bit integer, returning
// defaultValue when the string cannot be parsed.
func ParseIntDefault(value string, defaultValue int64) int64 {
	if parsed, err := strconv.ParseInt(value, 10, 64); err == nil {
		return parsed
	}
	return defaultValue
}

View File

@ -1,14 +0,0 @@
package internal
import (
"log/slog"
"os"
)
// logger is the package-wide structured logger. It writes text-formatted
// records to stdout and emits everything down to debug level.
// Initialized via a package-level var initializer rather than init(),
// which runs even earlier during package initialization.
var logger = slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
	Level: slog.LevelDebug,
}))

View File

@ -12,14 +12,12 @@ var (
)
func StartSchedule(ctx context.Context) error {
logger.Info("Starting schedule")
cronExpression := GetEnvWithDefault("SCHEDULE_CRON", "0 0 * * *")
scheduler = gocron.NewScheduler(time.Local)
job, err := scheduler.Cron(cronExpression).Do(Backup, ctx)
_, err := scheduler.Cron(cronExpression).Do(Backup, ctx)
if err != nil {
return err
}
logger.Debug("Next scheduled backup", "time", job.NextRun())
scheduler.StartBlocking()
return nil
}