Compare commits

..

11 Commits

Author SHA1 Message Date
jhot cd575c0581
Use jhot to publish
continuous-integration/drone/push Build is passing Details
2023-11-01 13:43:45 -06:00
jhot 9141025fa4
Setup builder before push
continuous-integration/drone/push Build is failing Details
2023-11-01 13:37:53 -06:00
jhot 364522fd90
Specify docker registry URL
continuous-integration/drone/push Build is failing Details
2023-11-01 13:33:54 -06:00
jhot c1fb6d7440
Spell password correctly
continuous-integration/drone/push Build is failing Details
2023-11-01 13:30:49 -06:00
jhot 618157a235 Build with Drone.io (#5)
continuous-integration/drone/push Build is failing Details
Reviewed-on: #5
2023-11-01 19:27:58 +00:00
jhot 2e1267f20b Compression and Cleanup (#4)
/ build_and_test (push) Successful in 1m58s Details
Reviewed-on: #4
2023-09-20 16:30:46 +00:00
jhot f5808978a7
Fix image name
/ build_and_test (push) Successful in 1m56s Details
2023-09-19 11:19:49 -06:00
jhot 0546fdf474 Actions dependencies (#2)
/ build_and_test (push) Failing after 1m54s Details
Reviewed-on: #2
2023-09-19 17:15:23 +00:00
jhot 8016ff837e
csv
/ build_and_test (push) Failing after 10s Details
2023-09-19 11:01:24 -06:00
jhot 18fa1187cf Initial actions workflow (#1)
Reviewed-on: #1
2023-09-19 17:00:30 +00:00
jhot 2e90fe12a3
Logger 2023-09-19 05:48:08 -06:00
11 changed files with 364 additions and 15 deletions

65
.drone.yml Normal file
View File

@ -0,0 +1,65 @@
# Drone CI pipeline: build a multi-arch image, then publish it
# tagged with either the git tag or "main" depending on the trigger.
kind: pipeline
type: docker
name: ci

steps:
  - name: build
    image: code.jhot.me/drone-users/drone-builder-image:main
    volumes:
      - name: docker
        path: /var/lib/docker
      - name: dockersocket
        path: /var/run/docker.sock
    commands:
      # Unique builder name per commit/build avoids clashes between concurrent runs.
      - task setup-buildx BUILDER_NAME=builder${DRONE_COMMIT_SHA}${DRONE_BUILD_NUMBER}
      - tag=$${DRONE_TAG:-$DRONE_BRANCH} # set tag to tag name (if applicable) or branch name
      - task build TAG=$tag # build the docker image

  - name: publish tag
    image: code.jhot.me/drone-users/drone-builder-image:main
    depends_on:
      - build
    environment:
      PASSWORD:
        from_secret: FORGEJO_PASSWORD
    volumes:
      - name: docker
        path: /var/lib/docker
      - name: dockersocket
        path: /var/run/docker.sock
    commands:
      # NOTE(review): -p on the command line can leak via the process list;
      # consider `docker login --password-stdin` instead.
      - docker login code.jhot.me -u jhot -p $PASSWORD
      - tag=$${DRONE_TAG:-$DRONE_BRANCH} # set tag to tag name (if applicable) or branch name
      - task setup-buildx BUILDER_NAME=builder${DRONE_COMMIT_SHA}${DRONE_BUILD_NUMBER}
      - task push TAG=$tag
    when:
      event:
        - tag

  - name: publish main
    image: code.jhot.me/drone-users/drone-builder-image:main
    depends_on:
      - build
    environment:
      PASSWORD:
        from_secret: FORGEJO_PASSWORD
    volumes:
      - name: docker
        path: /var/lib/docker
      - name: dockersocket
        path: /var/run/docker.sock
    commands:
      # NOTE(review): -p on the command line can leak via the process list;
      # consider `docker login --password-stdin` instead.
      - docker login code.jhot.me -u jhot -p $PASSWORD
      - task setup-buildx BUILDER_NAME=builder${DRONE_COMMIT_SHA}${DRONE_BUILD_NUMBER}
      - task push TAG=main
    when:
      branch:
        - main

volumes:
  # Host docker socket is mounted so the builder image can drive the daemon.
  - name: dockersocket
    host:
      path: /var/run/docker.sock
  # Scratch space for docker state, discarded after the pipeline.
  - name: docker
    temp: {}

image_pull_secrets:
  - dockerconfig

6
.env.example.txt Normal file
View File

@ -0,0 +1,6 @@
# Example environment for docker-s3-backup; copy to .env and fill in real values.
# Comma-separated list of S3 buckets to back up.
BUCKET_NAMES=bucket_one,bucket_two
# S3 endpoint as host:port (e.g. a MinIO server).
S3_HOST=192.168.0.1:9000
# S3 credentials.
S3_ACCESS_KEY_ID=backup
S3_ACCESS_KEY_SECRET=supersecret
# "true" to connect over TLS; any other value disables SSL.
S3_USE_SSL=false
# How many backups to retain; 0 disables cleanup.
# NOTE(review): the name says days, but the cleanup logic appears to keep a
# count of archive files per bucket — confirm the intended semantics.
BACKUP_KEEP_DAYS=5

2
.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
# Local environment configuration (contains credentials) — never commit.
.env
# macOS Finder metadata.
.DS_Store

View File

@ -1,9 +1,28 @@
# Taskfile for building, pushing, and tagging the docker-s3-backup image.
version: '3'

vars:
  NAME: code.jhot.me/jhot/docker-s3-backup

tasks:
  build:
    cmds:
      - docker buildx build --platform linux/amd64,linux/arm64 -t "{{.NAME}}:{{.TAG}}" .
    vars:
      TAG: '{{.TAG | default "latest"}}'
  push:
    cmds:
      - docker buildx build --push --platform linux/amd64,linux/arm64 -t "{{.NAME}}:{{.TAG}}" .
    vars:
      TAG: '{{.TAG | default "latest"}}'
  tag:
    prompt: This will push a git tag and docker image versioned {{.TAG}}... Do you want to continue?
    cmds:
      - git tag "{{.TAG}}"
      - git push origin "{{.TAG}}"
    vars:
      TAG: '{{.TAG | default "latest"}}'
  setup-buildx:
    cmds:
      - docker buildx create --name {{.BUILDER_NAME}} --use
    vars:
      BUILDER_NAME: '{{.BUILDER_NAME | default "mybuilder"}}'

11
docker-compose.yaml Normal file
View File

@ -0,0 +1,11 @@
# Local development compose file: builds the image and runs a one-off backup.
version: '3.9'

services:
  s3-backup:
    build: .
    image: docker-s3-backup:dev
    command: backup
    volumes:
      # Backup archives land here on the host.
      - ./backup:/backup
    env_file:
      - ./.env

2
go.mod
View File

@ -1,6 +1,6 @@
module code.jhot.me/jhot/docker-s3-backup module code.jhot.me/jhot/docker-s3-backup
go 1.20 go 1.21
require ( require (
github.com/go-co-op/gocron v1.33.1 github.com/go-co-op/gocron v1.33.1

View File

@ -3,37 +3,110 @@ package internal
import ( import (
"context" "context"
"fmt" "fmt"
"os"
"path/filepath"
"slices"
"strings" "strings"
"time"
"github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials" "github.com/minio/minio-go/v7/pkg/credentials"
) )
var ( var (
MinioClient *minio.Client MinioClient *minio.Client
backupBuckets = []string{}
backupRoot string
s3Host string
s3Secure bool
s3Region string
s3User string
s3Pass string
backupsToKeep int64
) )
func init() {
for _, bucket := range strings.Split(GetEnvWithDefault("BUCKET_NAMES", ""), ",") {
backupBuckets = append(backupBuckets, strings.TrimSpace(bucket))
}
backupRoot = GetEnvWithDefault("BACKUP_ROOT", "/backup")
s3Host = GetEnvWithDefault("S3_HOST", "localhost:9000")
s3Secure = GetEnvWithDefault("S3_USE_SSL", "") == "true"
s3Region = GetEnvWithDefault("S3_REGION", "us-east-1")
s3User = GetEnvWithDefault("S3_ACCESS_KEY_ID", "")
s3Pass = GetEnvWithDefault("S3_ACCESS_KEY_SECRET", "")
backupsToKeep = ParseIntDefault(GetEnvWithDefault("BACKUP_KEEP_DAYS", "0"), 0)
}
func Backup(ctx context.Context) error { func Backup(ctx context.Context) error {
logger.Info("Starting backup")
var err error var err error
var getErrors = []string{} var getErrors = []string{}
bucketNames := GetEnvWithDefault("BUCKET_NAMES", "")
backupRoot := GetEnvWithDefault("BACKUP_ROOT", "/backup")
MinioClient, err = minio.New(GetEnvWithDefault("S3_HOST", "localhost"), &minio.Options{ MinioClient, err = minio.New(s3Host, &minio.Options{
Creds: credentials.NewStaticV4(GetEnvWithDefault("S3_ACCESS_KEY_ID", ""), GetEnvWithDefault("S3_ACCESS_KEY_SECRET", ""), ""), Creds: credentials.NewStaticV4(s3User, s3Pass, ""),
Secure: GetEnvWithDefault("S3_USE_SSL", "") == "true", Secure: s3Secure,
Region: GetEnvWithDefault("S3_REGION", "us-east-1"), Region: s3Region,
}) })
if err != nil { if err != nil {
logger.Error("Error connecting to S3", "error", err)
return err return err
} }
logger.Debug("Connected to S3", "host", s3Host)
for _, bucket := range strings.Split(bucketNames, ",") { for _, bucket := range backupBuckets {
bucket = strings.TrimSpace(bucket) bucketLogger := logger.With("bucket", bucket)
bucketLogger.Debug("Downloading bucket objects")
downloadRoot, err := os.MkdirTemp("", "*")
if err != nil {
bucketLogger.Error("Could not create temporary download directory", "error", err)
return err
}
defer os.RemoveAll(downloadRoot)
objectCount := 0
for object := range MinioClient.ListObjects(ctx, bucket, minio.ListObjectsOptions{Recursive: true}) { for object := range MinioClient.ListObjects(ctx, bucket, minio.ListObjectsOptions{Recursive: true}) {
getError := MinioClient.FGetObject(ctx, bucket, object.Key, fmt.Sprintf("%s/%s/%s", backupRoot, bucket, object.Key), minio.GetObjectOptions{}) getError := MinioClient.FGetObject(ctx, bucket, object.Key, fmt.Sprintf("%s/%s/%s", downloadRoot, bucket, object.Key), minio.GetObjectOptions{})
if getError != nil { if getError != nil {
bucketLogger.Error("Error getting object", "error", getError)
getErrors = append(getErrors, getError.Error()) getErrors = append(getErrors, getError.Error())
continue
}
objectCount += 1
}
bucketLogger.Debug("Compressing objects", "count", objectCount)
now := time.Now().Format("2006-01-02")
tarFile := fmt.Sprintf("%s/%s-%s.tar", downloadRoot, now, bucket)
err = Tar(fmt.Sprintf("%s/%s", downloadRoot, bucket), tarFile)
if err != nil {
bucketLogger.Error("Error archiving directory", "error", err)
return err
}
gzFile := fmt.Sprintf("%s/%s-%s.tar.gz", backupRoot, now, bucket)
err = Gzip(tarFile, gzFile)
if err != nil {
bucketLogger.Error("Error compressing archive", "error", err)
return err
}
if backupsToKeep > 0 {
bucketLogger.Debug("Cleaning up old backups")
matches, _ := filepath.Glob(fmt.Sprintf("%s/*-%s.tar.gz", backupRoot, bucket))
if len(matches) > int(backupsToKeep) {
slices.Sort(matches)
backupsToDelete := len(matches) - int(backupsToKeep)
for i, match := range matches {
if i >= backupsToDelete {
break
}
bucketLogger.Debug("Deleting old backup", "filename", match)
os.Remove(match)
}
} }
} }
} }

148
internal/compression.go Normal file
View File

@ -0,0 +1,148 @@
package internal
import (
	"archive/tar"
	"compress/gzip"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
)
func Tar(source, target string) error {
// filename := filepath.Base(source)
// target = filepath.Join(target, fmt.Sprintf("%s.tar", filename))
tarfile, err := os.Create(target)
if err != nil {
return err
}
defer tarfile.Close()
tarball := tar.NewWriter(tarfile)
defer tarball.Close()
info, err := os.Stat(source)
if err != nil {
return nil
}
var baseDir string
if info.IsDir() {
baseDir = filepath.Base(source)
}
return filepath.Walk(source,
func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
header, err := tar.FileInfoHeader(info, info.Name())
if err != nil {
return err
}
if baseDir != "" {
header.Name = filepath.Join(baseDir, strings.TrimPrefix(path, source))
}
if err := tarball.WriteHeader(header); err != nil {
return err
}
if info.IsDir() {
return nil
}
file, err := os.Open(path)
if err != nil {
return err
}
defer file.Close()
_, err = io.Copy(tarball, file)
return err
})
}
func Untar(tarball, target string) error {
reader, err := os.Open(tarball)
if err != nil {
return err
}
defer reader.Close()
tarReader := tar.NewReader(reader)
for {
header, err := tarReader.Next()
if err == io.EOF {
break
} else if err != nil {
return err
}
path := filepath.Join(target, header.Name)
info := header.FileInfo()
if info.IsDir() {
if err = os.MkdirAll(path, info.Mode()); err != nil {
return err
}
continue
}
file, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, info.Mode())
if err != nil {
return err
}
defer file.Close()
_, err = io.Copy(file, tarReader)
if err != nil {
return err
}
}
return nil
}
func Gzip(source, target string) error {
reader, err := os.Open(source)
if err != nil {
return err
}
filename := filepath.Base(source)
// target = filepath.Join(target, fmt.Sprintf("%s.gz", filename))
writer, err := os.Create(target)
if err != nil {
return err
}
defer writer.Close()
archiver := gzip.NewWriter(writer)
archiver.Name = filename
defer archiver.Close()
_, err = io.Copy(archiver, reader)
return err
}
func Gunzip(source, target string) error {
reader, err := os.Open(source)
if err != nil {
return err
}
defer reader.Close()
archive, err := gzip.NewReader(reader)
if err != nil {
return err
}
defer archive.Close()
target = filepath.Join(target, archive.Name)
writer, err := os.Create(target)
if err != nil {
return err
}
defer writer.Close()
_, err = io.Copy(writer, archive)
return err
}

14
internal/logger.go Normal file
View File

@ -0,0 +1,14 @@
package internal
import (
"log/slog"
"os"
)
// logger is the package-wide structured logger: text-formatted records to
// stdout, with Debug level and above enabled. Initialized as a package-level
// var (runs before any init func, same effect as the former init()).
var logger = slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug}))

View File

@ -12,12 +12,14 @@ var (
) )
func StartSchedule(ctx context.Context) error { func StartSchedule(ctx context.Context) error {
logger.Info("Starting schedule")
cronExpression := GetEnvWithDefault("SCHEDULE_CRON", "0 0 * * *") cronExpression := GetEnvWithDefault("SCHEDULE_CRON", "0 0 * * *")
scheduler = gocron.NewScheduler(time.Local) scheduler = gocron.NewScheduler(time.Local)
_, err := scheduler.Cron(cronExpression).Do(Backup, ctx) job, err := scheduler.Cron(cronExpression).Do(Backup, ctx)
if err != nil { if err != nil {
return err return err
} }
logger.Debug("Next scheduled backup", "time", job.NextRun())
scheduler.StartBlocking() scheduler.StartBlocking()
return nil return nil
} }

View File

@ -2,6 +2,7 @@ package internal
import ( import (
"os" "os"
"strconv"
) )
func GetEnvWithDefault(name string, defaultValue string) string { func GetEnvWithDefault(name string, defaultValue string) string {
@ -11,3 +12,11 @@ func GetEnvWithDefault(name string, defaultValue string) string {
} }
return defaultValue return defaultValue
} }
// ParseIntDefault converts value to an int64, falling back to defaultValue
// when the string is not a valid base-10 64-bit integer.
func ParseIntDefault(value string, defaultValue int64) int64 {
	if parsed, err := strconv.ParseInt(value, 10, 64); err == nil {
		return parsed
	}
	return defaultValue
}