This commit is contained in:
2026-02-14 19:09:05 +01:00
parent 05c916e9a9
commit 5892ac2a2e
13 changed files with 394 additions and 721 deletions

View File

@@ -13,7 +13,8 @@ type Config struct {
S3SecretKey string
S3BucketName string
S3UseSSL bool
S3Enabled bool // Enable/disable S3 backend
S3Enabled bool // Enable/disable S3 backend
S3Region string // AWS region
BaseDataset string
DatabasePath string // Path to SQLite database
Port string
@@ -40,6 +41,7 @@ func LoadConfig() *Config {
S3BucketName: getEnv("S3_BUCKET", "zfs-snapshots"),
S3UseSSL: getEnv("S3_USE_SSL", "true") != "false",
S3Enabled: s3Enabled,
S3Region: getEnv("S3_REGION", "us-east-1"),
BaseDataset: getEnv("ZFS_BASE_DATASET", "backup"),
DatabasePath: getEnv("DATABASE_PATH", "zfs-backup.db"),
Port: getEnv("PORT", "8080"),

View File

@@ -7,10 +7,12 @@ import (
"log"
"net/http"
"os/exec"
"time"
"strings"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/mistifyio/go-zfs"
)
@@ -23,95 +25,119 @@ type StorageBackend interface {
GetSize(ctx context.Context, key string) (int64, error)
}
// S3Backend implements StorageBackend for S3-compatible storage
// S3Backend implements StorageBackend for S3-compatible storage using AWS SDK v2
type S3Backend struct {
client *minio.Client
client *s3.Client
bucketName string
}
// NewS3Backend creates a new S3 storage backend
func NewS3Backend(endpoint, accessKey, secretKey, bucketName string, useSSL bool) (*S3Backend, error) {
// Create custom HTTP transport with extended timeouts for large file uploads
transport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
// Extended timeouts for streaming large ZFS snapshots
ResponseHeaderTimeout: 5 * time.Minute,
ExpectContinueTimeout: 30 * time.Second,
IdleConnTimeout: 90 * time.Second,
// Connection pooling
MaxIdleConns: 10,
MaxIdleConnsPerHost: 10,
DisableCompression: false,
func NewS3Backend(endpoint, accessKey, secretKey, bucketName string, useSSL bool, region string) (*S3Backend, error) {
// Ensure endpoint has valid URI scheme
if endpoint != "" && !strings.HasPrefix(endpoint, "http://") && !strings.HasPrefix(endpoint, "https://") {
if useSSL {
endpoint = "https://" + endpoint
} else {
endpoint = "http://" + endpoint
}
}
client, err := minio.New(endpoint, &minio.Options{
Creds: credentials.NewStaticV4(accessKey, secretKey, ""),
Secure: useSSL,
Transport: transport,
// Determine if using custom endpoint (non-AWS)
customEndpoint := endpoint != "" && endpoint != "https://s3.amazonaws.com" && endpoint != "http://s3.amazonaws.com"
// Load AWS config
awsCfg, err := config.LoadDefaultConfig(context.Background(),
config.WithRegion(region),
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(accessKey, secretKey, "")),
)
if err != nil {
return nil, fmt.Errorf("failed to load AWS config: %v", err)
}
// Create S3 client
s3Client := s3.NewFromConfig(awsCfg, func(o *s3.Options) {
if customEndpoint {
o.BaseEndpoint = aws.String(endpoint)
o.UsePathStyle = true // Required for MinIO and other S3-compatible storage
}
// Set HTTP client with extended timeout for large uploads
o.HTTPClient = &http.Client{
Timeout: 0, // No timeout for large file uploads
}
})
// Check if bucket exists (or create it for AWS S3)
ctx := context.Background()
_, err = s3Client.HeadBucket(ctx, &s3.HeadBucketInput{
Bucket: aws.String(bucketName),
})
if err != nil {
return nil, fmt.Errorf("failed to create S3 client: %v", err)
}
// Ensure bucket exists
ctx := context.Background()
exists, err := client.BucketExists(ctx, bucketName)
if err != nil {
return nil, fmt.Errorf("failed to check bucket: %v", err)
}
if !exists {
err = client.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{})
// Try to create bucket
_, err = s3Client.CreateBucket(ctx, &s3.CreateBucketInput{
Bucket: aws.String(bucketName),
})
if err != nil {
return nil, fmt.Errorf("failed to create bucket: %v", err)
log.Printf("Warning: failed to create bucket: %v", err)
} else {
log.Printf("Created S3 bucket: %s", bucketName)
}
log.Printf("Created S3 bucket: %s", bucketName)
}
return &S3Backend{
client: client,
client: s3Client,
bucketName: bucketName,
}, nil
}
// Upload uploads data to S3 under the given key.
//
// size is forwarded as ContentLength when known (> 0): without it the SDK
// cannot determine the length of a non-seekable stream (e.g. a `zfs send`
// pipe) and the upload can fail or buffer the whole payload.
func (s *S3Backend) Upload(ctx context.Context, key string, data io.Reader, size int64) error {
	input := &s3.PutObjectInput{
		Bucket:      aws.String(s.bucketName),
		Key:         aws.String(key),
		Body:        data,
		ContentType: aws.String("application/octet-stream"),
	}
	if size > 0 {
		input.ContentLength = aws.Int64(size)
	}
	_, err := s.client.PutObject(ctx, input)
	return err
}
// Download retrieves data from S3.
// The caller owns the returned ReadCloser and must close it.
func (s *S3Backend) Download(ctx context.Context, key string) (io.ReadCloser, error) {
	resp, err := s.client.GetObject(ctx, &s3.GetObjectInput{
		Bucket: aws.String(s.bucketName),
		Key:    aws.String(key),
	})
	if err != nil {
		return nil, err
	}
	return resp.Body, nil
}
// Delete removes an object from S3.
// Deleting a nonexistent key is not an error in S3 semantics.
func (s *S3Backend) Delete(ctx context.Context, key string) error {
	_, err := s.client.DeleteObject(ctx, &s3.DeleteObjectInput{
		Bucket: aws.String(s.bucketName),
		Key:    aws.String(key),
	})
	return err
}
// List returns all objects with the given prefix
func (s *S3Backend) List(ctx context.Context, prefix string) ([]string, error) {
var keys []string
objectCh := s.client.ListObjects(ctx, s.bucketName, minio.ListObjectsOptions{
Prefix: prefix,
Recursive: true,
paginator := s3.NewListObjectsV2Paginator(s.client, &s3.ListObjectsV2Input{
Bucket: aws.String(s.bucketName),
Prefix: aws.String(prefix),
})
for object := range objectCh {
if object.Err != nil {
return nil, object.Err
for paginator.HasMorePages() {
page, err := paginator.NextPage(ctx)
if err != nil {
return nil, err
}
for _, obj := range page.Contents {
keys = append(keys, *obj.Key)
}
keys = append(keys, object.Key)
}
return keys, nil
@@ -119,11 +145,14 @@ func (s *S3Backend) List(ctx context.Context, prefix string) ([]string, error) {
// GetSize returns the size in bytes of an object in S3 via HeadObject.
func (s *S3Backend) GetSize(ctx context.Context, key string) (int64, error) {
	info, err := s.client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket: aws.String(s.bucketName),
		Key:    aws.String(key),
	})
	if err != nil {
		return 0, err
	}
	// ContentLength is *int64; aws.ToInt64 avoids a nil-pointer dereference.
	return aws.ToInt64(info.ContentLength), nil
}
// LocalBackend implements StorageBackend for local ZFS storage

View File

@@ -260,4 +260,5 @@ templ ClientPasswordModal() {
// AdminScripts renders the JavaScript for the admin panel: it loads the
// shared admin bundle and then applies the saved/system theme on page load.
templ AdminScripts() {
<script src="/admin/static/admin.js"></script>
<script>initTheme();</script>
}

View File

@@ -19,6 +19,30 @@ async function logout() {
location.reload();
}
// Switch between dark and light mode and persist the choice.
function toggleTheme() {
    // classList.toggle returns true when the class is now present.
    const nowDark = document.documentElement.classList.toggle('dark');
    localStorage.setItem('theme', nowDark ? 'dark' : 'light');
}
// Apply the persisted theme on page load; with no saved preference,
// fall back to the OS-level color-scheme preference.
function initTheme() {
    const stored = localStorage.getItem('theme');
    const systemDark = window.matchMedia('(prefers-color-scheme: dark)').matches;
    const wantDark = stored ? stored === 'dark' : systemDark;
    if (wantDark) {
        document.documentElement.classList.add('dark');
    }
}
// Load stats
async function loadStats() {
try {