commit 5892ac2a2e (parent 05c916e9a9)
2026-02-14 19:09:05 +01:00
13 changed files with 394 additions and 721 deletions


@@ -1,5 +1,4 @@
// Command zfs-client is the CLI tool for creating and uploading ZFS snapshots. // Command zfs-client is a simple CLI tool for creating and sending ZFS snapshots.
// It provides commands for backup, status checking, snapshot rotation, and incremental backups.
package main package main
import ( import (
@@ -22,190 +21,29 @@ func main() {
command := os.Args[1] command := os.Args[1]
switch command { switch command {
case "backup": case "snap", "snapshot":
// Default: create manual backup (full or incremental) // Create snapshot and send to server (auto full/incremental)
fmt.Println("=== Creating and sending backup ===\n") fmt.Println("=== Creating and sending snapshot ===\n")
snapshot, err := c.CreateSnapshot() snapshot, err := c.CreateAndSend()
if err != nil { if err != nil {
fmt.Printf("Error creating snapshot: %v\n", err) fmt.Printf("Error: %v\n", err)
os.Exit(1) os.Exit(1)
} }
if err := c.SendSnapshot(snapshot); err != nil { if snapshot.FullBackup {
fmt.Printf("Error sending snapshot: %v\n", err) fmt.Println("\n✓ Full backup completed!")
os.Exit(1) } else {
} fmt.Println("\n✓ Incremental backup completed!")
fmt.Println("\n✓ Backup completed successfully!")
case "backup-full":
// Force full backup (no incremental)
fmt.Println("=== Creating full backup ===\n")
snapshot, err := c.CreateSnapshot()
if err != nil {
fmt.Printf("Error creating snapshot: %v\n", err)
os.Exit(1)
}
if err := c.SendIncremental(snapshot, ""); err != nil {
fmt.Printf("Error sending snapshot: %v\n", err)
os.Exit(1)
}
// Create bookmark for future incremental backups
if err := c.CreateBookmark(snapshot); err != nil {
fmt.Printf("Warning: failed to create bookmark: %v\n", err)
}
fmt.Println("\n✓ Full backup completed successfully!")
case "backup-incremental":
// Incremental backup from last bookmark
fmt.Println("=== Creating incremental backup ===\n")
// Check for existing bookmark
lastBookmark, err := c.GetLastBookmark()
if err != nil {
fmt.Printf("Error checking bookmarks: %v\n", err)
os.Exit(1)
}
if lastBookmark == "" {
fmt.Println("No existing bookmark found. Use 'backup-full' for initial backup.")
os.Exit(1)
}
snapshot, err := c.CreateSnapshot()
if err != nil {
fmt.Printf("Error creating snapshot: %v\n", err)
os.Exit(1)
}
if err := c.SendIncremental(snapshot, lastBookmark); err != nil {
fmt.Printf("Error sending incremental snapshot: %v\n", err)
os.Exit(1)
}
// Create bookmark for future incremental backups
if err := c.CreateBookmark(snapshot); err != nil {
fmt.Printf("Warning: failed to create bookmark: %v\n", err)
}
fmt.Println("\n✓ Incremental backup completed successfully!")
case "snapshot":
// Create typed snapshots (hourly, daily, weekly, monthly)
if len(os.Args) < 3 {
fmt.Println("Usage: zfs-client snapshot <hourly|daily|weekly|monthly>")
os.Exit(1)
}
snapType := client.SnapshotType(os.Args[2])
switch snapType {
case client.SnapshotHourly, client.SnapshotDaily, client.SnapshotWeekly, client.SnapshotMonthly:
// Valid type
default:
fmt.Printf("Invalid snapshot type: %s\n", snapType)
fmt.Println("Valid types: hourly, daily, weekly, monthly")
os.Exit(1)
}
fmt.Printf("=== Creating %s snapshot ===\n\n", snapType)
snapshot, err := c.CreateSnapshotWithType(snapType)
if err != nil {
fmt.Printf("Error creating snapshot: %v\n", err)
os.Exit(1)
}
// Check for existing bookmark for incremental
lastBookmark, _ := c.GetLastBookmark()
if err := c.SendIncremental(snapshot, lastBookmark); err != nil {
fmt.Printf("Error sending snapshot: %v\n", err)
os.Exit(1)
}
// Create bookmark
if err := c.CreateBookmark(snapshot); err != nil {
fmt.Printf("Warning: failed to create bookmark: %v\n", err)
}
// Rotate local snapshots using server policy if available
policy, err := getRotationPolicy(c)
if err != nil {
fmt.Printf("Warning: failed to get rotation policy: %v\n", err)
policy = client.DefaultPolicy()
}
if err := c.RotateLocalSnapshots(policy); err != nil {
fmt.Printf("Warning: failed to rotate snapshots: %v\n", err)
}
fmt.Printf("\n✓ %s snapshot completed successfully!\n", snapType)
case "rotate":
// Rotate local snapshots using server policy if available
fmt.Println("=== Rotating local snapshots ===\n")
policy, err := getRotationPolicy(c)
if err != nil {
fmt.Printf("Warning: failed to get rotation policy: %v\n", err)
policy = client.DefaultPolicy()
}
if err := c.RotateLocalSnapshots(policy); err != nil {
fmt.Printf("Error rotating snapshots: %v\n", err)
os.Exit(1)
}
fmt.Println("\n✓ Rotation completed!")
case "rotate-remote":
// Request server to rotate remote snapshots
if err := c.RequestRotation(); err != nil {
fmt.Printf("Error requesting rotation: %v\n", err)
os.Exit(1)
} }
case "status": case "status":
// Check server connection and quota
if err := c.GetStatus(); err != nil { if err := c.GetStatus(); err != nil {
fmt.Printf("Error getting status: %v\n", err)
os.Exit(1)
}
case "bookmarks":
// List bookmarks
fmt.Println("=== ZFS Bookmarks ===\n")
bookmark, err := c.GetLastBookmark()
if err != nil {
fmt.Printf("Error: %v\n", err) fmt.Printf("Error: %v\n", err)
os.Exit(1) os.Exit(1)
} }
if bookmark == "" {
fmt.Println("No bookmarks found")
} else {
fmt.Printf("Last bookmark: %s\n", bookmark)
}
case "change-password":
// Change client API key/password
if len(os.Args) < 3 {
fmt.Println("Usage: zfs-client change-password <new-api-key>")
os.Exit(1)
}
newKey := os.Args[2]
fmt.Println("=== Changing API Key ===\n")
if err := c.ChangePassword(newKey); err != nil {
fmt.Printf("Error: %v\n", err)
os.Exit(1)
}
fmt.Println("\n✓ API key changed successfully!")
fmt.Println("Update your .env file with the new API_KEY value.")
case "help", "-h", "--help": case "help", "-h", "--help":
printUsage() printUsage()
@@ -216,56 +54,26 @@ func main() {
} }
} }
// getRotationPolicy fetches the rotation policy from the server.
// If the server has a policy configured, it must be used.
// Otherwise, the default policy is returned.
func getRotationPolicy(c *client.Client) (*client.SnapshotPolicy, error) {
serverPolicy, err := c.GetRotationPolicy()
if err != nil {
return nil, err
}
if serverPolicy.ServerManaged && serverPolicy.RotationPolicy != nil {
fmt.Println(" Using server-managed rotation policy")
return serverPolicy.RotationPolicy, nil
}
// No server policy, use default
fmt.Println(" Using default rotation policy")
return client.DefaultPolicy(), nil
}
func printUsage() { func printUsage() {
fmt.Println("ZFS Snapshot Backup Client") fmt.Println("ZFS Snapshot Backup Client - Simple Version")
fmt.Println("\nUsage: zfs-client [command]") fmt.Println("\nUsage: zfs-client [command]")
fmt.Println("\nCommands:") fmt.Println("\nCommands:")
fmt.Println(" backup - Create snapshot and send (auto incremental if bookmark exists)") fmt.Println(" snap - Create snapshot and send to server (auto full/incremental)")
fmt.Println(" backup-full - Create full backup (no incremental)") fmt.Println(" status - Check server connection and quota")
fmt.Println(" backup-incremental - Create incremental backup from last bookmark") fmt.Println(" help - Show this help message")
fmt.Println(" snapshot <type> - Create typed snapshot (hourly|daily|weekly|monthly)")
fmt.Println(" rotate - Rotate local snapshots based on retention policy")
fmt.Println(" rotate-remote - Request server to rotate old remote snapshots")
fmt.Println(" status - Check server status and quota")
fmt.Println(" bookmarks - List ZFS bookmarks")
fmt.Println(" change-password <new-key> - Change client API key")
fmt.Println(" help - Show this help message")
fmt.Println("\nSnapshot Retention Policy (default):")
fmt.Println(" Hourly: 24 snapshots")
fmt.Println(" Daily: 7 snapshots")
fmt.Println(" Weekly: 4 snapshots")
fmt.Println(" Monthly: 12 snapshots")
fmt.Println("\nEnvironment Variables (can be set in .env file):") fmt.Println("\nEnvironment Variables (can be set in .env file):")
fmt.Println(" CLIENT_ID - Client identifier (default: client1)") fmt.Println(" CLIENT_ID - Client identifier (default: client1)")
fmt.Println(" API_KEY - API key for authentication (default: secret123)") fmt.Println(" API_KEY - API key for authentication (default: secret123)")
fmt.Println(" SERVER_URL - Backup server URL (default: http://localhost:8080)") fmt.Println(" SERVER_URL - Backup server URL (default: http://localhost:8080)")
fmt.Println(" LOCAL_DATASET - ZFS dataset to backup (default: tank/data)") fmt.Println(" LOCAL_DATASET - ZFS dataset to backup (default: tank/data)")
fmt.Println(" COMPRESS - Enable LZ4 compression (default: true)") fmt.Println(" COMPRESS - Enable LZ4 compression (default: true)")
fmt.Println(" STORAGE_TYPE - Storage type: s3 or local (default: s3)") fmt.Println("\nS3 Configuration (for direct S3 uploads):")
fmt.Println(" S3_ENDPOINT - S3 endpoint URL (e.g., https://s3.amazonaws.com)")
fmt.Println(" S3_REGION - AWS region (default: us-east-1)")
fmt.Println(" S3_BUCKET - S3 bucket name (default: zfs-backups)")
fmt.Println(" S3_ACCESS_KEY - AWS access key")
fmt.Println(" S3_SECRET_KEY - AWS secret key")
fmt.Println("\nExamples:") fmt.Println("\nExamples:")
fmt.Println(" zfs-client backup") fmt.Println(" zfs-client snap")
fmt.Println(" zfs-client backup-full") fmt.Println(" zfs-client status")
fmt.Println(" zfs-client snapshot hourly")
fmt.Println(" zfs-client rotate")
fmt.Println(" zfs-client change-password mynewsecretkey")
fmt.Println(" CLIENT_ID=myclient zfs-client backup")
} }


@@ -19,7 +19,7 @@ func main() {
var err error var err error
if cfg.S3Enabled { if cfg.S3Enabled {
s3Backend, err = server.NewS3Backend(cfg.S3Endpoint, cfg.S3AccessKey, cfg.S3SecretKey, cfg.S3BucketName, cfg.S3UseSSL) s3Backend, err = server.NewS3Backend(cfg.S3Endpoint, cfg.S3AccessKey, cfg.S3SecretKey, cfg.S3BucketName, cfg.S3UseSSL, cfg.S3Region)
if err != nil { if err != nil {
log.Fatalf("Failed to initialize S3 backend: %v", err) log.Fatalf("Failed to initialize S3 backend: %v", err)
} }

go.mod

@@ -3,37 +3,41 @@ module git.ma-al.com/goc_marek/zfs
go 1.25.6 go 1.25.6
require ( require (
github.com/minio/minio-go/v7 v7.0.98 github.com/a-h/templ v0.3.977
github.com/aws/aws-sdk-go-v2 v1.41.1
github.com/aws/aws-sdk-go-v2/config v1.32.7
github.com/aws/aws-sdk-go-v2/credentials v1.19.7
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0
github.com/mistifyio/go-zfs v2.1.1+incompatible github.com/mistifyio/go-zfs v2.1.1+incompatible
github.com/pierrec/lz4/v4 v4.1.25
modernc.org/sqlite v1.45.0 modernc.org/sqlite v1.45.0
) )
require ( require (
github.com/a-h/templ v0.3.977 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 // indirect
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect
github.com/aws/smithy-go v1.24.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect github.com/dustin/go-humanize v1.0.1 // indirect
github.com/go-ini/ini v1.67.0 // indirect
github.com/google/uuid v1.6.0 // indirect github.com/google/uuid v1.6.0 // indirect
github.com/klauspost/compress v1.18.2 // indirect
github.com/klauspost/cpuid/v2 v2.2.11 // indirect
github.com/klauspost/crc32 v1.3.0 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-isatty v0.0.20 // indirect
github.com/minio/crc64nvme v1.1.1 // indirect
github.com/minio/md5-simd v1.1.2 // indirect
github.com/ncruces/go-strftime v1.0.0 // indirect github.com/ncruces/go-strftime v1.0.0 // indirect
github.com/philhofer/fwd v1.2.0 // indirect
github.com/pierrec/lz4/v4 v4.1.25 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/rs/xid v1.6.0 // indirect
github.com/tinylib/msgp v1.6.1 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/crypto v0.46.0 // indirect
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect
golang.org/x/net v0.48.0 // indirect golang.org/x/sync v0.19.0 // indirect
golang.org/x/sys v0.39.0 // indirect golang.org/x/sys v0.39.0 // indirect
golang.org/x/text v0.32.0 // indirect golang.org/x/tools v0.39.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
modernc.org/libc v1.67.6 // indirect modernc.org/libc v1.67.6 // indirect
modernc.org/mathutil v1.7.1 // indirect modernc.org/mathutil v1.7.1 // indirect
modernc.org/memory v1.11.0 // indirect modernc.org/memory v1.11.0 // indirect

go.sum

@@ -1,73 +1,74 @@
github.com/a-h/templ v0.3.977 h1:kiKAPXTZE2Iaf8JbtM21r54A8bCNsncrfnokZZSrSDg= github.com/a-h/templ v0.3.977 h1:kiKAPXTZE2Iaf8JbtM21r54A8bCNsncrfnokZZSrSDg=
github.com/a-h/templ v0.3.977/go.mod h1:oCZcnKRf5jjsGpf2yELzQfodLphd2mwecwG4Crk5HBo= github.com/a-h/templ v0.3.977/go.mod h1:oCZcnKRf5jjsGpf2yELzQfodLphd2mwecwG4Crk5HBo=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4=
github.com/aws/aws-sdk-go-v2/config v1.32.7 h1:vxUyWGUwmkQ2g19n7JY/9YL8MfAIl7bTesIUykECXmY=
github.com/aws/aws-sdk-go-v2/config v1.32.7/go.mod h1:2/Qm5vKUU/r7Y+zUk/Ptt2MDAEKAfUtKc1+3U1Mo3oY=
github.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUTvZKeLx4tH6PGQc8=
github.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17 h1:JqcdRG//czea7Ppjb+g/n4o8i/R50aTBHkA7vu0lK+k=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17/go.mod h1:CO+WeGmIdj/MlPel2KwID9Gt7CNq4M65HUfBW97liM0=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8 h1:Z5EiPIzXKewUQK0QTMkutjiaPVeVYXX7KIqhXu/0fXs=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8/go.mod h1:FsTpJtvC4U1fyDXk7c71XoDv3HlRm8V3NiYLeYLh5YE=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 h1:bGeHBsGZx0Dvu/eJC0Lh9adJa3M1xREcndxLNZlve2U=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17/go.mod h1:dcW24lbU0CzHusTE8LLHhRLI42ejmINN8Lcr22bwh/g=
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0 h1:oeu8VPlOre74lBA/PMhxa5vewaMIMmILM+RraSyB8KA=
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0/go.mod h1:5jggDlZ2CLQhwJBiZJb4vfk4f0GxWdEDruWKEJ1xOdo=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 h1:gd84Omyu9JLriJVCbGApcLzVR3XtmC4ZDPcAI6Ftvds=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ=
github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs= github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs=
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.11 h1:0OwqZRYI2rFrjS4kvkDnqJkKHdHaRnCm68/DY4OxRzU=
github.com/klauspost/cpuid/v2 v2.2.11/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/klauspost/crc32 v1.3.0 h1:sSmTt3gUt81RP655XGZPElI0PelVTZ6YwCRnPSupoFM=
github.com/klauspost/crc32 v1.3.0/go.mod h1:D7kQaZhnkX/Y0tstFGf8VUzv2UofNGqCjnC3zdHB0Hw=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/minio/crc64nvme v1.1.1 h1:8dwx/Pz49suywbO+auHCBpCtlW1OfpcLN7wYgVR6wAI=
github.com/minio/crc64nvme v1.1.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
github.com/minio/minio-go/v7 v7.0.98 h1:MeAVKjLVz+XJ28zFcuYyImNSAh8Mq725uNW4beRisi0=
github.com/minio/minio-go/v7 v7.0.98/go.mod h1:cY0Y+W7yozf0mdIclrttzo1Iiu7mEf9y7nk2uXqMOvM=
github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8= github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8=
github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w= github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
github.com/philhofer/fwd v1.2.0 h1:e6DnBTl7vGY+Gz322/ASL4Gyp1FspeMvx1RNDoToZuM=
github.com/philhofer/fwd v1.2.0/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM=
github.com/pierrec/lz4/v4 v4.1.25 h1:kocOqRffaIbU5djlIBr7Wh+cx82C0vtFb0fOurZHqD0= github.com/pierrec/lz4/v4 v4.1.25 h1:kocOqRffaIbU5djlIBr7Wh+cx82C0vtFb0fOurZHqD0=
github.com/pierrec/lz4/v4 v4.1.25/go.mod h1:EoQMVJgeeEOMsCqCzqFm2O0cJvljX2nGZjcRIPL34O4= github.com/pierrec/lz4/v4 v4.1.25/go.mod h1:EoQMVJgeeEOMsCqCzqFm2O0cJvljX2nGZjcRIPL34O4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tinylib/msgp v1.6.1 h1:ESRv8eL3u+DNHUoSAAQRE50Hm162zqAnBoGv9PzScPY=
github.com/tinylib/msgp v1.6.1/go.mod h1:RSp0LW9oSxFut3KzESt5Voq4GVWyS+PSulT77roAqEA=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY= golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY=
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70= golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70=
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis= modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis=
modernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0= modernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
modernc.org/ccgo/v4 v4.30.1 h1:4r4U1J6Fhj98NKfSjnPUN7Ze2c6MnAdL0hWw6+LrJpc= modernc.org/ccgo/v4 v4.30.1 h1:4r4U1J6Fhj98NKfSjnPUN7Ze2c6MnAdL0hWw6+LrJpc=


@@ -4,6 +4,7 @@ package client
import ( import (
"bytes" "bytes"
"context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
@@ -13,6 +14,10 @@ import (
"strings" "strings"
"time" "time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/mistifyio/go-zfs" "github.com/mistifyio/go-zfs"
"github.com/pierrec/lz4/v4" "github.com/pierrec/lz4/v4"
) )
@@ -109,10 +114,40 @@ func (c *Client) SendSnapshot(snapshot *zfs.Dataset) error {
return c.sendViaZFS(snapshot, uploadResp.StorageKey) return c.sendViaZFS(snapshot, uploadResp.StorageKey)
} }
// streamToS3 streams a ZFS snapshot to S3 storage via HTTP. // streamToS3 streams a ZFS snapshot to S3 storage using AWS SDK.
// The snapshot is optionally compressed with LZ4 before transmission. // The snapshot is optionally compressed with LZ4 before transmission.
func (c *Client) streamToS3(snapshot *zfs.Dataset, uploadURL, storageKey string) error { func (c *Client) streamToS3(snapshot *zfs.Dataset, uploadURL, storageKey string) error {
fmt.Printf("→ Streaming snapshot to S3...\n") fmt.Printf("→ Uploading snapshot to S3...\n")
// Ensure endpoint has valid URI scheme
endpoint := c.config.S3Endpoint
if endpoint != "" && !strings.HasPrefix(endpoint, "http://") && !strings.HasPrefix(endpoint, "https://") {
endpoint = "http://" + endpoint
}
// Create AWS config
awsCfg, err := config.LoadDefaultConfig(context.TODO(),
config.WithRegion(c.config.S3Region),
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(
c.config.S3AccessKey,
c.config.S3SecretKey,
"",
)),
)
if err != nil {
return fmt.Errorf("failed to load AWS config: %v", err)
}
// Determine if using custom endpoint (non-AWS)
customEndpoint := endpoint != "" && endpoint != "http://s3.amazonaws.com" && endpoint != "https://s3.amazonaws.com"
// Create S3 client
s3Client := s3.NewFromConfig(awsCfg, func(o *s3.Options) {
if customEndpoint {
o.BaseEndpoint = aws.String(endpoint)
o.UsePathStyle = true // Required for MinIO compatible storage
}
})
// Create ZFS send command // Create ZFS send command
cmd := exec.Command("zfs", "send", snapshot.Name) cmd := exec.Command("zfs", "send", snapshot.Name)
@@ -145,57 +180,24 @@ func (c *Client) streamToS3(snapshot *zfs.Dataset, uploadURL, storageKey string)
reader = pr reader = pr
} }
// Create HTTP request // Upload to S3 using PutObject
req, err := http.NewRequest("POST", c.config.ServerURL+uploadURL, reader) _, err = s3Client.PutObject(context.TODO(), &s3.PutObjectInput{
if err != nil { Bucket: aws.String(c.config.S3Bucket),
return fmt.Errorf("failed to create request: %v", err) Key: aws.String(storageKey),
} Body: reader,
ContentType: aws.String("application/octet-stream"),
// Set required headers })
req.Header.Set("X-API-Key", c.config.APIKey)
req.Header.Set("X-Storage-Key", storageKey)
req.Header.Set("X-Dataset-Name", c.config.LocalDataset)
req.Header.Set("X-Compressed", fmt.Sprintf("%v", c.config.Compress))
req.Header.Set("Content-Type", "application/octet-stream")
// Send request with no timeout for large uploads
client := &http.Client{
Timeout: 0,
}
httpResp, err := client.Do(req)
if err != nil {
cmd.Process.Kill()
return fmt.Errorf("failed to upload: %v", err)
}
defer httpResp.Body.Close()
if httpResp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(httpResp.Body)
return fmt.Errorf("upload failed with status %d: %s", httpResp.StatusCode, body)
}
// Wait for zfs send to complete
if err := cmd.Wait(); err != nil { if err := cmd.Wait(); err != nil {
return fmt.Errorf("zfs send failed: %v", err) return fmt.Errorf("zfs send failed: %v", err)
} }
// Parse response if err != nil {
var result struct { return fmt.Errorf("failed to upload to S3: %v", err)
Success bool `json:"success"`
Message string `json:"message"`
Size int64 `json:"size"`
} }
if err := json.NewDecoder(httpResp.Body).Decode(&result); err != nil { fmt.Printf("✓ Snapshot uploaded to S3 successfully!\n")
return fmt.Errorf("failed to decode response: %v", err)
}
if !result.Success {
return fmt.Errorf("upload failed: %s", result.Message)
}
fmt.Printf("✓ Snapshot uploaded successfully!\n")
fmt.Printf(" Size: %.2f MB\n", float64(result.Size)/(1024*1024))
return nil return nil
} }
@@ -234,6 +236,55 @@ func (c *Client) sendViaZFS(snapshot *zfs.Dataset, receivePath string) error {
return nil return nil
} }
// SnapshotResult contains the result of a snapshot creation and send operation.
type SnapshotResult struct {
FullBackup bool
Snapshot *zfs.Dataset
}
// CreateAndSend creates a snapshot and sends it to the backup server.
// It automatically detects if this is a full or incremental backup:
// - If no bookmark exists, does a full backup
// - If bookmark exists, does an incremental backup from the bookmark
func (c *Client) CreateAndSend() (*SnapshotResult, error) {
// Check for existing bookmark to determine backup type
lastBookmark, err := c.GetLastBookmark()
if err != nil {
return nil, fmt.Errorf("failed to check bookmarks: %v", err)
}
// Create new snapshot
snapshot, err := c.CreateSnapshot()
if err != nil {
return nil, fmt.Errorf("failed to create snapshot: %v", err)
}
isFullBackup := lastBookmark == ""
if isFullBackup {
fmt.Println("→ No previous backup found, doing FULL backup...")
// Send as full (no base)
if err := c.SendIncremental(snapshot, ""); err != nil {
return nil, fmt.Errorf("failed to send snapshot: %v", err)
}
} else {
fmt.Printf("→ Found previous backup, doing INCREMENTAL from %s...", lastBookmark)
// Send as incremental from bookmark
if err := c.SendIncremental(snapshot, lastBookmark); err != nil {
return nil, fmt.Errorf("failed to send incremental: %v", err)
}
}
// Create bookmark for future incremental backups
if err := c.CreateBookmark(snapshot); err != nil {
fmt.Printf("Warning: failed to create bookmark: %v\n", err)
}
return &SnapshotResult{
FullBackup: isFullBackup,
Snapshot: snapshot,
}, nil
}
// GetStatus retrieves and displays the client's backup status from the server. // GetStatus retrieves and displays the client's backup status from the server.
// Shows storage usage, quota, and snapshot count. // Shows storage usage, quota, and snapshot count.
func (c *Client) GetStatus() error { func (c *Client) GetStatus() error {
@@ -273,106 +324,3 @@ func (c *Client) GetStatus() error {
return nil return nil
} }
// RequestRotation asks the server to rotate old snapshots.
// This deletes the oldest snapshots to free up space.
func (c *Client) RequestRotation() error {
reqBody, _ := json.Marshal(map[string]string{
"client_id": c.config.ClientID,
"api_key": c.config.APIKey,
})
resp, err := http.Post(c.config.ServerURL+"/rotate", "application/json", bytes.NewBuffer(reqBody))
if err != nil {
return fmt.Errorf("failed to request rotation: %v", err)
}
defer resp.Body.Close()
var rotateResp struct {
Success bool `json:"success"`
DeletedCount int `json:"deleted_count"`
ReclaimedBytes int64 `json:"reclaimed_bytes"`
}
if err := json.NewDecoder(resp.Body).Decode(&rotateResp); err != nil {
return fmt.Errorf("failed to decode response: %v", err)
}
if !rotateResp.Success {
return fmt.Errorf("rotation failed")
}
fmt.Printf("✓ Rotation complete\n")
fmt.Printf(" Deleted: %d snapshots\n", rotateResp.DeletedCount)
fmt.Printf(" Freed: %.2f GB\n", float64(rotateResp.ReclaimedBytes)/(1024*1024*1024))
return nil
}
// ServerRotationPolicy represents the rotation policy response from the server
type ServerRotationPolicy struct {
Success bool `json:"success"`
Message string `json:"message"`
RotationPolicy *SnapshotPolicy `json:"rotation_policy"`
ServerManaged bool `json:"server_managed"`
}
// GetRotationPolicy fetches the rotation policy from the server.
// If the server has a policy configured for this client, it must be used.
// Returns the policy and whether it's server-managed (mandatory).
func (c *Client) GetRotationPolicy() (*ServerRotationPolicy, error) {
url := fmt.Sprintf("%s/rotation-policy?client_id=%s&api_key=%s",
c.config.ServerURL, c.config.ClientID, c.config.APIKey)
resp, err := http.Get(url)
if err != nil {
return nil, fmt.Errorf("failed to get rotation policy: %v", err)
}
defer resp.Body.Close()
var policyResp ServerRotationPolicy
if err := json.NewDecoder(resp.Body).Decode(&policyResp); err != nil {
return nil, fmt.Errorf("failed to decode response: %v", err)
}
if !policyResp.Success {
return nil, fmt.Errorf("failed to get rotation policy: %s", policyResp.Message)
}
return &policyResp, nil
}
// ChangePassword changes the client's API key on the server.
// Requires the current API key for authentication and the new key.
func (c *Client) ChangePassword(newAPIKey string) error {
reqBody, _ := json.Marshal(map[string]string{
"client_id": c.config.ClientID,
"current_key": c.config.APIKey,
"new_key": newAPIKey,
})
resp, err := http.Post(c.config.ServerURL+"/client/change-password", "application/json", bytes.NewBuffer(reqBody))
if err != nil {
return fmt.Errorf("failed to change password: %v", err)
}
defer resp.Body.Close()
var result struct {
Success bool `json:"success"`
Message string `json:"message"`
}
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return fmt.Errorf("failed to decode response: %v", err)
}
if !result.Success {
return fmt.Errorf("failed to change password: %s", result.Message)
}
// Update local config with new key
c.config.APIKey = newAPIKey
fmt.Printf("✓ Password changed successfully\n")
return nil
}


@@ -21,6 +21,16 @@ type Config struct {
LocalDataset string `json:"local_dataset"` LocalDataset string `json:"local_dataset"`
// Compress enables LZ4 compression for transfers // Compress enables LZ4 compression for transfers
Compress bool `json:"compress"` Compress bool `json:"compress"`
// S3Endpoint is the S3 endpoint URL (optional, for direct S3 uploads)
S3Endpoint string `json:"s3_endpoint"`
// S3Region is the AWS region
S3Region string `json:"s3_region"`
// S3Bucket is the S3 bucket name
S3Bucket string `json:"s3_bucket"`
// S3AccessKey is the AWS access key
S3AccessKey string `json:"s3_access_key"`
// S3SecretKey is the AWS secret key
S3SecretKey string `json:"s3_secret_key"`
} }
// LoadConfig loads client configuration from environment variables and .env file. // LoadConfig loads client configuration from environment variables and .env file.
@@ -35,6 +45,11 @@ func LoadConfig() *Config {
ServerURL: getEnv("SERVER_URL", "http://backup-server:8080"), ServerURL: getEnv("SERVER_URL", "http://backup-server:8080"),
LocalDataset: getEnv("LOCAL_DATASET", "tank/data"), LocalDataset: getEnv("LOCAL_DATASET", "tank/data"),
Compress: getEnv("COMPRESS", "true") == "true", Compress: getEnv("COMPRESS", "true") == "true",
S3Endpoint: getEnv("S3_ENDPOINT", ""),
S3Region: getEnv("S3_REGION", "us-east-1"),
S3Bucket: getEnv("S3_BUCKET", "zfs-backups"),
S3AccessKey: getEnv("S3_ACCESS_KEY", ""),
S3SecretKey: getEnv("S3_SECRET_KEY", ""),
} }
} }


@@ -1,77 +1,27 @@
// Package client provides ZFS snapshot backup client functionality. // Package client provides ZFS snapshot backup client functionality.
// This file contains snapshot management functions including creation, // This file contains snapshot management functions for creating and sending snapshots.
// bookmarking, and rotation similar to zfs-auto-snapshot.
package client package client
import ( import (
"bytes" "bytes"
"context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
"os" "os"
"os/exec" "os/exec"
"sort"
"strings" "strings"
"time" "time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/mistifyio/go-zfs" "github.com/mistifyio/go-zfs"
"github.com/pierrec/lz4/v4" "github.com/pierrec/lz4/v4"
) )
// SnapshotPolicy defines retention settings for automatic snapshots.
type SnapshotPolicy struct {
// KeepHourly is the number of hourly snapshots to keep
KeepHourly int
// KeepDaily is the number of daily snapshots to keep
KeepDaily int
// KeepWeekly is the number of weekly snapshots to keep
KeepWeekly int
// KeepMonthly is the number of monthly snapshots to keep
KeepMonthly int
}
// DefaultPolicy returns the default snapshot retention policy.
func DefaultPolicy() *SnapshotPolicy {
return &SnapshotPolicy{
KeepHourly: 24,
KeepDaily: 7,
KeepWeekly: 4,
KeepMonthly: 12,
}
}
// SnapshotType represents the type of snapshot (hourly, daily, etc.)
type SnapshotType string
const (
SnapshotHourly SnapshotType = "hourly"
SnapshotDaily SnapshotType = "daily"
SnapshotWeekly SnapshotType = "weekly"
SnapshotMonthly SnapshotType = "monthly"
SnapshotManual SnapshotType = "manual"
)
// CreateSnapshotWithType creates a snapshot with a specific type label.
// The snapshot name follows the pattern: zfs-backup-<type>-<timestamp>
func (c *Client) CreateSnapshotWithType(snapshotType SnapshotType) (*zfs.Dataset, error) {
ds, err := zfs.GetDataset(c.config.LocalDataset)
if err != nil {
return nil, fmt.Errorf("failed to get dataset: %v", err)
}
timestamp := time.Now().Format("2006-01-02_15-04-05")
snapshotName := fmt.Sprintf("zfs-backup-%s-%s", snapshotType, timestamp)
snapshot, err := ds.Snapshot(snapshotName, false)
if err != nil {
return nil, fmt.Errorf("failed to create snapshot: %v", err)
}
fmt.Printf("✓ Created %s snapshot: %s@%s\n", snapshotType, c.config.LocalDataset, snapshotName)
return snapshot, nil
}
// CreateBookmark creates a ZFS bookmark from a snapshot. // CreateBookmark creates a ZFS bookmark from a snapshot.
// Bookmarks allow incremental sends even after the source snapshot is deleted. // Bookmarks allow incremental sends even after the source snapshot is deleted.
func (c *Client) CreateBookmark(snapshot *zfs.Dataset) error { func (c *Client) CreateBookmark(snapshot *zfs.Dataset) error {
@@ -194,9 +144,39 @@ func (c *Client) SendIncremental(snapshot *zfs.Dataset, base string) error {
return c.sendIncrementalViaZFS(snapshot, base, uploadResp.StorageKey) return c.sendIncrementalViaZFS(snapshot, base, uploadResp.StorageKey)
} }
// streamIncrementalToS3 streams an incremental ZFS snapshot to S3. // streamIncrementalToS3 streams an incremental ZFS snapshot to S3 using AWS SDK.
func (c *Client) streamIncrementalToS3(snapshot *zfs.Dataset, base, uploadURL, storageKey string) error { func (c *Client) streamIncrementalToS3(snapshot *zfs.Dataset, base, uploadURL, storageKey string) error {
fmt.Printf("→ Streaming snapshot to S3...\n") fmt.Printf("→ Uploading snapshot to S3...\n")
// Ensure endpoint has valid URI scheme
endpoint := c.config.S3Endpoint
if endpoint != "" && !strings.HasPrefix(endpoint, "http://") && !strings.HasPrefix(endpoint, "https://") {
endpoint = "http://" + endpoint
}
// Create AWS config
awsCfg, err := config.LoadDefaultConfig(context.TODO(),
config.WithRegion(c.config.S3Region),
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(
c.config.S3AccessKey,
c.config.S3SecretKey,
"",
)),
)
if err != nil {
return fmt.Errorf("failed to load AWS config: %v", err)
}
// Determine if using custom endpoint (non-AWS)
customEndpoint := endpoint != "" && endpoint != "http://s3.amazonaws.com" && endpoint != "https://s3.amazonaws.com"
// Create S3 client
s3Client := s3.NewFromConfig(awsCfg, func(o *s3.Options) {
if customEndpoint {
o.BaseEndpoint = aws.String(endpoint)
o.UsePathStyle = true // Required for MinIO compatible storage
}
})
// Create ZFS send command // Create ZFS send command
var cmd *exec.Cmd var cmd *exec.Cmd
@@ -238,61 +218,24 @@ func (c *Client) streamIncrementalToS3(snapshot *zfs.Dataset, base, uploadURL, s
reader = pr reader = pr
} }
// Create HTTP request // Upload to S3 using PutObject
req, err := http.NewRequest("POST", c.config.ServerURL+uploadURL, reader) _, err = s3Client.PutObject(context.TODO(), &s3.PutObjectInput{
if err != nil { Bucket: aws.String(c.config.S3Bucket),
return fmt.Errorf("failed to create request: %v", err) Key: aws.String(storageKey),
} Body: reader,
ContentType: aws.String("application/octet-stream"),
// Set required headers })
req.Header.Set("X-API-Key", c.config.APIKey)
req.Header.Set("X-Storage-Key", storageKey)
req.Header.Set("X-Dataset-Name", c.config.LocalDataset)
req.Header.Set("X-Compressed", fmt.Sprintf("%v", c.config.Compress))
req.Header.Set("X-Incremental", fmt.Sprintf("%v", base != ""))
if base != "" {
req.Header.Set("X-Base-Snapshot", base)
}
req.Header.Set("Content-Type", "application/octet-stream")
// Send request with no timeout for large uploads
client := &http.Client{
Timeout: 0,
}
httpResp, err := client.Do(req)
if err != nil {
cmd.Process.Kill()
return fmt.Errorf("failed to upload: %v", err)
}
defer httpResp.Body.Close()
if httpResp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(httpResp.Body)
return fmt.Errorf("upload failed with status %d: %s", httpResp.StatusCode, body)
}
// Wait for zfs send to complete
if err := cmd.Wait(); err != nil { if err := cmd.Wait(); err != nil {
return fmt.Errorf("zfs send failed: %v", err) return fmt.Errorf("zfs send failed: %v", err)
} }
// Parse response if err != nil {
var result struct { return fmt.Errorf("failed to upload to S3: %v", err)
Success bool `json:"success"`
Message string `json:"message"`
Size int64 `json:"size"`
} }
if err := json.NewDecoder(httpResp.Body).Decode(&result); err != nil { fmt.Printf("✓ Snapshot uploaded to S3 successfully!\n")
return fmt.Errorf("failed to decode response: %v", err)
}
if !result.Success {
return fmt.Errorf("upload failed: %s", result.Message)
}
fmt.Printf("✓ Snapshot uploaded successfully!\n")
fmt.Printf(" Size: %.2f MB\n", float64(result.Size)/(1024*1024))
return nil return nil
} }
@@ -300,7 +243,7 @@ func (c *Client) streamIncrementalToS3(snapshot *zfs.Dataset, base, uploadURL, s
// sendIncrementalViaZFS sends an incremental snapshot via ZFS send/receive over SSH. // sendIncrementalViaZFS sends an incremental snapshot via ZFS send/receive over SSH.
// This method is used when the server uses local ZFS storage. // This method is used when the server uses local ZFS storage.
func (c *Client) sendIncrementalViaZFS(snapshot *zfs.Dataset, base, receivePath string) error { func (c *Client) sendIncrementalViaZFS(snapshot *zfs.Dataset, base, receivePath string) error {
fmt.Printf("-> Sending via ZFS send/receive...\n") fmt.Printf(" Sending via ZFS send/receive...\n")
// Extract server host from URL // Extract server host from URL
serverHost := c.config.ServerURL serverHost := c.config.ServerURL
@@ -337,93 +280,6 @@ func (c *Client) sendIncrementalViaZFS(snapshot *zfs.Dataset, base, receivePath
return fmt.Errorf("failed to send snapshot: %v", err) return fmt.Errorf("failed to send snapshot: %v", err)
} }
fmt.Printf("Snapshot sent successfully!\n") fmt.Printf("Snapshot sent successfully!\n")
return nil return nil
} }
// RotateLocalSnapshots removes old snapshots based on the retention policy.
// This is similar to zfs-auto-snapshot's rotation behavior.
func (c *Client) RotateLocalSnapshots(policy *SnapshotPolicy) error {
ds, err := zfs.GetDataset(c.config.LocalDataset)
if err != nil {
return fmt.Errorf("failed to get dataset: %v", err)
}
snapshots, err := ds.Snapshots()
if err != nil {
return fmt.Errorf("failed to list snapshots: %v", err)
}
// Group snapshots by type
groups := make(map[SnapshotType][]*zfs.Dataset)
for _, snap := range snapshots {
snapType := parseSnapshotType(snap.Name)
groups[snapType] = append(groups[snapType], snap)
}
// Apply retention policy
deletedCount := 0
keepCount := map[SnapshotType]int{
SnapshotHourly: policy.KeepHourly,
SnapshotDaily: policy.KeepDaily,
SnapshotWeekly: policy.KeepWeekly,
SnapshotMonthly: policy.KeepMonthly,
SnapshotManual: -1, // Keep all manual snapshots
}
for snapType, snaps := range groups {
maxKeep := keepCount[snapType]
if maxKeep < 0 {
continue // Keep all
}
// Sort by creation time (oldest first)
sortSnapshotsByTime(snaps)
// Delete oldest snapshots exceeding the limit
if len(snaps) > maxKeep {
toDelete := snaps[:len(snaps)-maxKeep]
for _, snap := range toDelete {
fmt.Printf(" Deleting old snapshot: %s\n", snap.Name)
if err := snap.Destroy(zfs.DestroyDefault); err != nil {
fmt.Printf(" Warning: failed to delete %s: %v\n", snap.Name, err)
} else {
deletedCount++
}
}
}
}
if deletedCount > 0 {
fmt.Printf("✓ Rotated %d local snapshots\n", deletedCount)
}
return nil
}
// parseSnapshotType extracts the snapshot type from the snapshot name.
func parseSnapshotType(name string) SnapshotType {
if strings.Contains(name, "hourly") {
return SnapshotHourly
}
if strings.Contains(name, "daily") {
return SnapshotDaily
}
if strings.Contains(name, "weekly") {
return SnapshotWeekly
}
if strings.Contains(name, "monthly") {
return SnapshotMonthly
}
return SnapshotManual
}
// sortSnapshotsByTime sorts snapshots by creation time (oldest first).
// Uses the snapshot name which contains timestamp for sorting.
func sortSnapshotsByTime(snaps []*zfs.Dataset) {
sort.Slice(snaps, func(i, j int) bool {
// Extract timestamp from snapshot name for comparison
// Names are like: dataset@zfs-backup-hourly-2006-01-02_15-04-05
return snaps[i].Name < snaps[j].Name
})
}


@@ -13,7 +13,8 @@ type Config struct {
S3SecretKey string S3SecretKey string
S3BucketName string S3BucketName string
S3UseSSL bool S3UseSSL bool
S3Enabled bool // Enable/disable S3 backend S3Enabled bool // Enable/disable S3 backend
S3Region string // AWS region
BaseDataset string BaseDataset string
DatabasePath string // Path to SQLite database DatabasePath string // Path to SQLite database
Port string Port string
@@ -40,6 +41,7 @@ func LoadConfig() *Config {
S3BucketName: getEnv("S3_BUCKET", "zfs-snapshots"), S3BucketName: getEnv("S3_BUCKET", "zfs-snapshots"),
S3UseSSL: getEnv("S3_USE_SSL", "true") != "false", S3UseSSL: getEnv("S3_USE_SSL", "true") != "false",
S3Enabled: s3Enabled, S3Enabled: s3Enabled,
S3Region: getEnv("S3_REGION", "us-east-1"),
BaseDataset: getEnv("ZFS_BASE_DATASET", "backup"), BaseDataset: getEnv("ZFS_BASE_DATASET", "backup"),
DatabasePath: getEnv("DATABASE_PATH", "zfs-backup.db"), DatabasePath: getEnv("DATABASE_PATH", "zfs-backup.db"),
Port: getEnv("PORT", "8080"), Port: getEnv("PORT", "8080"),


@@ -7,10 +7,12 @@ import (
"log" "log"
"net/http" "net/http"
"os/exec" "os/exec"
"time" "strings"
"github.com/minio/minio-go/v7" "github.com/aws/aws-sdk-go-v2/aws"
"github.com/minio/minio-go/v7/pkg/credentials" "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/mistifyio/go-zfs" "github.com/mistifyio/go-zfs"
) )
@@ -23,95 +25,119 @@ type StorageBackend interface {
GetSize(ctx context.Context, key string) (int64, error) GetSize(ctx context.Context, key string) (int64, error)
} }
// S3Backend implements StorageBackend for S3-compatible storage // S3Backend implements StorageBackend for S3-compatible storage using AWS SDK v2
type S3Backend struct { type S3Backend struct {
client *minio.Client client *s3.Client
bucketName string bucketName string
} }
// NewS3Backend creates a new S3 storage backend // NewS3Backend creates a new S3 storage backend
func NewS3Backend(endpoint, accessKey, secretKey, bucketName string, useSSL bool) (*S3Backend, error) { func NewS3Backend(endpoint, accessKey, secretKey, bucketName string, useSSL bool, region string) (*S3Backend, error) {
// Create custom HTTP transport with extended timeouts for large file uploads // Ensure endpoint has valid URI scheme
transport := &http.Transport{ if endpoint != "" && !strings.HasPrefix(endpoint, "http://") && !strings.HasPrefix(endpoint, "https://") {
Proxy: http.ProxyFromEnvironment, if useSSL {
// Extended timeouts for streaming large ZFS snapshots endpoint = "https://" + endpoint
ResponseHeaderTimeout: 5 * time.Minute, } else {
ExpectContinueTimeout: 30 * time.Second, endpoint = "http://" + endpoint
IdleConnTimeout: 90 * time.Second, }
// Connection pooling
MaxIdleConns: 10,
MaxIdleConnsPerHost: 10,
DisableCompression: false,
} }
client, err := minio.New(endpoint, &minio.Options{ // Determine if using custom endpoint (non-AWS)
Creds: credentials.NewStaticV4(accessKey, secretKey, ""), customEndpoint := endpoint != "" && endpoint != "https://s3.amazonaws.com" && endpoint != "http://s3.amazonaws.com"
Secure: useSSL,
Transport: transport, // Load AWS config
awsCfg, err := config.LoadDefaultConfig(context.Background(),
config.WithRegion(region),
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(accessKey, secretKey, "")),
)
if err != nil {
return nil, fmt.Errorf("failed to load AWS config: %v", err)
}
// Create S3 client
s3Client := s3.NewFromConfig(awsCfg, func(o *s3.Options) {
if customEndpoint {
o.BaseEndpoint = aws.String(endpoint)
o.UsePathStyle = true // Required for MinIO and other S3-compatible storage
}
// Set HTTP client with extended timeout for large uploads
o.HTTPClient = &http.Client{
Timeout: 0, // No timeout for large file uploads
}
})
// Check if bucket exists (or create it for AWS S3)
ctx := context.Background()
_, err = s3Client.HeadBucket(ctx, &s3.HeadBucketInput{
Bucket: aws.String(bucketName),
}) })
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create S3 client: %v", err) // Try to create bucket
} _, err = s3Client.CreateBucket(ctx, &s3.CreateBucketInput{
Bucket: aws.String(bucketName),
// Ensure bucket exists })
ctx := context.Background()
exists, err := client.BucketExists(ctx, bucketName)
if err != nil {
return nil, fmt.Errorf("failed to check bucket: %v", err)
}
if !exists {
err = client.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{})
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create bucket: %v", err) log.Printf("Warning: failed to create bucket: %v", err)
} else {
log.Printf("Created S3 bucket: %s", bucketName)
} }
log.Printf("Created S3 bucket: %s", bucketName)
} }
return &S3Backend{ return &S3Backend{
client: client, client: s3Client,
bucketName: bucketName, bucketName: bucketName,
}, nil }, nil
} }
// Upload uploads data to S3 // Upload uploads data to S3
func (s *S3Backend) Upload(ctx context.Context, key string, data io.Reader, size int64) error { func (s *S3Backend) Upload(ctx context.Context, key string, data io.Reader, size int64) error {
_, err := s.client.PutObject(ctx, s.bucketName, key, data, size, _, err := s.client.PutObject(ctx, &s3.PutObjectInput{
minio.PutObjectOptions{ Bucket: aws.String(s.bucketName),
ContentType: "application/octet-stream", Key: aws.String(key),
PartSize: 10 * 1024 * 1024, // 10MB parts Body: data,
}) ContentType: aws.String("application/octet-stream"),
})
return err return err
} }
// Download retrieves data from S3 // Download retrieves data from S3
func (s *S3Backend) Download(ctx context.Context, key string) (io.ReadCloser, error) { func (s *S3Backend) Download(ctx context.Context, key string) (io.ReadCloser, error) {
obj, err := s.client.GetObject(ctx, s.bucketName, key, minio.GetObjectOptions{}) resp, err := s.client.GetObject(ctx, &s3.GetObjectInput{
Bucket: aws.String(s.bucketName),
Key: aws.String(key),
})
if err != nil { if err != nil {
return nil, err return nil, err
} }
return obj, nil return resp.Body, nil
} }
// Delete removes an object from S3 // Delete removes an object from S3
func (s *S3Backend) Delete(ctx context.Context, key string) error { func (s *S3Backend) Delete(ctx context.Context, key string) error {
return s.client.RemoveObject(ctx, s.bucketName, key, minio.RemoveObjectOptions{}) _, err := s.client.DeleteObject(ctx, &s3.DeleteObjectInput{
Bucket: aws.String(s.bucketName),
Key: aws.String(key),
})
return err
} }
// List returns all objects with the given prefix // List returns all objects with the given prefix
func (s *S3Backend) List(ctx context.Context, prefix string) ([]string, error) { func (s *S3Backend) List(ctx context.Context, prefix string) ([]string, error) {
var keys []string var keys []string
objectCh := s.client.ListObjects(ctx, s.bucketName, minio.ListObjectsOptions{ paginator := s3.NewListObjectsV2Paginator(s.client, &s3.ListObjectsV2Input{
Prefix: prefix, Bucket: aws.String(s.bucketName),
Recursive: true, Prefix: aws.String(prefix),
}) })
for object := range objectCh { for paginator.HasMorePages() {
if object.Err != nil { page, err := paginator.NextPage(ctx)
return nil, object.Err if err != nil {
return nil, err
}
for _, obj := range page.Contents {
keys = append(keys, *obj.Key)
} }
keys = append(keys, object.Key)
} }
return keys, nil return keys, nil
@@ -119,11 +145,14 @@ func (s *S3Backend) List(ctx context.Context, prefix string) ([]string, error) {
// GetSize returns the size of an object in S3 // GetSize returns the size of an object in S3
func (s *S3Backend) GetSize(ctx context.Context, key string) (int64, error) { func (s *S3Backend) GetSize(ctx context.Context, key string) (int64, error) {
info, err := s.client.StatObject(ctx, s.bucketName, key, minio.StatObjectOptions{}) info, err := s.client.HeadObject(ctx, &s3.HeadObjectInput{
Bucket: aws.String(s.bucketName),
Key: aws.String(key),
})
if err != nil { if err != nil {
return 0, err return 0, err
} }
return info.Size, nil return *info.ContentLength, nil
} }
// LocalBackend implements StorageBackend for local ZFS storage // LocalBackend implements StorageBackend for local ZFS storage


@@ -260,4 +260,5 @@ templ ClientPasswordModal() {
// AdminScripts renders the JavaScript for the admin panel // AdminScripts renders the JavaScript for the admin panel
templ AdminScripts() { templ AdminScripts() {
<script src="/admin/static/admin.js"></script> <script src="/admin/static/admin.js"></script>
<script>initTheme();</script>
} }


@@ -19,6 +19,30 @@ async function logout() {
location.reload(); location.reload();
} }
// Toggle dark/light theme
function toggleTheme() {
const html = document.documentElement;
const isDark = html.classList.contains('dark');
if (isDark) {
html.classList.remove('dark');
localStorage.setItem('theme', 'light');
} else {
html.classList.add('dark');
localStorage.setItem('theme', 'dark');
}
}
// Initialize theme on load
function initTheme() {
const savedTheme = localStorage.getItem('theme');
const prefersDark = window.matchMedia('(prefers-color-scheme: dark)').matches;
if (savedTheme === 'dark' || (!savedTheme && prefersDark)) {
document.documentElement.classList.add('dark');
}
}
// Load stats // Load stats
async function loadStats() { async function loadStats() {
try { try {


@@ -4,13 +4,14 @@ A distributed ZFS snapshot management system with S3-compatible storage support.
## Features ## Features
- **S3 Storage Support**: Store snapshots in any S3-compatible storage (AWS S3, MinIO, Backblaze B2, Wasabi, DigitalOcean Spaces) - **S3 Storage Support**: Store snapshots in any S3-compatible storage using AWS SDK v2 (AWS S3, MinIO, Backblaze B2, Wasabi, DigitalOcean Spaces)
- **Local ZFS Storage**: Option to use local ZFS datasets for maximum performance - **Local ZFS Storage**: Option to use local ZFS datasets for maximum performance
- **Multi-client Architecture**: Support for multiple clients with isolated storage and per-client quotas - **Multi-client Architecture**: Support for multiple clients with isolated storage and per-client quotas
- **Automatic Compression**: Gzip compression for reduced storage costs - **Automatic Compression**: LZ4 compression for reduced storage costs and faster transfers
- **Snapshot Rotation**: Automatic cleanup of old snapshots based on quota - **Snapshot Rotation**: Automatic cleanup of old snapshots based on quota
- **Server-Managed Rotation Policies**: Centralized control of client rotation policies - clients must use server-configured retention settings - **Server-Managed Rotation Policies**: Centralized control of client rotation policies - clients must use server-configured retention settings
- **API Key Authentication**: Secure client-server communication - **API Key Authentication**: Secure client-server communication
- **Simple CLI**: Just use `zfs-client snap` to back up - it automatically handles full/incremental backups
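
As a rough illustration of the LZ4 streaming compression mentioned above (a sketch, not the project's exact code; `compressSendStream` is a made-up name), the client-side idea is to pipe `zfs send` output through an `lz4.Writer` from `github.com/pierrec/lz4/v4` before uploading:

```go
package main

import (
	"fmt"
	"io"
	"os/exec"

	"github.com/pierrec/lz4/v4"
)

// compressSendStream starts `zfs send <snapshot>` and returns a reader that
// yields the LZ4-compressed stream, plus the command so the caller can Wait().
func compressSendStream(snapshot string) (io.ReadCloser, *exec.Cmd, error) {
	cmd := exec.Command("zfs", "send", snapshot)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, nil, err
	}
	if err := cmd.Start(); err != nil {
		return nil, nil, err
	}

	pr, pw := io.Pipe()
	go func() {
		lw := lz4.NewWriter(pw)
		_, copyErr := io.Copy(lw, stdout) // compress while the snapshot streams
		if cerr := lw.Close(); copyErr == nil {
			copyErr = cerr
		}
		pw.CloseWithError(copyErr) // propagate any error to the reader side
	}()
	return pr, cmd, nil
}

func main() {
	r, cmd, err := compressSendStream("tank/data@zfs-backup-2026-01-01")
	if err != nil {
		panic(err)
	}
	n, _ := io.Copy(io.Discard, r) // the real client uploads this to S3 or the server
	_ = cmd.Wait()
	fmt.Printf("compressed stream: %d bytes\n", n)
}
```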
## Project Structure ## Project Structure
@@ -93,11 +94,19 @@ API_KEY=secret123
SERVER_URL=http://backup-server:8080 SERVER_URL=http://backup-server:8080
LOCAL_DATASET=tank/data LOCAL_DATASET=tank/data
COMPRESS=true COMPRESS=true
# Optional: Direct S3 upload (bypasses server storage)
S3_ENDPOINT=https://s3.amazonaws.com
S3_REGION=us-east-1
S3_BUCKET=zfs-backups
S3_ACCESS_KEY=your_access_key
S3_SECRET_KEY=your_secret_key
``` ```
> **Important**: > **Important**:
> - The `API_KEY` in the client `.env` file must be the **raw (unhashed)** key. The server stores the SHA-256 hash in the database. > - The `API_KEY` in the client `.env` file must be the **raw (unhashed)** key. The server stores the SHA-256 hash in the database.
> - **Storage type is determined by the server**, not the client. The server decides whether to use S3 or local ZFS storage based on its configuration. > - **Storage type is determined by the server**, not the client. The server decides whether to use S3 or local ZFS storage based on its configuration.
> - The client automatically handles full vs incremental backups based on whether a bookmark exists.
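
To make the raw-key/hash note concrete, here is a minimal sketch of the relationship, assuming the server keeps a hex-encoded SHA-256 digest of the key (the hex encoding is an assumption; only the use of SHA-256 is stated above):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	rawKey := "secret123" // the value the client keeps in .env as API_KEY
	sum := sha256.Sum256([]byte(rawKey))
	// The server compares against a stored digest rather than the raw key.
	fmt.Println(hex.EncodeToString(sum[:]))
}
```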
### Restore Tool Configuration ### Restore Tool Configuration
@@ -128,52 +137,18 @@ zfs-server
### Client Commands ### Client Commands
The `zfs-client` tool provides the following commands for managing ZFS snapshots: The `zfs-client` tool provides simple commands for creating and sending ZFS snapshots:
#### `snap`
Creates a snapshot and sends it to the server. It automatically detects whether this is the first backup (full) or a subsequent backup (incremental).

```bash
zfs-client snap
```

On the first run it prints: `→ No previous backup found, doing FULL backup...`

On subsequent runs it automatically performs incremental backups from the last bookmark.

The following commands from the previous version were replaced by the single `snap` command:

#### `backup`
Creates a snapshot and sends it to the server. Automatically uses incremental backup if a bookmark exists.

```bash
zfs-client backup
```

#### `backup-full`
Forces a full backup (no incremental). Use for the initial backup or when you want to resend the complete dataset.

```bash
zfs-client backup-full
```

#### `backup-incremental`
Creates an incremental backup from the last bookmark. Requires an existing bookmark from a previous full backup.

```bash
zfs-client backup-incremental
```
#### `snapshot <type>`
Creates a typed snapshot (hourly, daily, weekly, monthly) with automatic rotation. The rotation policy is fetched from the server if configured.
```bash
zfs-client snapshot hourly
zfs-client snapshot daily
zfs-client snapshot weekly
zfs-client snapshot monthly
```
#### `rotate`
Rotates local snapshots based on the retention policy. If the server has a rotation policy configured, it will be used; otherwise, default values apply.
```bash
zfs-client rotate
```
#### `rotate-remote`
Requests the server to rotate (delete old) remote snapshots to free up storage quota.
```bash
zfs-client rotate-remote
```
#### `status` #### `status`
Displays the current backup status including storage usage, quota, and snapshot count from the server. Displays the current backup status including storage usage, quota, and snapshot count from the server.
@@ -182,13 +157,6 @@ Displays the current backup status including storage usage, quota, and snapshot
zfs-client status zfs-client status
``` ```
#### `bookmarks`
Lists ZFS bookmarks on the local system. Bookmarks are used as reference points for incremental backups.
```bash
zfs-client bookmarks
```
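
For reference, the bookmark mechanism behind incremental backups boils down to two plain ZFS operations; the sketch below shells out to them with placeholder dataset and snapshot names (it is illustrative, not part of the client):

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

// run executes a command, mirroring its output, and reports (but does not abort on) failures.
func run(args ...string) {
	cmd := exec.Command(args[0], args[1:]...)
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		fmt.Fprintf(os.Stderr, "%v failed: %v\n", args, err)
	}
}

func main() {
	// Turn an already-sent snapshot into a bookmark; the snapshot itself can then
	// be destroyed while the bookmark stays usable as an incremental source.
	run("zfs", "bookmark", "tank/data@zfs-backup-2026-01-01", "tank/data#last-backup")

	// Later, send only the blocks changed since that bookmark. Here the stream is
	// redirected to a file; the client pipes the same stream to S3 or the server.
	run("sh", "-c", "zfs send -i tank/data#last-backup tank/data@zfs-backup-2026-02-01 > /tmp/incremental.zfs")
}
```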
#### `help` #### `help`
Shows the help message with all available commands and options. Shows the help message with all available commands and options.
@@ -196,6 +164,23 @@ Shows the help message with all available commands and options.
zfs-client help zfs-client help
``` ```
### Client Configuration
```env
CLIENT_ID=client1
API_KEY=secret123
SERVER_URL=http://backup-server:8080
LOCAL_DATASET=tank/data
COMPRESS=true
# Optional: S3 direct upload (bypasses server)
S3_ENDPOINT=https://s3.amazonaws.com
S3_REGION=us-east-1
S3_BUCKET=zfs-backups
S3_ACCESS_KEY=your_access_key
S3_SECRET_KEY=your_secret_key
```
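
A condensed sketch of how the optional `S3_*` settings above feed the AWS SDK v2 client used for direct uploads (paraphrased from the client code in this commit; `newS3Client` is an illustrative name):

```go
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// newS3Client builds an S3 client from the S3_* values; endpoint may be empty for AWS itself.
func newS3Client(endpoint, region, accessKey, secretKey string) (*s3.Client, error) {
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithRegion(region),
		config.WithCredentialsProvider(
			credentials.NewStaticCredentialsProvider(accessKey, secretKey, "")),
	)
	if err != nil {
		return nil, err
	}
	return s3.NewFromConfig(cfg, func(o *s3.Options) {
		if endpoint != "" {
			o.BaseEndpoint = aws.String(endpoint) // e.g. a MinIO, Wasabi, or B2 endpoint
			o.UsePathStyle = true                 // most non-AWS endpoints need path-style addressing
		}
	}), nil
}

func main() {
	client, err := newS3Client("http://minio.local:9000", "us-east-1", "access", "secret")
	if err != nil {
		panic(err)
	}
	_ = client // used for PutObject / GetObject calls in the real client
}
```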
### Restore Tool Commands ### Restore Tool Commands
The `zfs-restore` tool provides commands for listing and restoring snapshots from the backup server: The `zfs-restore` tool provides commands for listing and restoring snapshots from the backup server:

zfs-client (binary executable file, contents not shown)