5 Commits
v0.0.2 ... main

SHA1 Message Date
a330ce9834 multi dataset 2026-02-16 03:02:10 +01:00
1903535dc5 better snapshots management 2026-02-16 02:04:57 +01:00
2a5221c29a restore 2026-02-16 01:33:30 +01:00
8b592db3dd remove s3 from client 2026-02-15 12:58:05 +01:00
5892ac2a2e simplyfy 2026-02-14 19:57:24 +01:00
19 changed files with 999 additions and 973 deletions

View File

@@ -1,5 +1,4 @@
// Command zfs-client is the CLI tool for creating and uploading ZFS snapshots.
// It provides commands for backup, status checking, snapshot rotation, and incremental backups.
// Command zfs-client is a simple CLI tool for creating and sending ZFS snapshots.
package main
import (
@@ -22,190 +21,36 @@ func main() {
command := os.Args[1]
switch command {
case "backup":
// Default: create manual backup (full or incremental)
fmt.Println("=== Creating and sending backup ===\n")
case "snap", "snapshot":
// Create snapshot and send to server (auto full/incremental)
// Optional: specify dataset as argument
targetDataset := ""
if len(os.Args) > 2 {
targetDataset = os.Args[2]
fmt.Printf("→ Using dataset: %s\n", targetDataset)
}
snapshot, err := c.CreateSnapshot()
fmt.Println("=== Creating and sending snapshot ===\n")
snapshot, err := c.CreateAndSend(targetDataset)
if err != nil {
fmt.Printf("Error creating snapshot: %v\n", err)
fmt.Printf("Error: %v\n", err)
os.Exit(1)
}
if err := c.SendSnapshot(snapshot); err != nil {
fmt.Printf("Error sending snapshot: %v\n", err)
os.Exit(1)
}
fmt.Println("\n✓ Backup completed successfully!")
case "backup-full":
// Force full backup (no incremental)
fmt.Println("=== Creating full backup ===\n")
snapshot, err := c.CreateSnapshot()
if err != nil {
fmt.Printf("Error creating snapshot: %v\n", err)
os.Exit(1)
}
if err := c.SendIncremental(snapshot, ""); err != nil {
fmt.Printf("Error sending snapshot: %v\n", err)
os.Exit(1)
}
// Create bookmark for future incremental backups
if err := c.CreateBookmark(snapshot); err != nil {
fmt.Printf("Warning: failed to create bookmark: %v\n", err)
}
fmt.Println("\n✓ Full backup completed successfully!")
case "backup-incremental":
// Incremental backup from last bookmark
fmt.Println("=== Creating incremental backup ===\n")
// Check for existing bookmark
lastBookmark, err := c.GetLastBookmark()
if err != nil {
fmt.Printf("Error checking bookmarks: %v\n", err)
os.Exit(1)
}
if lastBookmark == "" {
fmt.Println("No existing bookmark found. Use 'backup-full' for initial backup.")
os.Exit(1)
}
snapshot, err := c.CreateSnapshot()
if err != nil {
fmt.Printf("Error creating snapshot: %v\n", err)
os.Exit(1)
}
if err := c.SendIncremental(snapshot, lastBookmark); err != nil {
fmt.Printf("Error sending incremental snapshot: %v\n", err)
os.Exit(1)
}
// Create bookmark for future incremental backups
if err := c.CreateBookmark(snapshot); err != nil {
fmt.Printf("Warning: failed to create bookmark: %v\n", err)
}
fmt.Println("\n✓ Incremental backup completed successfully!")
case "snapshot":
// Create typed snapshots (hourly, daily, weekly, monthly)
if len(os.Args) < 3 {
fmt.Println("Usage: zfs-client snapshot <hourly|daily|weekly|monthly>")
os.Exit(1)
}
snapType := client.SnapshotType(os.Args[2])
switch snapType {
case client.SnapshotHourly, client.SnapshotDaily, client.SnapshotWeekly, client.SnapshotMonthly:
// Valid type
default:
fmt.Printf("Invalid snapshot type: %s\n", snapType)
fmt.Println("Valid types: hourly, daily, weekly, monthly")
os.Exit(1)
}
fmt.Printf("=== Creating %s snapshot ===\n\n", snapType)
snapshot, err := c.CreateSnapshotWithType(snapType)
if err != nil {
fmt.Printf("Error creating snapshot: %v\n", err)
os.Exit(1)
}
// Check for existing bookmark for incremental
lastBookmark, _ := c.GetLastBookmark()
if err := c.SendIncremental(snapshot, lastBookmark); err != nil {
fmt.Printf("Error sending snapshot: %v\n", err)
os.Exit(1)
}
// Create bookmark
if err := c.CreateBookmark(snapshot); err != nil {
fmt.Printf("Warning: failed to create bookmark: %v\n", err)
}
// Rotate local snapshots using server policy if available
policy, err := getRotationPolicy(c)
if err != nil {
fmt.Printf("Warning: failed to get rotation policy: %v\n", err)
policy = client.DefaultPolicy()
}
if err := c.RotateLocalSnapshots(policy); err != nil {
fmt.Printf("Warning: failed to rotate snapshots: %v\n", err)
}
fmt.Printf("\n✓ %s snapshot completed successfully!\n", snapType)
case "rotate":
// Rotate local snapshots using server policy if available
fmt.Println("=== Rotating local snapshots ===\n")
policy, err := getRotationPolicy(c)
if err != nil {
fmt.Printf("Warning: failed to get rotation policy: %v\n", err)
policy = client.DefaultPolicy()
}
if err := c.RotateLocalSnapshots(policy); err != nil {
fmt.Printf("Error rotating snapshots: %v\n", err)
os.Exit(1)
}
fmt.Println("\n✓ Rotation completed!")
case "rotate-remote":
// Request server to rotate remote snapshots
if err := c.RequestRotation(); err != nil {
fmt.Printf("Error requesting rotation: %v\n", err)
os.Exit(1)
if snapshot.FullBackup {
fmt.Println("\n✓ Full backup completed!")
} else {
fmt.Println("\n✓ Incremental backup completed!")
}
case "status":
// Check server connection and quota
if err := c.GetStatus(); err != nil {
fmt.Printf("Error getting status: %v\n", err)
os.Exit(1)
}
case "bookmarks":
// List bookmarks
fmt.Println("=== ZFS Bookmarks ===\n")
bookmark, err := c.GetLastBookmark()
if err != nil {
fmt.Printf("Error: %v\n", err)
os.Exit(1)
}
if bookmark == "" {
fmt.Println("No bookmarks found")
} else {
fmt.Printf("Last bookmark: %s\n", bookmark)
}
case "change-password":
// Change client API key/password
if len(os.Args) < 3 {
fmt.Println("Usage: zfs-client change-password <new-api-key>")
os.Exit(1)
}
newKey := os.Args[2]
fmt.Println("=== Changing API Key ===\n")
if err := c.ChangePassword(newKey); err != nil {
fmt.Printf("Error: %v\n", err)
os.Exit(1)
}
fmt.Println("\n✓ API key changed successfully!")
fmt.Println("Update your .env file with the new API_KEY value.")
case "help", "-h", "--help":
printUsage()
@@ -216,56 +61,22 @@ func main() {
}
}
// getRotationPolicy fetches the rotation policy from the server.
// If the server has a policy configured, it must be used.
// Otherwise, the default policy is returned.
func getRotationPolicy(c *client.Client) (*client.SnapshotPolicy, error) {
serverPolicy, err := c.GetRotationPolicy()
if err != nil {
return nil, err
}
if serverPolicy.ServerManaged && serverPolicy.RotationPolicy != nil {
fmt.Println(" Using server-managed rotation policy")
return serverPolicy.RotationPolicy, nil
}
// No server policy, use default
fmt.Println(" Using default rotation policy")
return client.DefaultPolicy(), nil
}
func printUsage() {
fmt.Println("ZFS Snapshot Backup Client")
fmt.Println("\nUsage: zfs-client [command]")
fmt.Println("ZFS Snapshot Backup Client - Simple Version")
fmt.Println("\nUsage: zfs-client [command] [dataset]")
fmt.Println("\nCommands:")
fmt.Println(" backup - Create snapshot and send (auto incremental if bookmark exists)")
fmt.Println(" backup-full - Create full backup (no incremental)")
fmt.Println(" backup-incremental - Create incremental backup from last bookmark")
fmt.Println(" snapshot <type> - Create typed snapshot (hourly|daily|weekly|monthly)")
fmt.Println(" rotate - Rotate local snapshots based on retention policy")
fmt.Println(" rotate-remote - Request server to rotate old remote snapshots")
fmt.Println(" status - Check server status and quota")
fmt.Println(" bookmarks - List ZFS bookmarks")
fmt.Println(" change-password <new-key> - Change client API key")
fmt.Println(" snap [dataset] - Create snapshot and send to server")
fmt.Println(" If dataset not specified, uses LOCAL_DATASET from config")
fmt.Println(" status - Check server connection and quota")
fmt.Println(" help - Show this help message")
fmt.Println("\nSnapshot Retention Policy (default):")
fmt.Println(" Hourly: 24 snapshots")
fmt.Println(" Daily: 7 snapshots")
fmt.Println(" Weekly: 4 snapshots")
fmt.Println(" Monthly: 12 snapshots")
fmt.Println("\nEnvironment Variables (can be set in .env file):")
fmt.Println(" CLIENT_ID - Client identifier (default: client1)")
fmt.Println(" API_KEY - API key for authentication (default: secret123)")
fmt.Println(" SERVER_URL - Backup server URL (default: http://localhost:8080)")
fmt.Println(" LOCAL_DATASET - ZFS dataset to backup (default: tank/data)")
fmt.Println(" COMPRESS - Enable LZ4 compression (default: true)")
fmt.Println(" STORAGE_TYPE - Storage type: s3 or local (default: s3)")
fmt.Println("\nExamples:")
fmt.Println(" zfs-client backup")
fmt.Println(" zfs-client backup-full")
fmt.Println(" zfs-client snapshot hourly")
fmt.Println(" zfs-client rotate")
fmt.Println(" zfs-client change-password mynewsecretkey")
fmt.Println(" CLIENT_ID=myclient zfs-client backup")
fmt.Println(" zfs-client snap # Use configured dataset")
fmt.Println(" zfs-client snap tank/data # Backup specific dataset")
fmt.Println(" zfs-client status")
}

View File

@@ -1,5 +1,4 @@
// Command zfs-restore is a CLI tool for restoring ZFS snapshots from a backup server.
// It provides commands for listing, restoring, and mounting snapshots.
// Command zfs-restore is a simple CLI tool for restoring ZFS snapshots from a backup server.
package main
import (
@@ -23,7 +22,8 @@ func main() {
command := os.Args[1]
switch command {
case "list":
case "list", "ls":
// List available snapshots
snapshots, err := client.ListSnapshots()
if err != nil {
fmt.Printf("Error: %v\n", err)
@@ -32,8 +32,13 @@ func main() {
client.DisplaySnapshots(snapshots)
case "restore":
if len(os.Args) < 4 {
fmt.Println("Usage: zfs-restore restore <snapshot-number> <target-dataset> [--force]")
// Restore snapshot - can use number or "latest" keyword
if len(os.Args) < 3 {
fmt.Println("Usage: zfs-restore restore <snapshot-number-or-latest> <target-dataset> [--force]")
fmt.Println("\nExamples:")
fmt.Println(" zfs-restore restore 1 tank/restored")
fmt.Println(" zfs-restore restore latest tank/restored")
fmt.Println(" zfs-restore restore latest tank/restored --force")
os.Exit(1)
}
@@ -48,99 +53,80 @@ func main() {
return snapshots[i].Timestamp.After(snapshots[j].Timestamp)
})
// Parse snapshot number
if len(snapshots) == 0 {
fmt.Println("No snapshots available. Run 'zfs-restore list' first.")
os.Exit(1)
}
snapshotArg := os.Args[2]
var snapshot *restore.SnapshotMetadata
if snapshotArg == "latest" {
snapshot = snapshots[0]
fmt.Printf("→ Restoring latest snapshot from %s\n", snapshot.Timestamp.Format("2006-01-02 15:04:05"))
} else {
var snapNum int
fmt.Sscanf(os.Args[2], "%d", &snapNum)
fmt.Sscanf(snapshotArg, "%d", &snapNum)
if snapNum < 1 || snapNum > len(snapshots) {
fmt.Printf("Invalid snapshot number. Use 'list' to see available snapshots.\n")
fmt.Printf("Invalid snapshot number. Use 'zfs-restore list' to see available snapshots.\n")
os.Exit(1)
}
snapshot = snapshots[snapNum-1]
}
// Get target dataset (either from args or prompt)
targetDataset := ""
force := false
for i, arg := range os.Args {
if arg == "--force" {
force = true
}
if arg != "restore" && arg != snapshotArg && arg != "--force" && targetDataset == "" && i > 2 && arg != os.Args[0] {
targetDataset = arg
}
}
if targetDataset == "" {
fmt.Printf("Target dataset: ")
fmt.Scanln(&targetDataset)
}
if targetDataset == "" {
fmt.Println("Error: target dataset is required")
os.Exit(1)
}
snapshot := snapshots[snapNum-1]
targetDataset := os.Args[3]
force := len(os.Args) > 4 && os.Args[4] == "--force"
if err := client.RestoreSnapshot(snapshot, targetDataset, force); err != nil {
fmt.Printf("Error: %v\n", err)
os.Exit(1)
}
case "save":
if len(os.Args) < 4 {
fmt.Println("Usage: zfs-restore save <snapshot-number> <output-file>")
os.Exit(1)
}
snapshots, err := client.ListSnapshots()
if err != nil {
fmt.Printf("Error: %v\n", err)
os.Exit(1)
}
sort.Slice(snapshots, func(i, j int) bool {
return snapshots[i].Timestamp.After(snapshots[j].Timestamp)
})
var snapNum int
fmt.Sscanf(os.Args[2], "%d", &snapNum)
if snapNum < 1 || snapNum > len(snapshots) {
fmt.Printf("Invalid snapshot number.\n")
os.Exit(1)
}
snapshot := snapshots[snapNum-1]
outputFile := os.Args[3]
if err := client.RestoreToFile(snapshot, outputFile); err != nil {
if err := client.RestoreSnapshot(snapshot, targetDataset, force, snapshots); err != nil {
fmt.Printf("Error: %v\n", err)
os.Exit(1)
}
case "mount":
if len(os.Args) < 4 {
fmt.Println("Usage: zfs-restore mount <dataset> <mountpoint>")
// Mount a restored dataset to access files
if len(os.Args) < 3 {
fmt.Println("Usage: zfs-restore mount <dataset> [mountpoint]")
fmt.Println("\nExamples:")
fmt.Println(" zfs-restore mount tank/restored /mnt/recover")
fmt.Println(" zfs-restore mount tank/restored # interactive")
os.Exit(1)
}
dataset := os.Args[2]
mountpoint := os.Args[3]
mountpoint := ""
if err := client.MountSnapshot(dataset, mountpoint); err != nil {
fmt.Printf("Error: %v\n", err)
os.Exit(1)
if len(os.Args) > 3 {
mountpoint = os.Args[3]
} else {
fmt.Printf("Mountpoint [/mnt/recover]: ")
fmt.Scanln(&mountpoint)
if mountpoint == "" {
mountpoint = "/mnt/recover"
}
}
case "latest":
if len(os.Args) < 3 {
fmt.Println("Usage: zfs-restore latest <target-dataset> [--force]")
os.Exit(1)
}
snapshots, err := client.ListSnapshots()
if err != nil {
fmt.Printf("Error: %v\n", err)
os.Exit(1)
}
if len(snapshots) == 0 {
fmt.Println("No snapshots available")
os.Exit(1)
}
// Sort and get latest
sort.Slice(snapshots, func(i, j int) bool {
return snapshots[i].Timestamp.After(snapshots[j].Timestamp)
})
latest := snapshots[0]
targetDataset := os.Args[2]
force := len(os.Args) > 3 && os.Args[3] == "--force"
fmt.Printf("Restoring latest snapshot from %s\n", latest.Timestamp.Format("2006-01-02 15:04:05"))
if err := client.RestoreSnapshot(latest, targetDataset, force); err != nil {
if err := client.MountDataset(dataset, mountpoint); err != nil {
fmt.Printf("Error: %v\n", err)
os.Exit(1)
}
@@ -156,21 +142,18 @@ func main() {
}
func printUsage() {
fmt.Println("ZFS Snapshot Restore Tool")
fmt.Println("\nUsage: zfs-restore [command] [options]")
fmt.Println("ZFS Snapshot Restore Tool - Simple Version")
fmt.Println("\nUsage: zfs-restore [command]")
fmt.Println("\nCommands:")
fmt.Println(" list - List available snapshots")
fmt.Println(" restore <#> <dataset> [--force] - Restore snapshot to ZFS dataset")
fmt.Println(" latest <dataset> [--force] - Restore most recent snapshot")
fmt.Println(" save <#> <file> - Save snapshot to file")
fmt.Println(" mount <dataset> <mountpoint> - Mount restored dataset")
fmt.Println(" restore <#|latest> <dataset> [--force] - Restore snapshot to ZFS")
fmt.Println(" mount <dataset> [mountpoint] - Mount dataset to recover files")
fmt.Println(" help - Show this help message")
fmt.Println("\nExamples:")
fmt.Println(" zfs-restore list")
fmt.Println(" zfs-restore restore 1 tank/restored")
fmt.Println(" zfs-restore latest tank/restored --force")
fmt.Println(" zfs-restore save 2 backup.zfs.lz4")
fmt.Println(" zfs-restore mount tank/restored /mnt/restore")
fmt.Println("\nQuick Examples:")
fmt.Println(" zfs-restore list - See available backups")
fmt.Println(" zfs-restore restore latest tank/data - Restore most recent backup")
fmt.Println(" zfs-restore restore 1 tank/restored - Restore snapshot #1")
fmt.Println(" zfs-restore mount tank/restored /mnt - Mount to recover files")
fmt.Println("\nEnvironment Variables (can be set in .env file):")
fmt.Println(" CLIENT_ID - Client identifier (default: client1)")
fmt.Println(" API_KEY - API key for authentication (default: secret123)")

go.mod (30 lines changed)
View File

@@ -3,14 +3,32 @@ module git.ma-al.com/goc_marek/zfs
go 1.25.6
require (
github.com/minio/minio-go/v7 v7.0.98
github.com/a-h/templ v0.3.977
github.com/aws/aws-sdk-go-v2 v1.41.1
github.com/aws/aws-sdk-go-v2/config v1.32.7
github.com/aws/aws-sdk-go-v2/credentials v1.19.7
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0
github.com/mistifyio/go-zfs v2.1.1+incompatible
github.com/pierrec/lz4/v4 v4.1.25
modernc.org/sqlite v1.45.0
)
require (
github.com/a-h/templ v0.3.977 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 // indirect
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect
github.com/aws/smithy-go v1.24.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/go-ini/ini v1.67.0 // indirect
github.com/google/uuid v1.6.0 // indirect
@@ -20,10 +38,9 @@ require (
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/minio/crc64nvme v1.1.1 // indirect
github.com/minio/md5-simd v1.1.2 // indirect
github.com/minio/minio-go/v7 v7.0.98 // indirect
github.com/ncruces/go-strftime v1.0.0 // indirect
github.com/philhofer/fwd v1.2.0 // indirect
github.com/pierrec/lz4/v4 v4.1.25 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/rs/xid v1.6.0 // indirect
github.com/tinylib/msgp v1.6.1 // indirect
@@ -31,9 +48,10 @@ require (
golang.org/x/crypto v0.46.0 // indirect
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect
golang.org/x/net v0.48.0 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/sys v0.39.0 // indirect
golang.org/x/text v0.32.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
golang.org/x/tools v0.39.0 // indirect
modernc.org/libc v1.67.6 // indirect
modernc.org/mathutil v1.7.1 // indirect
modernc.org/memory v1.11.0 // indirect

go.sum (49 lines changed)
View File

@@ -1,11 +1,49 @@
github.com/a-h/templ v0.3.977 h1:kiKAPXTZE2Iaf8JbtM21r54A8bCNsncrfnokZZSrSDg=
github.com/a-h/templ v0.3.977/go.mod h1:oCZcnKRf5jjsGpf2yELzQfodLphd2mwecwG4Crk5HBo=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU=
github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4=
github.com/aws/aws-sdk-go-v2/config v1.32.7 h1:vxUyWGUwmkQ2g19n7JY/9YL8MfAIl7bTesIUykECXmY=
github.com/aws/aws-sdk-go-v2/config v1.32.7/go.mod h1:2/Qm5vKUU/r7Y+zUk/Ptt2MDAEKAfUtKc1+3U1Mo3oY=
github.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUTvZKeLx4tH6PGQc8=
github.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17 h1:JqcdRG//czea7Ppjb+g/n4o8i/R50aTBHkA7vu0lK+k=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17/go.mod h1:CO+WeGmIdj/MlPel2KwID9Gt7CNq4M65HUfBW97liM0=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8 h1:Z5EiPIzXKewUQK0QTMkutjiaPVeVYXX7KIqhXu/0fXs=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8/go.mod h1:FsTpJtvC4U1fyDXk7c71XoDv3HlRm8V3NiYLeYLh5YE=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 h1:bGeHBsGZx0Dvu/eJC0Lh9adJa3M1xREcndxLNZlve2U=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17/go.mod h1:dcW24lbU0CzHusTE8LLHhRLI42ejmINN8Lcr22bwh/g=
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0 h1:oeu8VPlOre74lBA/PMhxa5vewaMIMmILM+RraSyB8KA=
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0/go.mod h1:5jggDlZ2CLQhwJBiZJb4vfk4f0GxWdEDruWKEJ1xOdo=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 h1:gd84Omyu9JLriJVCbGApcLzVR3XtmC4ZDPcAI6Ftvds=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ=
github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs=
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -35,14 +73,10 @@ github.com/philhofer/fwd v1.2.0 h1:e6DnBTl7vGY+Gz322/ASL4Gyp1FspeMvx1RNDoToZuM=
github.com/philhofer/fwd v1.2.0/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM=
github.com/pierrec/lz4/v4 v4.1.25 h1:kocOqRffaIbU5djlIBr7Wh+cx82C0vtFb0fOurZHqD0=
github.com/pierrec/lz4/v4 v4.1.25/go.mod h1:EoQMVJgeeEOMsCqCzqFm2O0cJvljX2nGZjcRIPL34O4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tinylib/msgp v1.6.1 h1:ESRv8eL3u+DNHUoSAAQRE50Hm162zqAnBoGv9PzScPY=
github.com/tinylib/msgp v1.6.1/go.mod h1:RSp0LW9oSxFut3KzESt5Voq4GVWyS+PSulT77roAqEA=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
@@ -64,10 +98,7 @@ golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis=
modernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
modernc.org/ccgo/v4 v4.30.1 h1:4r4U1J6Fhj98NKfSjnPUN7Ze2c6MnAdL0hWw6+LrJpc=

View File

@@ -8,7 +8,6 @@ import (
"fmt"
"io"
"net/http"
"os"
"os/exec"
"strings"
"time"
@@ -17,9 +16,15 @@ import (
"github.com/pierrec/lz4/v4"
)
var uploadUrl = "/upload-stream/"
// SnapshotResult contains the result of a snapshot creation and send operation.
type SnapshotResult struct {
FullBackup bool
Snapshot *zfs.Dataset
}
// Client handles snapshot backup operations to a remote server.
// It manages creating local ZFS snapshots and transmitting them
// to the backup server via HTTP or SSH.
type Client struct {
config *Config
}
@@ -29,12 +34,58 @@ func New(config *Config) *Client {
return &Client{config: config}
}
// CreateAndSend creates a snapshot and sends it to the backup server via HTTP.
// It automatically detects if this is a full or incremental backup:
// - If no bookmark exists, does a full backup
// - If bookmark exists, does an incremental backup from the bookmark
// If targetDataset is provided, it overrides the configured dataset.
func (c *Client) CreateAndSend(targetDataset string) (*SnapshotResult, error) {
// Use provided dataset or fall back to config
if targetDataset == "" {
targetDataset = c.config.LocalDataset
}
// Check for existing bookmark to determine backup type
lastBookmark, err := c.GetLastBookmark()
if err != nil {
return nil, fmt.Errorf("failed to check bookmarks: %v", err)
}
// Create new snapshot
snapshot, err := c.CreateSnapshot(targetDataset)
if err != nil {
return nil, fmt.Errorf("failed to create snapshot: %v", err)
}
isFullBackup := lastBookmark == ""
if isFullBackup {
fmt.Println("→ No previous backup found, doing FULL backup...")
// Send as full (no base)
if err := c.SendIncrementalHTTP(snapshot, targetDataset, ""); err != nil {
return nil, fmt.Errorf("failed to send snapshot: %v", err)
}
} else {
fmt.Printf("→ Found previous backup, doing INCREMENTAL from %s...\n", lastBookmark)
// Send as incremental from bookmark
if err := c.SendIncrementalHTTP(snapshot, targetDataset, lastBookmark); err != nil {
return nil, fmt.Errorf("failed to send incremental: %v", err)
}
}
// Create bookmark for future incremental backups
if err := c.CreateBookmark(snapshot); err != nil {
fmt.Printf("Warning: failed to create bookmark: %v\n", err)
}
return &SnapshotResult{
FullBackup: isFullBackup,
Snapshot: snapshot,
}, nil
}
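// Illustrative usage from a caller (editorial sketch, not part of this change;
// assumes a Config obtained via LoadConfig):
//
//	c := client.New(client.LoadConfig())
//	res, err := c.CreateAndSend("tank/data") // "" falls back to LOCAL_DATASET
//	if err != nil {
//		log.Fatal(err)
//	}
//	if res.FullBackup {
//		fmt.Println("full backup sent")
//	} else {
//		fmt.Println("incremental backup sent")
//	}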
// CreateSnapshot creates a local ZFS snapshot of the configured dataset.
// The snapshot is named with a timestamp for easy identification.
// Returns the created snapshot dataset or an error.
func (c *Client) CreateSnapshot() (*zfs.Dataset, error) {
// Get the local dataset
ds, err := zfs.GetDataset(c.config.LocalDataset)
func (c *Client) CreateSnapshot(dataset string) (*zfs.Dataset, error) {
ds, err := zfs.GetDataset(dataset)
if err != nil {
return nil, fmt.Errorf("failed to get dataset: %v", err)
}
@@ -58,30 +109,41 @@ func (c *Client) GetSnapshotSize(snapshot *zfs.Dataset) int64 {
return int64(snapshot.Used)
}
// SendSnapshot sends a snapshot to the backup server.
// It first requests upload authorization, then streams the snapshot
// using the appropriate method (S3 or ZFS receive).
func (c *Client) SendSnapshot(snapshot *zfs.Dataset) error {
// SendIncrementalHTTP sends a snapshot to the server via HTTP.
// The server then handles storage (S3 or local ZFS).
// datasetName should be the ZFS dataset being backed up (e.g., "tank/data")
func (c *Client) SendIncrementalHTTP(snapshot *zfs.Dataset, datasetName, base string) error {
estimatedSize := c.GetSnapshotSize(snapshot)
// Determine if this is incremental or full
isIncremental := base != ""
// Request upload authorization from server
uploadReq := map[string]interface{}{
"client_id": c.config.ClientID,
"api_key": c.config.APIKey,
"dataset_name": c.config.LocalDataset,
"dataset_name": datasetName,
"timestamp": time.Now().Format(time.RFC3339),
"compressed": c.config.Compress,
"estimated_size": estimatedSize,
"incremental": isIncremental,
"base_snapshot": base,
}
reqBody, _ := json.Marshal(uploadReq)
resp, err := http.Post(c.config.ServerURL+"/upload", "application/json", bytes.NewBuffer(reqBody))
uploadURL := c.config.ServerURL
// Ensure proper URL format
if !strings.HasSuffix(uploadURL, "/") {
uploadURL += "/"
}
uploadURL += "upload"
resp, err := http.Post(uploadURL, "application/json", bytes.NewBuffer(reqBody))
if err != nil {
return fmt.Errorf("failed to request upload: %v", err)
}
defer resp.Body.Close()
// Parse server response
var uploadResp struct {
Success bool `json:"success"`
Message string `json:"message"`
@@ -89,7 +151,6 @@ func (c *Client) SendSnapshot(snapshot *zfs.Dataset) error {
UploadMethod string `json:"upload_method"`
StorageKey string `json:"storage_key"`
}
if err := json.NewDecoder(resp.Body).Decode(&uploadResp); err != nil {
return fmt.Errorf("failed to decode response: %v", err)
}
@@ -102,20 +163,24 @@ func (c *Client) SendSnapshot(snapshot *zfs.Dataset) error {
fmt.Printf(" Method: %s\n", uploadResp.UploadMethod)
fmt.Printf(" Storage key: %s\n", uploadResp.StorageKey)
// Choose upload method based on server response
if uploadResp.UploadMethod == "s3" {
return c.streamToS3(snapshot, uploadResp.UploadURL, uploadResp.StorageKey)
}
return c.sendViaZFS(snapshot, uploadResp.StorageKey)
// Stream to server via HTTP
return c.streamToServer(snapshot, base, uploadResp.UploadURL, uploadResp.StorageKey)
}
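// The upload is a two-step HTTP exchange (summarized from the code above;
// the field names are exactly those marshalled and decoded here):
//
//	1. POST {SERVER_URL}/upload with JSON
//	   {client_id, api_key, dataset_name, timestamp, compressed,
//	    estimated_size, incremental, base_snapshot}
//	   -> response {success, message, upload_url, upload_method, storage_key}
//	2. POST upload_url with the raw (optionally LZ4-compressed) "zfs send"
//	   stream, authenticated via the X-API-Key / X-Storage-Key headers set
//	   in streamToServer below.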
// streamToS3 streams a ZFS snapshot to S3 storage via HTTP.
// The snapshot is optionally compressed with LZ4 before transmission.
func (c *Client) streamToS3(snapshot *zfs.Dataset, uploadURL, storageKey string) error {
fmt.Printf("→ Streaming snapshot to S3...\n")
// streamToServer streams a ZFS snapshot to the backup server via HTTP.
func (c *Client) streamToServer(snapshot *zfs.Dataset, base, uploadURL, storageKey string) error {
fmt.Printf("→ Streaming snapshot to server...\n")
// Create ZFS send command
cmd := exec.Command("zfs", "send", snapshot.Name)
var cmd *exec.Cmd
if base != "" {
// Incremental send from bookmark or snapshot
cmd = exec.Command("zfs", "send", "-i", base, snapshot.Name)
} else {
// Full send
cmd = exec.Command("zfs", "send", snapshot.Name)
}
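// These two branches mirror the usual ZFS CLI invocations, e.g.
// (snapshot and bookmark names illustrative):
//
//	zfs send -i tank/data#backup-2026-02-16 tank/data@zfs-backup-2026-02-16  # incremental
//	zfs send tank/data@zfs-backup-2026-02-16                                 # full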
zfsOut, err := cmd.StdoutPipe()
if err != nil {
return fmt.Errorf("failed to create pipe: %v", err)
@@ -132,12 +197,10 @@ func (c *Client) streamToS3(snapshot *zfs.Dataset, uploadURL, storageKey string)
fmt.Printf(" Compressing with LZ4...\n")
pr, pw := io.Pipe()
lz4Writer := lz4.NewWriter(pw)
lz4Writer.Apply(lz4.BlockSizeOption(lz4.BlockSize(4 * 1024 * 1024))) // 4MB blocks for better performance
lz4Writer.Apply(lz4.BlockSizeOption(lz4.BlockSize(4 * 1024 * 1024))) // 4MB blocks
go func() {
// Copy zfs output to LZ4 writer
io.Copy(lz4Writer, zfsOut)
// Close LZ4 writer first to flush, then close pipe
lz4Writer.Close()
pw.Close()
}()
@@ -145,25 +208,45 @@ func (c *Client) streamToS3(snapshot *zfs.Dataset, uploadURL, storageKey string)
reader = pr
}
// Create HTTP request
req, err := http.NewRequest("POST", c.config.ServerURL+uploadURL, reader)
// Create HTTP request to server
// Build full URL properly - check if uploadURL is already full URL
fullURL := uploadURL
// If uploadURL is a relative path, prepend server URL
if !strings.HasPrefix(uploadURL, "http://") && !strings.HasPrefix(uploadURL, "https://") {
fullURL = c.config.ServerURL
// Remove trailing slash from base URL if present
fullURL = strings.TrimRight(fullURL, "/")
// Add leading slash to upload URL if not present
if !strings.HasPrefix(uploadURL, "/") {
uploadURL = "/" + uploadURL
}
fullURL += uploadURL
}
fmt.Printf(" Streaming to: %s\n", fullURL)
req, err := http.NewRequest("POST", fullURL, reader)
if err != nil {
return fmt.Errorf("failed to create request: %v", err)
}
// Set required headers
// Set headers
req.Header.Set("X-API-Key", c.config.APIKey)
req.Header.Set("X-Storage-Key", storageKey)
req.Header.Set("X-Dataset-Name", c.config.LocalDataset)
req.Header.Set("X-Compressed", fmt.Sprintf("%v", c.config.Compress))
req.Header.Set("X-Incremental", fmt.Sprintf("%v", base != ""))
if base != "" {
req.Header.Set("X-Base-Snapshot", base)
}
req.Header.Set("Content-Type", "application/octet-stream")
// Send request with no timeout for large uploads
client := &http.Client{
httpClient := &http.Client{
Timeout: 0,
}
httpResp, err := client.Do(req)
httpResp, err := httpClient.Do(req)
if err != nil {
cmd.Process.Kill()
return fmt.Errorf("failed to upload: %v", err)
@@ -200,42 +283,7 @@ func (c *Client) streamToS3(snapshot *zfs.Dataset, uploadURL, storageKey string)
return nil
}
// sendViaZFS sends a snapshot via traditional ZFS send/receive over SSH.
// This method is used when the server uses local ZFS storage.
func (c *Client) sendViaZFS(snapshot *zfs.Dataset, receivePath string) error {
fmt.Printf("→ Sending via ZFS send/receive...\n")
// Extract server host from URL
serverHost := c.config.ServerURL
if len(serverHost) > 7 && strings.HasPrefix(serverHost, "http://") {
serverHost = serverHost[7:]
} else if len(serverHost) > 8 && strings.HasPrefix(serverHost, "https://") {
serverHost = serverHost[8:]
}
// Remove port if present
if idx := strings.LastIndex(serverHost, ":"); idx > 0 {
serverHost = serverHost[:idx]
}
// Execute ZFS send over SSH
cmd := exec.Command("sh", "-c",
fmt.Sprintf("zfs send %s | ssh %s 'zfs recv -F %s'",
snapshot.Name, serverHost, receivePath))
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
return fmt.Errorf("failed to send snapshot: %v", err)
}
fmt.Printf("✓ Snapshot sent successfully!\n")
return nil
}
// GetStatus retrieves and displays the client's backup status from the server.
// Shows storage usage, quota, and snapshot count.
func (c *Client) GetStatus() error {
url := fmt.Sprintf("%s/status?client_id=%s&api_key=%s",
c.config.ServerURL, c.config.ClientID, c.config.APIKey)
@@ -273,106 +321,3 @@ func (c *Client) GetStatus() error {
return nil
}
// RequestRotation asks the server to rotate old snapshots.
// This deletes the oldest snapshots to free up space.
func (c *Client) RequestRotation() error {
reqBody, _ := json.Marshal(map[string]string{
"client_id": c.config.ClientID,
"api_key": c.config.APIKey,
})
resp, err := http.Post(c.config.ServerURL+"/rotate", "application/json", bytes.NewBuffer(reqBody))
if err != nil {
return fmt.Errorf("failed to request rotation: %v", err)
}
defer resp.Body.Close()
var rotateResp struct {
Success bool `json:"success"`
DeletedCount int `json:"deleted_count"`
ReclaimedBytes int64 `json:"reclaimed_bytes"`
}
if err := json.NewDecoder(resp.Body).Decode(&rotateResp); err != nil {
return fmt.Errorf("failed to decode response: %v", err)
}
if !rotateResp.Success {
return fmt.Errorf("rotation failed")
}
fmt.Printf("✓ Rotation complete\n")
fmt.Printf(" Deleted: %d snapshots\n", rotateResp.DeletedCount)
fmt.Printf(" Freed: %.2f GB\n", float64(rotateResp.ReclaimedBytes)/(1024*1024*1024))
return nil
}
// ServerRotationPolicy represents the rotation policy response from the server
type ServerRotationPolicy struct {
Success bool `json:"success"`
Message string `json:"message"`
RotationPolicy *SnapshotPolicy `json:"rotation_policy"`
ServerManaged bool `json:"server_managed"`
}
// GetRotationPolicy fetches the rotation policy from the server.
// If the server has a policy configured for this client, it must be used.
// Returns the policy and whether it's server-managed (mandatory).
func (c *Client) GetRotationPolicy() (*ServerRotationPolicy, error) {
url := fmt.Sprintf("%s/rotation-policy?client_id=%s&api_key=%s",
c.config.ServerURL, c.config.ClientID, c.config.APIKey)
resp, err := http.Get(url)
if err != nil {
return nil, fmt.Errorf("failed to get rotation policy: %v", err)
}
defer resp.Body.Close()
var policyResp ServerRotationPolicy
if err := json.NewDecoder(resp.Body).Decode(&policyResp); err != nil {
return nil, fmt.Errorf("failed to decode response: %v", err)
}
if !policyResp.Success {
return nil, fmt.Errorf("failed to get rotation policy: %s", policyResp.Message)
}
return &policyResp, nil
}
// ChangePassword changes the client's API key on the server.
// Requires the current API key for authentication and the new key.
func (c *Client) ChangePassword(newAPIKey string) error {
reqBody, _ := json.Marshal(map[string]string{
"client_id": c.config.ClientID,
"current_key": c.config.APIKey,
"new_key": newAPIKey,
})
resp, err := http.Post(c.config.ServerURL+"/client/change-password", "application/json", bytes.NewBuffer(reqBody))
if err != nil {
return fmt.Errorf("failed to change password: %v", err)
}
defer resp.Body.Close()
var result struct {
Success bool `json:"success"`
Message string `json:"message"`
}
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return fmt.Errorf("failed to decode response: %v", err)
}
if !result.Success {
return fmt.Errorf("failed to change password: %s", result.Message)
}
// Update local config with new key
c.config.APIKey = newAPIKey
fmt.Printf("✓ Password changed successfully\n")
return nil
}

View File

@@ -9,7 +9,6 @@ import (
)
// Config holds client-side configuration for connecting to the backup server.
// Note: Storage type is determined by the server, not the client.
type Config struct {
// ClientID is the unique identifier for this client
ClientID string `json:"client_id"`
@@ -21,6 +20,7 @@ type Config struct {
LocalDataset string `json:"local_dataset"`
// Compress enables LZ4 compression for transfers
Compress bool `json:"compress"`
// UploadURL is the server path used for streaming uploads
UploadURL string `json:"upload_url"`
}
// LoadConfig loads client configuration from environment variables and .env file.
@@ -32,9 +32,10 @@ func LoadConfig() *Config {
return &Config{
ClientID: getEnv("CLIENT_ID", "client1"),
APIKey: getEnv("API_KEY", "secret123"),
ServerURL: getEnv("SERVER_URL", "http://backup-server:8080"),
ServerURL: getEnv("SERVER_URL", "http://localhost:8080"),
LocalDataset: getEnv("LOCAL_DATASET", "tank/data"),
Compress: getEnv("COMPRESS", "true") == "true",
UploadURL: "/upload-stream/",
}
}
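// Illustrative .env matching the defaults above (values are examples only):
//
//	CLIENT_ID=client1
//	API_KEY=secret123
//	SERVER_URL=http://localhost:8080
//	LOCAL_DATASET=tank/data
//	COMPRESS=true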

View File

@@ -1,77 +1,15 @@
// Package client provides ZFS snapshot backup client functionality.
// This file contains snapshot management functions including creation,
// bookmarking, and rotation similar to zfs-auto-snapshot.
// This file contains snapshot management functions for creating and sending snapshots.
package client
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"sort"
"strings"
"time"
"github.com/mistifyio/go-zfs"
"github.com/pierrec/lz4/v4"
)
// SnapshotPolicy defines retention settings for automatic snapshots.
type SnapshotPolicy struct {
// KeepHourly is the number of hourly snapshots to keep
KeepHourly int
// KeepDaily is the number of daily snapshots to keep
KeepDaily int
// KeepWeekly is the number of weekly snapshots to keep
KeepWeekly int
// KeepMonthly is the number of monthly snapshots to keep
KeepMonthly int
}
// DefaultPolicy returns the default snapshot retention policy.
func DefaultPolicy() *SnapshotPolicy {
return &SnapshotPolicy{
KeepHourly: 24,
KeepDaily: 7,
KeepWeekly: 4,
KeepMonthly: 12,
}
}
// SnapshotType represents the type of snapshot (hourly, daily, etc.)
type SnapshotType string
const (
SnapshotHourly SnapshotType = "hourly"
SnapshotDaily SnapshotType = "daily"
SnapshotWeekly SnapshotType = "weekly"
SnapshotMonthly SnapshotType = "monthly"
SnapshotManual SnapshotType = "manual"
)
// CreateSnapshotWithType creates a snapshot with a specific type label.
// The snapshot name follows the pattern: zfs-backup-<type>-<timestamp>
func (c *Client) CreateSnapshotWithType(snapshotType SnapshotType) (*zfs.Dataset, error) {
ds, err := zfs.GetDataset(c.config.LocalDataset)
if err != nil {
return nil, fmt.Errorf("failed to get dataset: %v", err)
}
timestamp := time.Now().Format("2006-01-02_15-04-05")
snapshotName := fmt.Sprintf("zfs-backup-%s-%s", snapshotType, timestamp)
snapshot, err := ds.Snapshot(snapshotName, false)
if err != nil {
return nil, fmt.Errorf("failed to create snapshot: %v", err)
}
fmt.Printf("✓ Created %s snapshot: %s@%s\n", snapshotType, c.config.LocalDataset, snapshotName)
return snapshot, nil
}
// CreateBookmark creates a ZFS bookmark from a snapshot.
// Bookmarks allow incremental sends even after the source snapshot is deleted.
func (c *Client) CreateBookmark(snapshot *zfs.Dataset) error {
@@ -133,297 +71,7 @@ func (c *Client) GetLastSnapshot() (*zfs.Dataset, error) {
return snapshots[len(snapshots)-1], nil
}
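// For context, bookmarks map to these ZFS operations (names illustrative):
//
//	zfs bookmark tank/data@zfs-backup-... tank/data#zfs-backup-...  # create bookmark
//	zfs send -i tank/data#zfs-backup-... tank/data@newer-snap       # incremental from it
//
// Unlike snapshots, bookmarks hold no data, so the source snapshot can be
// destroyed once the bookmark exists and incrementals still work.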
// SendIncremental sends an incremental stream from a bookmark or snapshot.
// If base is empty, sends a full stream.
func (c *Client) SendIncremental(snapshot *zfs.Dataset, base string) error {
estimatedSize := c.GetSnapshotSize(snapshot)
// Determine if this is incremental or full
isIncremental := base != ""
var uploadMethod string
if isIncremental {
uploadMethod = "incremental"
} else {
uploadMethod = "full"
}
// Request upload authorization from server
uploadReq := map[string]interface{}{
"client_id": c.config.ClientID,
"api_key": c.config.APIKey,
"dataset_name": c.config.LocalDataset,
"timestamp": time.Now().Format(time.RFC3339),
"compressed": c.config.Compress,
"estimated_size": estimatedSize,
"incremental": isIncremental,
"base_snapshot": base,
}
reqBody, _ := json.Marshal(uploadReq)
resp, err := http.Post(c.config.ServerURL+"/upload", "application/json", bytes.NewBuffer(reqBody))
if err != nil {
return fmt.Errorf("failed to request upload: %v", err)
}
defer resp.Body.Close()
var uploadResp struct {
Success bool `json:"success"`
Message string `json:"message"`
UploadURL string `json:"upload_url"`
UploadMethod string `json:"upload_method"`
StorageKey string `json:"storage_key"`
}
if err := json.NewDecoder(resp.Body).Decode(&uploadResp); err != nil {
return fmt.Errorf("failed to decode response: %v", err)
}
if !uploadResp.Success {
return fmt.Errorf("upload not authorized: %s", uploadResp.Message)
}
fmt.Printf("→ Upload authorized\n")
fmt.Printf(" Method: %s\n", uploadResp.UploadMethod)
fmt.Printf(" Type: %s\n", uploadMethod)
fmt.Printf(" Storage key: %s\n", uploadResp.StorageKey)
// Choose upload method based on server response
if uploadResp.UploadMethod == "s3" {
return c.streamIncrementalToS3(snapshot, base, uploadResp.UploadURL, uploadResp.StorageKey)
}
return c.sendIncrementalViaZFS(snapshot, base, uploadResp.StorageKey)
}
// streamIncrementalToS3 streams an incremental ZFS snapshot to S3.
func (c *Client) streamIncrementalToS3(snapshot *zfs.Dataset, base, uploadURL, storageKey string) error {
fmt.Printf("→ Streaming snapshot to S3...\n")
// Create ZFS send command
var cmd *exec.Cmd
if base != "" {
// Incremental send from bookmark or snapshot
fmt.Printf(" Base: %s\n", base)
cmd = exec.Command("zfs", "send", "-i", base, snapshot.Name)
} else {
// Full send
cmd = exec.Command("zfs", "send", snapshot.Name)
}
zfsOut, err := cmd.StdoutPipe()
if err != nil {
return fmt.Errorf("failed to create pipe: %v", err)
}
if err := cmd.Start(); err != nil {
return fmt.Errorf("failed to start zfs send: %v", err)
}
var reader io.Reader = zfsOut
// Apply LZ4 compression if enabled
if c.config.Compress {
fmt.Printf(" Compressing with LZ4...\n")
pr, pw := io.Pipe()
lz4Writer := lz4.NewWriter(pw)
lz4Writer.Apply(lz4.BlockSizeOption(lz4.BlockSize(4 * 1024 * 1024))) // 4MB blocks for better performance
go func() {
// Copy zfs output to LZ4 writer
io.Copy(lz4Writer, zfsOut)
// Close LZ4 writer first to flush, then close pipe
lz4Writer.Close()
pw.Close()
}()
reader = pr
}
// Create HTTP request
req, err := http.NewRequest("POST", c.config.ServerURL+uploadURL, reader)
if err != nil {
return fmt.Errorf("failed to create request: %v", err)
}
// Set required headers
req.Header.Set("X-API-Key", c.config.APIKey)
req.Header.Set("X-Storage-Key", storageKey)
req.Header.Set("X-Dataset-Name", c.config.LocalDataset)
req.Header.Set("X-Compressed", fmt.Sprintf("%v", c.config.Compress))
req.Header.Set("X-Incremental", fmt.Sprintf("%v", base != ""))
if base != "" {
req.Header.Set("X-Base-Snapshot", base)
}
req.Header.Set("Content-Type", "application/octet-stream")
// Send request with no timeout for large uploads
client := &http.Client{
Timeout: 0,
}
httpResp, err := client.Do(req)
if err != nil {
cmd.Process.Kill()
return fmt.Errorf("failed to upload: %v", err)
}
defer httpResp.Body.Close()
if httpResp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(httpResp.Body)
return fmt.Errorf("upload failed with status %d: %s", httpResp.StatusCode, body)
}
if err := cmd.Wait(); err != nil {
return fmt.Errorf("zfs send failed: %v", err)
}
// Parse response
var result struct {
Success bool `json:"success"`
Message string `json:"message"`
Size int64 `json:"size"`
}
if err := json.NewDecoder(httpResp.Body).Decode(&result); err != nil {
return fmt.Errorf("failed to decode response: %v", err)
}
if !result.Success {
return fmt.Errorf("upload failed: %s", result.Message)
}
fmt.Printf("✓ Snapshot uploaded successfully!\n")
fmt.Printf(" Size: %.2f MB\n", float64(result.Size)/(1024*1024))
return nil
}
// sendIncrementalViaZFS sends an incremental snapshot via ZFS send/receive over SSH.
// This method is used when the server uses local ZFS storage.
func (c *Client) sendIncrementalViaZFS(snapshot *zfs.Dataset, base, receivePath string) error {
fmt.Printf("-> Sending via ZFS send/receive...\n")
// Extract server host from URL
serverHost := c.config.ServerURL
if len(serverHost) > 7 && strings.HasPrefix(serverHost, "http://") {
serverHost = serverHost[7:]
} else if len(serverHost) > 8 && strings.HasPrefix(serverHost, "https://") {
serverHost = serverHost[8:]
}
// Remove port if present
if idx := strings.LastIndex(serverHost, ":"); idx > 0 {
serverHost = serverHost[:idx]
}
// Build zfs send command
var zfsSendCmd string
if base != "" {
// Incremental send
fmt.Printf(" Base: %s\n", base)
zfsSendCmd = fmt.Sprintf("zfs send -i %s %s", base, snapshot.Name)
} else {
// Full send
zfsSendCmd = fmt.Sprintf("zfs send %s", snapshot.Name)
}
// Execute ZFS send over SSH
cmd := exec.Command("sh", "-c",
fmt.Sprintf("%s | ssh %s 'zfs recv -F %s'", zfsSendCmd, serverHost, receivePath))
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
return fmt.Errorf("failed to send snapshot: %v", err)
}
fmt.Printf("Snapshot sent successfully!\n")
return nil
}
// RotateLocalSnapshots removes old snapshots based on the retention policy.
// This is similar to zfs-auto-snapshot's rotation behavior.
func (c *Client) RotateLocalSnapshots(policy *SnapshotPolicy) error {
ds, err := zfs.GetDataset(c.config.LocalDataset)
if err != nil {
return fmt.Errorf("failed to get dataset: %v", err)
}
snapshots, err := ds.Snapshots()
if err != nil {
return fmt.Errorf("failed to list snapshots: %v", err)
}
// Group snapshots by type
groups := make(map[SnapshotType][]*zfs.Dataset)
for _, snap := range snapshots {
snapType := parseSnapshotType(snap.Name)
groups[snapType] = append(groups[snapType], snap)
}
// Apply retention policy
deletedCount := 0
keepCount := map[SnapshotType]int{
SnapshotHourly: policy.KeepHourly,
SnapshotDaily: policy.KeepDaily,
SnapshotWeekly: policy.KeepWeekly,
SnapshotMonthly: policy.KeepMonthly,
SnapshotManual: -1, // Keep all manual snapshots
}
for snapType, snaps := range groups {
maxKeep := keepCount[snapType]
if maxKeep < 0 {
continue // Keep all
}
// Sort by creation time (oldest first)
sortSnapshotsByTime(snaps)
// Delete oldest snapshots exceeding the limit
if len(snaps) > maxKeep {
toDelete := snaps[:len(snaps)-maxKeep]
for _, snap := range toDelete {
fmt.Printf(" Deleting old snapshot: %s\n", snap.Name)
if err := snap.Destroy(zfs.DestroyDefault); err != nil {
fmt.Printf(" Warning: failed to delete %s: %v\n", snap.Name, err)
} else {
deletedCount++
}
}
}
}
if deletedCount > 0 {
fmt.Printf("✓ Rotated %d local snapshots\n", deletedCount)
}
return nil
}
// parseSnapshotType extracts the snapshot type from the snapshot name.
func parseSnapshotType(name string) SnapshotType {
if strings.Contains(name, "hourly") {
return SnapshotHourly
}
if strings.Contains(name, "daily") {
return SnapshotDaily
}
if strings.Contains(name, "weekly") {
return SnapshotWeekly
}
if strings.Contains(name, "monthly") {
return SnapshotMonthly
}
return SnapshotManual
}
// sortSnapshotsByTime sorts snapshots by creation time (oldest first).
// Uses the snapshot name which contains timestamp for sorting.
func sortSnapshotsByTime(snaps []*zfs.Dataset) {
sort.Slice(snaps, func(i, j int) bool {
// Extract timestamp from snapshot name for comparison
// Names are like: dataset@zfs-backup-hourly-2006-01-02_15-04-05
return snaps[i].Name < snaps[j].Name
})
// SendIncremental is kept for API compatibility - now just calls HTTP version
func (c *Client) SendIncremental(snapshot *zfs.Dataset, datasetName, base string) error {
return c.SendIncrementalHTTP(snapshot, datasetName, base)
}

View File

@@ -119,7 +119,7 @@ func (c *Client) DisplaySnapshots(snapshots []*SnapshotMetadata) {
// RestoreSnapshot downloads and restores a snapshot to a local ZFS dataset.
// If force is true, existing datasets will be overwritten.
func (c *Client) RestoreSnapshot(snapshot *SnapshotMetadata, targetDataset string, force bool) error {
func (c *Client) RestoreSnapshot(snapshot *SnapshotMetadata, targetDataset string, force bool, allSnapshots []*SnapshotMetadata) error {
fmt.Printf("\n=== Restoring Snapshot ===\n")
fmt.Printf("Source: %s\n", snapshot.SnapshotID)
fmt.Printf("Target: %s\n", targetDataset)
@@ -128,25 +128,82 @@ func (c *Client) RestoreSnapshot(snapshot *SnapshotMetadata, targetDataset strin
fmt.Printf("Compressed: %v\n", snapshot.Compressed)
fmt.Printf("Incremental: %v\n\n", snapshot.Incremental)
// For incremental snapshots, we need special handling
if snapshot.Incremental && force {
// Check if target dataset exists
if _, err := zfs.GetDataset(targetDataset); err == nil {
fmt.Printf("→ Destroying existing dataset for incremental restore...\n")
// Destroy the existing dataset to allow clean restore
cmd := exec.Command("zfs", "destroy", "-r", targetDataset)
if err := cmd.Run(); err != nil {
fmt.Printf(" Warning: could not destroy dataset (may not exist): %v\n", err)
}
// For incremental snapshots, we need to restore base first
if snapshot.Incremental && snapshot.BaseSnapshot != "" {
fmt.Printf("\n⚠ This is an INCREMENTAL backup.\n")
fmt.Printf(" Base snapshot needed: %s\n\n", snapshot.BaseSnapshot)
// Find the base snapshot in the list
var baseSnap *SnapshotMetadata
for _, s := range allSnapshots {
if s.SnapshotID == snapshot.BaseSnapshot {
baseSnap = s
break
}
}
if baseSnap == nil {
return fmt.Errorf("base snapshot %s not found on server. Cannot restore incremental without base", snapshot.BaseSnapshot)
}
fmt.Printf("Found base snapshot:\n")
fmt.Printf(" - %s from %s (%.2f GB)\n\n",
baseSnap.SnapshotID,
baseSnap.Timestamp.Format("2006-01-02 15:04:05"),
float64(baseSnap.SizeBytes)/(1024*1024*1024))
fmt.Printf("To restore this incremental, I need to:\n")
fmt.Printf(" 1. Restore base snapshot: %s\n", baseSnap.SnapshotID)
fmt.Printf(" 2. Apply incremental: %s\n\n", snapshot.SnapshotID)
// Ask for confirmation
fmt.Printf("Continue? [y/N]: ")
var confirm string
fmt.Scanln(&confirm)
if confirm != "y" && confirm != "Y" {
fmt.Println("Cancelled.")
return nil
}
// First restore the base snapshot
fmt.Printf("\n→ Restoring base snapshot...\n")
if err := c.restoreOneSnapshot(baseSnap, targetDataset, true); err != nil {
return fmt.Errorf("failed to restore base snapshot: %v", err)
}
// Then apply the incremental
fmt.Printf("\n→ Applying incremental snapshot...\n")
if err := c.restoreOneSnapshot(snapshot, targetDataset, false); err != nil {
return fmt.Errorf("failed to apply incremental: %v", err)
}
fmt.Printf("\n✓ Incremental restore completed!\n")
return nil
}
return c.restoreOneSnapshot(snapshot, targetDataset, force)
}
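// Illustrative call (editorial sketch): restore the newest entry from a fetched
// list, letting the base be resolved from allSnapshots when it is incremental:
//
//	snaps, _ := c.ListSnapshots()
//	err := c.RestoreSnapshot(snaps[0], "tank/restored", true, snaps)
//
// Note the chain is one level deep here: the base is restored as a full stream,
// so a base that is itself incremental would not be resolved further.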
// restoreOneSnapshot downloads and restores a single snapshot
func (c *Client) restoreOneSnapshot(snapshot *SnapshotMetadata, targetDataset string, force bool) error {
// Try the download first; only destroy the existing dataset if the download succeeds
var originalExists bool
if force {
if _, err := zfs.GetDataset(targetDataset); err == nil {
originalExists = true
fmt.Printf("→ Target dataset exists, will overwrite\n")
} else {
originalExists = false
fmt.Printf("→ Target dataset does not exist, will create new\n")
}
} else {
// Check if target dataset exists
if !force {
if _, err := zfs.GetDataset(targetDataset); err == nil {
return fmt.Errorf("target dataset %s already exists. Use --force to overwrite", targetDataset)
}
}
}
// Request download from server
downloadURL := fmt.Sprintf("%s/download?client_id=%s&api_key=%s&snapshot_id=%s",
@@ -165,6 +222,18 @@ func (c *Client) RestoreSnapshot(snapshot *SnapshotMetadata, targetDataset strin
return fmt.Errorf("download failed: %s", body)
}
// Download succeeded - now safe to destroy if needed
if force && originalExists {
fmt.Printf("→ Destroying existing dataset %s...\n", targetDataset)
cmd := exec.Command("zfs", "destroy", "-r", targetDataset)
output, err := cmd.CombinedOutput()
if err != nil {
fmt.Printf(" Destroy output: %s\n", string(output))
return fmt.Errorf("failed to destroy existing dataset: %v", err)
}
fmt.Printf(" Destroyed successfully\n")
}
// Create decompression reader if needed
var reader io.Reader = resp.Body
if snapshot.Compressed {
@@ -194,6 +263,13 @@ func (c *Client) RestoreSnapshot(snapshot *SnapshotMetadata, targetDataset strin
fmt.Printf("\n✓ Snapshot restored successfully!\n")
fmt.Printf(" Dataset: %s\n", targetDataset)
// Verify the dataset exists after restore
if _, err := zfs.GetDataset(targetDataset); err == nil {
fmt.Printf(" Verified: dataset exists\n")
} else {
fmt.Printf(" Warning: could not verify dataset exists: %v\n", err)
}
return nil
}
@@ -242,15 +318,22 @@ func (c *Client) RestoreToFile(snapshot *SnapshotMetadata, outputFile string) er
return nil
}
// MountSnapshot mounts a restored dataset to a specified mountpoint.
// This allows browsing the restored files.
func (c *Client) MountSnapshot(dataset, mountpoint string) error {
// MountDataset mounts a restored dataset to a specified mountpoint for file recovery.
func (c *Client) MountDataset(dataset, mountpoint string) error {
fmt.Printf("\n=== Mounting Dataset ===\n")
fmt.Printf("Dataset: %s\n", dataset)
fmt.Printf("Mountpoint: %s\n\n", mountpoint)
ds, err := zfs.GetDataset(dataset)
if err != nil {
return fmt.Errorf("dataset not found: %v", err)
}
// Create mountpoint if it doesn't exist
// Check current mountpoint
currentMP, _ := ds.GetProperty("mountpoint")
fmt.Printf("Current mountpoint: %s\n", currentMP)
// Create mountpoint directory if it doesn't exist
if err := os.MkdirAll(mountpoint, 0755); err != nil {
return fmt.Errorf("failed to create mountpoint: %v", err)
}
@@ -260,13 +343,17 @@ func (c *Client) MountSnapshot(dataset, mountpoint string) error {
return fmt.Errorf("failed to set mountpoint: %v", err)
}
// Mount the dataset
// Mount the dataset if not already mounted
cmd := exec.Command("zfs", "mount", dataset)
if err := cmd.Run(); err != nil {
return fmt.Errorf("failed to mount: %v", err)
// Might already be mounted, that's OK
fmt.Printf(" (dataset may already be mounted)\n")
}
fmt.Printf("✓ Mounted %s at %s\n", dataset, mountpoint)
fmt.Printf("\n✓ Mounted successfully!\n")
fmt.Printf(" Access files at: %s\n", mountpoint)
fmt.Printf(" When done, run: umount %s\n", mountpoint)
return nil
}
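// Equivalent manual recovery steps (illustrative):
//
//	zfs set mountpoint=/mnt/recover tank/restored
//	zfs mount tank/restored
//	ls /mnt/recover        # browse recovered files
//	umount /mnt/recover    # when done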

View File

@@ -8,6 +8,8 @@ import (
"fmt"
"io/fs"
"net/http"
"strconv"
"strings"
"time"
"git.ma-al.com/goc_marek/zfs/internal/server/templates/pages"
@@ -480,6 +482,8 @@ func (s *Server) handleAdminDeleteSnapshot(w http.ResponseWriter, r *http.Reques
if snap != nil {
if snap.StorageType == "s3" && s.s3Backend != nil {
s.s3Backend.Delete(context.Background(), snap.StorageKey)
} else if snap.StorageType == "local" && s.localBackend != nil {
s.localBackend.Delete(context.Background(), snap.StorageKey)
}
}
@@ -516,6 +520,118 @@ func (s *Server) handleAdminGetStats(w http.ResponseWriter, r *http.Request) {
})
}
// handleAdminGetDatasets returns all datasets, optionally filtered by client
func (s *Server) handleAdminGetDatasets(w http.ResponseWriter, r *http.Request) {
admin, err := s.authenticateAdmin(r)
if err != nil || admin == nil {
http.Error(w, "Unauthorized", http.StatusUnauthorized)
return
}
clientID := r.URL.Query().Get("client_id")
var datasets []*DatasetConfig
if clientID != "" {
datasets, _ = s.db.GetDatasetsByClient(clientID)
} else {
datasets, _ = s.db.GetAllDatasets()
}
// Get snapshot counts for each dataset
type DatasetResponse struct {
ID int64 `json:"id"`
ClientID string `json:"client_id"`
DatasetName string `json:"dataset_name"`
StorageType string `json:"storage_type"`
Enabled bool `json:"enabled"`
SnapshotCount int `json:"snapshot_count"`
}
response := make([]DatasetResponse, len(datasets))
for i, d := range datasets {
snapshotCount, _ := s.db.GetSnapshotCountByDataset(d.ClientID, d.DatasetName)
response[i] = DatasetResponse{
ID: d.ID,
ClientID: d.ClientID,
DatasetName: d.DatasetName,
StorageType: d.StorageType,
Enabled: d.Enabled,
SnapshotCount: snapshotCount,
}
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(response)
}
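// Illustrative response for GET /admin/datasets?client_id=client1
// (values are examples):
//
//	[{"id":1,"client_id":"client1","dataset_name":"tank/data",
//	  "storage_type":"s3","enabled":true,"snapshot_count":12}]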
// handleAdminUpdateDeleteDataset handles PUT and DELETE for a specific dataset
func (s *Server) handleAdminUpdateDeleteDataset(w http.ResponseWriter, r *http.Request) {
admin, err := s.authenticateAdmin(r)
if err != nil || admin == nil {
http.Error(w, "Unauthorized", http.StatusUnauthorized)
return
}
// Extract dataset ID from URL
parts := strings.Split(r.URL.Path, "/")
if len(parts) < 4 {
http.Error(w, "Invalid URL", http.StatusBadRequest)
return
}
datasetID, err := strconv.ParseInt(parts[len(parts)-1], 10, 64)
if err != nil {
http.Error(w, "Invalid dataset ID", http.StatusBadRequest)
return
}
// Get dataset from database
dataset, err := s.db.GetDatasetByID(datasetID)
if err != nil || dataset == nil {
http.Error(w, "Dataset not found", http.StatusNotFound)
return
}
if r.Method == http.MethodDelete {
// Delete dataset
if err := s.db.DeleteDataset(datasetID); err != nil {
http.Error(w, "Failed to delete dataset", http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(map[string]interface{}{
"success": true,
"message": "Dataset deleted successfully",
})
return
}
if r.Method == http.MethodPut {
// Update dataset
var req struct {
Enabled bool `json:"enabled"`
}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
http.Error(w, "Invalid request body", http.StatusBadRequest)
return
}
dataset.Enabled = req.Enabled
if err := s.db.SaveDataset(dataset); err != nil {
http.Error(w, "Failed to update dataset", http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(map[string]interface{}{
"success": true,
"message": "Dataset updated successfully",
})
return
}
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
}
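// Illustrative requests against this handler (authentication omitted):
//
//	PUT    /admin/datasets/3  {"enabled":false}
//	  → {"success":true,"message":"Dataset updated successfully"}
//	DELETE /admin/datasets/3
//	  → {"success":true,"message":"Dataset deleted successfully"}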
// Admin management handlers
// handleAdminGetAdmins returns all admins

View File

@@ -92,6 +92,7 @@ const adminPanelHTML = `<!DOCTYPE html>
<div class="tabs">
<button class="tab active" data-tab="clients" onclick="showTab('clients')">Clients</button>
<button class="tab" data-tab="datasets" onclick="showTab('datasets')">Datasets</button>
<button class="tab" data-tab="snapshots" onclick="showTab('snapshots')">Snapshots</button>
<button class="tab" data-tab="admins" onclick="showTab('admins')">Admins</button>
</div>
@@ -121,6 +122,32 @@ const adminPanelHTML = `<!DOCTYPE html>
</div>
</div>
<div id="datasets-tab" class="hidden">
<div class="card">
<div class="card-header">
<h3>Datasets</h3>
<select id="dataset-client-filter" onchange="loadDatasets()">
<option value="">All Clients</option>
</select>
</div>
<div class="card-body">
<table>
<thead>
<tr>
<th>Client</th>
<th>Dataset Name</th>
<th>Storage Type</th>
<th>Status</th>
<th>Snapshots</th>
<th>Actions</th>
</tr>
</thead>
<tbody id="datasets-table"></tbody>
</table>
</div>
</div>
</div>
<div id="snapshots-tab" class="hidden">
<div class="card">
<div class="card-header">
@@ -134,9 +161,11 @@ const adminPanelHTML = `<!DOCTYPE html>
<thead>
<tr>
<th>Client</th>
<th>Dataset</th>
<th>Snapshot ID</th>
<th>Timestamp</th>
<th>Size</th>
<th>Storage</th>
<th>Type</th>
<th>Actions</th>
</tr>
@@ -474,6 +503,76 @@ const adminPanelHTML = `<!DOCTYPE html>
}
}
// Load datasets
async function loadDatasets() {
const clientId = document.getElementById('dataset-client-filter').value;
const url = '/admin/datasets' + (clientId ? '?client_id=' + clientId : '');
try {
const res = await fetch(url);
const datasets = await res.json();
const tbody = document.getElementById('datasets-table');
tbody.innerHTML = datasets.map(d =>
'<tr>' +
'<td>' + d.client_id + '</td>' +
'<td><strong>' + d.dataset_name + '</strong></td>' +
'<td><span class="badge badge-info">' + d.storage_type + '</span></td>' +
'<td>' + (d.enabled ? '<span class="badge badge-success">Enabled</span>' : '<span class="badge badge-danger">Disabled</span>') + '</td>' +
'<td>' + (d.snapshot_count || 0) + '</td>' +
'<td>' +
'<button class="btn btn-sm ' + (d.enabled ? 'btn-danger' : 'btn-success') + '" onclick="toggleDataset(' + d.id + ', ' + !d.enabled + ')">' + (d.enabled ? 'Disable' : 'Enable') + '</button>' +
'<button class="btn btn-sm btn-danger" onclick="deleteDataset(' + d.id + ', \'' + d.dataset_name + '\')">Delete</button>' +
'</td>' +
'</tr>'
).join('');
// Update client filter if not set
if (!clientId) {
const clientsRes = await fetch('/admin/clients');
const clients = await clientsRes.json();
const filter = document.getElementById('dataset-client-filter');
filter.innerHTML = '<option value="">All Clients</option>' +
clients.map(c => '<option value="' + c.client_id + '">' + c.client_id + '</option>').join('');
}
} catch (e) {
console.error('Failed to load datasets:', e);
}
}
// Toggle dataset enabled/disabled
async function toggleDataset(id, enabled) {
try {
const res = await fetch('/admin/datasets/' + id, {
method: 'PUT',
headers: {'Content-Type': 'application/json'},
body: JSON.stringify({enabled: enabled})
});
if (res.ok) {
loadDatasets();
} else {
alert('Failed to update dataset');
}
} catch (e) {
console.error('Failed to toggle dataset:', e);
}
}
// Delete dataset
async function deleteDataset(id, name) {
if (!confirm('Delete dataset ' + name + '?')) return;
try {
const res = await fetch('/admin/datasets/' + id, {method: 'DELETE'});
if (res.ok) {
loadDatasets();
} else {
alert('Failed to delete dataset');
}
} catch (e) {
console.error('Failed to delete dataset:', e);
}
}
// Load snapshots
async function loadSnapshots() {
const clientId = document.getElementById('snapshot-client-filter').value;
@@ -488,12 +587,14 @@ const adminPanelHTML = `<!DOCTYPE html>
const sizeGB = (s.size_bytes / (1024*1024*1024)).toFixed(2);
return '<tr>' +
'<td>' + s.client_id + '</td>' +
'<td>' + (s.dataset_name || '-') + '</td>' +
'<td>' + s.snapshot_id + '</td>' +
'<td>' + new Date(s.timestamp).toLocaleString() + '</td>' +
'<td>' + sizeGB + ' GB</td>' +
'<td><span class="badge ' + (s.storage_type === 's3' ? 'badge-info' : 'badge-warning') + '">' + s.storage_type + '</span></td>' +
'<td>' +
(s.incremental ? '<span class="badge badge-info">Incremental</span>' : '<span class="badge badge-success">Full</span>') +
(s.compressed ? ' <span class="badge badge-info">Compressed</span>' : '') +
(s.incremental ? '<span class="badge badge-info">Inc</span>' : '<span class="badge badge-success">Full</span>') +
(s.compressed ? ' <span class="badge badge-info">LZ4</span>' : '') +
'</td>' +
'<td><button class="btn btn-sm btn-danger" onclick="deleteSnapshot(\'' + s.client_id + '\', \'' + s.snapshot_id + '\')">Delete</button></td>' +
'</tr>';
@@ -534,12 +635,14 @@ const adminPanelHTML = `<!DOCTYPE html>
document.querySelector('.tab[data-tab="' + tab + '"]').classList.add('active');
document.getElementById('clients-tab').classList.add('hidden');
document.getElementById('datasets-tab').classList.add('hidden');
document.getElementById('snapshots-tab').classList.add('hidden');
document.getElementById('admins-tab').classList.add('hidden');
document.getElementById(tab + '-tab').classList.remove('hidden');
if (tab === 'snapshots') loadSnapshots();
if (tab === 'admins') loadAdmins();
if (tab === 'datasets') loadDatasets();
}
// Modal functions

View File

@@ -14,6 +14,7 @@ type Config struct {
S3BucketName string
S3UseSSL bool
S3Enabled bool // Enable/disable S3 backend
S3Region string // AWS region
BaseDataset string
DatabasePath string // Path to SQLite database
Port string
@@ -40,6 +41,7 @@ func LoadConfig() *Config {
S3BucketName: getEnv("S3_BUCKET", "zfs-snapshots"),
S3UseSSL: getEnv("S3_USE_SSL", "true") != "false",
S3Enabled: s3Enabled,
S3Region: getEnv("S3_REGION", "us-east-1"),
BaseDataset: getEnv("ZFS_BASE_DATASET", "backup"),
DatabasePath: getEnv("DATABASE_PATH", "zfs-backup.db"),
Port: getEnv("PORT", "8080"),

View File

@@ -89,6 +89,23 @@ func (d *Database) initTables() error {
return fmt.Errorf("failed to create clients table: %v", err)
}
// Datasets table - multiple datasets per client
_, err = d.db.Exec(`
CREATE TABLE IF NOT EXISTS datasets (
id INTEGER PRIMARY KEY AUTOINCREMENT,
client_id TEXT NOT NULL,
dataset_name TEXT NOT NULL,
storage_type TEXT NOT NULL DEFAULT 's3',
enabled INTEGER NOT NULL DEFAULT 1,
created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY (client_id) REFERENCES clients(client_id) ON DELETE CASCADE,
UNIQUE(client_id, dataset_name)
)
`)
if err != nil {
return fmt.Errorf("failed to create datasets table: %v", err)
}
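// The UNIQUE(client_id, dataset_name) constraint means SaveDataset's INSERT
// fails for a duplicate pair (illustrative):
//
//	err1 := db.SaveDataset(&DatasetConfig{ClientID: "c1", DatasetName: "tank/data"}) // ok
//	err2 := db.SaveDataset(&DatasetConfig{ClientID: "c1", DatasetName: "tank/data"}) // UNIQUE constraint error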
// Snapshots table
_, err = d.db.Exec(`
CREATE TABLE IF NOT EXISTS snapshots (
@@ -400,6 +417,162 @@ func (d *Database) CreateDefaultClient() error {
return d.SaveClient(defaultClient)
}
// CreateDefaultDataset creates a default dataset for a client if none exists
func (d *Database) CreateDefaultDataset(clientID, datasetName string) error {
datasets, err := d.GetDatasetsByClient(clientID)
if err != nil {
return err
}
if len(datasets) > 0 {
return nil
}
// Create default dataset
dataset := &DatasetConfig{
ClientID: clientID,
DatasetName: datasetName,
StorageType: "s3",
Enabled: true,
}
return d.SaveDataset(dataset)
}
// DatasetConfig represents a dataset configuration
type DatasetConfig struct {
ID int64 `json:"id"`
ClientID string `json:"client_id"`
DatasetName string `json:"dataset_name"`
StorageType string `json:"storage_type"`
Enabled bool `json:"enabled"`
}
// GetDatasetsByClient gets all datasets for a client
func (d *Database) GetDatasetsByClient(clientID string) ([]*DatasetConfig, error) {
query := `SELECT id, client_id, dataset_name, storage_type, enabled FROM datasets WHERE client_id = ?`
rows, err := d.db.Query(query, clientID)
if err != nil {
return nil, err
}
defer rows.Close()
var datasets []*DatasetConfig
for rows.Next() {
dataset := &DatasetConfig{}
var enabled int
err := rows.Scan(&dataset.ID, &dataset.ClientID, &dataset.DatasetName, &dataset.StorageType, &enabled)
if err != nil {
return nil, err
}
dataset.Enabled = enabled == 1
datasets = append(datasets, dataset)
}
return datasets, nil
}
// GetDatasetByName gets a dataset by client and dataset name
func (d *Database) GetDatasetByName(clientID, datasetName string) (*DatasetConfig, error) {
query := `SELECT id, client_id, dataset_name, storage_type, enabled FROM datasets WHERE client_id = ? AND dataset_name = ?`
row := d.db.QueryRow(query, clientID, datasetName)
dataset := &DatasetConfig{}
var enabled int
err := row.Scan(&dataset.ID, &dataset.ClientID, &dataset.DatasetName, &dataset.StorageType, &enabled)
if err == sql.ErrNoRows {
return nil, nil
}
if err != nil {
return nil, err
}
dataset.Enabled = enabled == 1
return dataset, nil
}
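// Note the (nil, nil) return for a missing row: callers must check both
// values, as HandleUploadStream does. Illustrative:
//
//	ds, err := db.GetDatasetByName(clientID, name)
//	if err != nil { /* query failed */ }
//	if ds == nil { /* no such dataset configured */ }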
// SaveDataset saves or updates a dataset
func (d *Database) SaveDataset(dataset *DatasetConfig) error {
enabled := 0
if dataset.Enabled {
enabled = 1
}
if dataset.ID == 0 {
// Insert new
_, err := d.db.Exec(`INSERT INTO datasets (client_id, dataset_name, storage_type, enabled) VALUES (?, ?, ?, ?)`,
dataset.ClientID, dataset.DatasetName, dataset.StorageType, enabled)
return err
}
// Update existing
_, err := d.db.Exec(`UPDATE datasets SET storage_type = ?, enabled = ? WHERE id = ?`,
dataset.StorageType, enabled, dataset.ID)
return err
}
// DeleteDataset deletes a dataset
func (d *Database) DeleteDataset(id int64) error {
_, err := d.db.Exec(`DELETE FROM datasets WHERE id = ?`, id)
return err
}
// GetDatasetByID gets a dataset by ID
func (d *Database) GetDatasetByID(id int64) (*DatasetConfig, error) {
query := `SELECT id, client_id, dataset_name, storage_type, enabled FROM datasets WHERE id = ?`
row := d.db.QueryRow(query, id)
dataset := &DatasetConfig{}
var enabled int
err := row.Scan(&dataset.ID, &dataset.ClientID, &dataset.DatasetName, &dataset.StorageType, &enabled)
if err != nil {
return nil, err
}
dataset.Enabled = enabled == 1
return dataset, nil
}
// GetSnapshotCountByDataset gets snapshot count for a specific dataset
func (d *Database) GetSnapshotCountByDataset(clientID, datasetName string) (int, error) {
var count int
err := d.db.QueryRow(`SELECT COUNT(*) FROM snapshots WHERE client_id = ? AND dataset_name = ?`, clientID, datasetName).Scan(&count)
return count, err
}
// GetAllDatasets gets all datasets
func (d *Database) GetAllDatasets() ([]*DatasetConfig, error) {
query := `SELECT id, client_id, dataset_name, storage_type, enabled FROM datasets`
rows, err := d.db.Query(query)
if err != nil {
return nil, err
}
defer rows.Close()
var datasets []*DatasetConfig
for rows.Next() {
dataset := &DatasetConfig{}
var enabled int
err := rows.Scan(&dataset.ID, &dataset.ClientID, &dataset.DatasetName, &dataset.StorageType, &enabled)
if err != nil {
return nil, err
}
dataset.Enabled = enabled == 1
datasets = append(datasets, dataset)
}
return datasets, nil
}
// GetSnapshotByID retrieves a specific snapshot
func (d *Database) GetSnapshotByID(clientID, snapshotID string) (*SnapshotMetadata, error) {
snap := &SnapshotMetadata{}

View File

@@ -126,21 +126,21 @@ func (s *Server) rotateSnapshots(clientID string) (int, int64) {
return 0, 0
}
// Select appropriate backend
var backend StorageBackend
if s.s3Backend != nil {
backend = s.s3Backend
} else if s.localBackend != nil {
backend = s.localBackend
} else {
log.Printf("No storage backend available for rotation")
return 0, 0
}
// Delete snapshots
// Delete snapshots - use correct backend based on each snapshot's storage type
ctx := context.Background()
for _, snap := range toDelete {
if err := backend.Delete(ctx, snap.StorageKey); err != nil {
// Determine which backend to use for this specific snapshot
var snapBackend StorageBackend
if snap.StorageType == "s3" && s.s3Backend != nil {
snapBackend = s.s3Backend
} else if snap.StorageType == "local" && s.localBackend != nil {
snapBackend = s.localBackend
} else {
log.Printf("No storage backend available for snapshot %s (type: %s)", snap.SnapshotID, snap.StorageType)
continue
}
if err := snapBackend.Delete(ctx, snap.StorageKey); err != nil {
log.Printf("Error deleting snapshot %s: %v", snap.StorageKey, err)
continue
}
@@ -238,6 +238,7 @@ func (s *Server) HandleUpload(w http.ResponseWriter, r *http.Request) {
Message: "Ready to receive snapshot",
UploadMethod: "zfs-receive",
StorageKey: snapshotName,
UploadURL: fmt.Sprintf("/upload-stream/%s", req.ClientID),
})
}
}
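// With UploadURL added, a handshake response looks like this (field names
// assume the response struct's JSON tags; values are examples):
//
//	{"message":"Ready to receive snapshot","upload_method":"zfs-receive",
//	 "storage_key":"tank/data@snap-...","upload_url":"/upload-stream/client1"}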
@@ -270,6 +271,36 @@ func (s *Server) HandleUploadStream(w http.ResponseWriter, r *http.Request) {
return
}
// Check if dataset is allowed for this client
dataset, err := s.db.GetDatasetByName(clientID, datasetName)
if err != nil || dataset == nil {
// Auto-create the dataset if it doesn't exist
log.Printf("Dataset %s not found for client %s, creating...", datasetName, clientID)
newDataset := &DatasetConfig{
ClientID: clientID,
DatasetName: datasetName,
StorageType: "s3",
Enabled: true,
}
if err := s.db.SaveDataset(newDataset); err != nil {
log.Printf("Error creating dataset: %v", err)
respondJSON(w, http.StatusForbidden, UploadResponse{
Success: false,
Message: "Dataset not configured for this client",
})
return
}
dataset = newDataset
}
if !dataset.Enabled {
respondJSON(w, http.StatusForbidden, UploadResponse{
Success: false,
Message: "Dataset is disabled",
})
return
}
ctx := context.Background()
// Upload to S3
@@ -280,12 +311,24 @@ func (s *Server) HandleUploadStream(w http.ResponseWriter, r *http.Request) {
size = -1 // Use streaming upload for unknown size
}
if s.s3Backend == nil {
log.Printf("Error: S3 backend not initialized")
http.Error(w, "S3 backend not configured", http.StatusInternalServerError)
if s.s3Backend == nil && s.localBackend == nil {
log.Printf("Error: No storage backend configured")
http.Error(w, "No storage backend configured", http.StatusInternalServerError)
return
}
// Determine storage type based on client configuration
client, err := s.db.GetClient(clientID)
if err != nil || client == nil {
http.Error(w, "Client not found", http.StatusNotFound)
return
}
var actualSize int64
// Handle based on storage type
if client.StorageType == "s3" && s.s3Backend != nil {
// Upload to S3
if err := s.s3Backend.Upload(ctx, storageKey, r.Body, size); err != nil {
log.Printf("Error uploading to S3: %v", err)
http.Error(w, "Upload failed", http.StatusInternalServerError)
@@ -293,13 +336,27 @@ func (s *Server) HandleUploadStream(w http.ResponseWriter, r *http.Request) {
}
// Get actual size after upload
actualSize, err := s.s3Backend.GetSize(ctx, storageKey)
actualSize, err = s.s3Backend.GetSize(ctx, storageKey)
if err != nil {
log.Printf("Error getting object size: %v", err)
actualSize = size
}
} else if client.StorageType == "local" && s.localBackend != nil {
// Upload to local ZFS
if err := s.localBackend.Receive(storageKey, r.Body, compressedStr == "true"); err != nil {
log.Printf("Error uploading to local ZFS: %v", err)
http.Error(w, "Upload failed", http.StatusInternalServerError)
return
}
actualSize = size
} else {
log.Printf("Error: Storage type %s not configured", client.StorageType)
http.Error(w, "Storage type not configured", http.StatusInternalServerError)
return
}
// Save metadata to database
// Record the actual storage type used for this snapshot (not always s3)
metadata := &SnapshotMetadata{
ClientID: clientID,
SnapshotID: storageKey,
@@ -307,7 +364,7 @@ func (s *Server) HandleUploadStream(w http.ResponseWriter, r *http.Request) {
SizeBytes: actualSize,
DatasetName: datasetName,
StorageKey: storageKey,
StorageType: "s3",
StorageType: client.StorageType, // Use actual storage type from client config
Compressed: compressedStr == "true",
Incremental: incrementalStr == "true",
BaseSnapshot: baseSnapshot,
@@ -317,10 +374,19 @@ func (s *Server) HandleUploadStream(w http.ResponseWriter, r *http.Request) {
log.Printf("Error saving snapshot metadata: %v", err)
}
// Run rotation after successful upload
deletedCount, reclaimedBytes := s.rotateSnapshots(clientID)
if deletedCount > 0 {
log.Printf("Rotation: deleted %d snapshots, reclaimed %.2f MB for client %s",
deletedCount, float64(reclaimedBytes)/(1024*1024), clientID)
}
respondJSON(w, http.StatusOK, map[string]interface{}{
"success": true,
"message": "Snapshot uploaded successfully",
"size": actualSize,
"deleted_count": deletedCount,
"reclaimed_bytes": reclaimedBytes,
})
}
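// Illustrative success response, including the piggybacked rotation result
// (values are examples):
//
//	{"success":true,"message":"Snapshot uploaded successfully",
//	 "size":1073741824,"deleted_count":2,"reclaimed_bytes":5368709120}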
@@ -402,28 +468,23 @@ func (s *Server) HandleDownload(w http.ResponseWriter, r *http.Request) {
return
}
// Find snapshot metadata
client, err := s.db.GetClient(clientID)
if err != nil || client == nil {
http.Error(w, "Client not found", http.StatusNotFound)
return
}
targetSnapshot, err := s.db.GetSnapshotByID(clientID, snapshotID)
if err != nil || targetSnapshot == nil {
http.Error(w, "Snapshot not found", http.StatusNotFound)
return
}
// Use snapshot's own storage_type to determine which backend to use
// This enables mixed storage scenarios (e.g., full on local, incrementals on S3)
ctx := context.Background()
var backend StorageBackend
if client.StorageType == "s3" && s.s3Backend != nil {
if targetSnapshot.StorageType == "s3" && s.s3Backend != nil {
backend = s.s3Backend
} else if s.localBackend != nil {
} else if targetSnapshot.StorageType == "local" && s.localBackend != nil {
backend = s.localBackend
} else {
http.Error(w, "No storage backend available", http.StatusInternalServerError)
http.Error(w, "No storage backend available for this snapshot's storage type", http.StatusInternalServerError)
return
}
@@ -522,6 +583,8 @@ func (s *Server) RegisterRoutes(mux *http.ServeMux) {
mux.HandleFunc("/admin/snapshots", s.handleAdminGetSnapshots)
mux.HandleFunc("/admin/snapshot/delete", s.handleAdminDeleteSnapshot)
mux.HandleFunc("/admin/stats", s.handleAdminGetStats)
mux.HandleFunc("/admin/datasets", s.handleAdminGetDatasets)
mux.HandleFunc("/admin/datasets/{id}", s.handleAdminUpdateDeleteDataset)
mux.HandleFunc("/admin/admins", s.handleAdminGetAdmins)
mux.HandleFunc("/admin/admin/create", s.handleAdminCreateAdmin)
mux.HandleFunc("/admin/admin/delete", s.handleAdminDeleteAdmin)

View File

@@ -6,12 +6,15 @@ import (
"io"
"log"
"net/http"
"os"
"os/exec"
"strings"
"time"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/mistifyio/go-zfs"
"github.com/pierrec/lz4/v4"
)
// StorageBackend defines the interface for different storage types
@@ -23,25 +26,22 @@ type StorageBackend interface {
GetSize(ctx context.Context, key string) (int64, error)
}
// S3Backend implements StorageBackend for S3-compatible storage
// S3Backend implements StorageBackend for S3-compatible storage using minio-go
type S3Backend struct {
client *minio.Client
bucketName string
}
// NewS3Backend creates a new S3 storage backend
// NewS3Backend creates a new S3 storage backend using minio-go
func NewS3Backend(endpoint, accessKey, secretKey, bucketName string, useSSL bool) (*S3Backend, error) {
// Create custom HTTP transport with extended timeouts for large file uploads
transport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
// Extended timeouts for streaming large ZFS snapshots
ResponseHeaderTimeout: 5 * time.Minute,
ExpectContinueTimeout: 30 * time.Second,
TLSClientConfig: nil,
IdleConnTimeout: 90 * time.Second,
// Connection pooling
MaxIdleConns: 10,
MaxIdleConnsPerHost: 10,
DisableCompression: false,
}
client, err := minio.New(endpoint, &minio.Options{
@@ -74,7 +74,7 @@ func NewS3Backend(endpoint, accessKey, secretKey, bucketName string, useSSL bool
}, nil
}
// Upload uploads data to S3
// Upload uploads data to S3 using minio-go
func (s *S3Backend) Upload(ctx context.Context, key string, data io.Reader, size int64) error {
_, err := s.client.PutObject(ctx, s.bucketName, key, data, size,
minio.PutObjectOptions{
@@ -141,6 +141,41 @@ func (l *LocalBackend) Upload(ctx context.Context, key string, data io.Reader, s
return fmt.Errorf("local backend upload not supported via storage interface, use zfs receive endpoint")
}
// Receive receives a ZFS snapshot stream and restores it to the local dataset
func (l *LocalBackend) Receive(snapshotName string, data io.Reader, compressed bool) error {
// Extract the target dataset from the snapshot name
// snapshotName format: dataset@name -> we want just the dataset part
parts := strings.Split(snapshotName, "@")
if len(parts) != 2 {
return fmt.Errorf("invalid snapshot name format: %s", snapshotName)
}
targetDataset := parts[0]
log.Printf("Receiving ZFS snapshot to %s (compressed: %v)", targetDataset, compressed)
// If compressed, decompress with LZ4 first
var reader io.Reader = data
if compressed {
lz4Reader := lz4.NewReader(data)
reader = lz4Reader
}
// Receive via exec.Command: the go-zfs library's ReceiveSnapshot doesn't
// support the -F (force rollback) flag, so we invoke zfs receive directly
cmd := exec.Command("zfs", "receive", "-F", snapshotName)
cmd.Stdin = reader
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
return fmt.Errorf("zfs receive failed: %v", err)
}
log.Printf("Successfully received snapshot: %s", snapshotName)
return nil
}
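// Shell equivalent of the pipeline above (illustrative; pierrec/lz4 writes
// the standard LZ4 frame format, so the lz4 CLI can read it):
//
//	lz4 -d -c stream.lz4 | zfs receive -F backup/client1/data@snap-2026-02-16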
// Download creates a zfs send stream
func (l *LocalBackend) Download(ctx context.Context, key string) (io.ReadCloser, error) {
cmd := exec.CommandContext(ctx, "zfs", "send", key)

View File

@@ -260,4 +260,5 @@ templ ClientPasswordModal() {
// AdminScripts renders the JavaScript for the admin panel
templ AdminScripts() {
<script src="/admin/static/admin.js"></script>
<script>initTheme();</script>
}

View File

@@ -19,6 +19,30 @@ async function logout() {
location.reload();
}
// Toggle dark/light theme
function toggleTheme() {
const html = document.documentElement;
const isDark = html.classList.contains('dark');
if (isDark) {
html.classList.remove('dark');
localStorage.setItem('theme', 'light');
} else {
html.classList.add('dark');
localStorage.setItem('theme', 'dark');
}
}
// Initialize theme on load
function initTheme() {
const savedTheme = localStorage.getItem('theme');
const prefersDark = window.matchMedia('(prefers-color-scheme: dark)').matches;
if (savedTheme === 'dark' || (!savedTheme && prefersDark)) {
document.documentElement.classList.add('dark');
}
}
// Load stats
async function loadStats() {
try {

View File

@@ -4,13 +4,14 @@ A distributed ZFS snapshot management system with S3-compatible storage support.
## Features
- **S3 Storage Support**: Store snapshots in any S3-compatible storage (AWS S3, MinIO, Backblaze B2, Wasabi, DigitalOcean Spaces)
- **S3 Storage Support**: Store snapshots in any S3-compatible storage using the minio-go client (AWS S3, MinIO, Backblaze B2, Wasabi, DigitalOcean Spaces)
- **Local ZFS Storage**: Option to use local ZFS datasets for maximum performance
- **Multi-client Architecture**: Support for multiple clients with isolated storage and per-client quotas
- **Automatic Compression**: Gzip compression for reduced storage costs
- **Automatic Compression**: LZ4 compression for reduced storage costs and faster transfers
- **Snapshot Rotation**: Automatic cleanup of old snapshots based on quota
- **Server-Managed Rotation Policies**: Centralized control of client rotation policies - clients must use server-configured retention settings
- **API Key Authentication**: Secure client-server communication
- **Simple CLI**: Just run `zfs-client snap` to back up; full vs incremental is handled automatically
## Project Structure
@@ -93,11 +94,19 @@ API_KEY=secret123
SERVER_URL=http://backup-server:8080
LOCAL_DATASET=tank/data
COMPRESS=true
# Optional: Direct S3 upload (bypasses server storage)
S3_ENDPOINT=https://s3.amazonaws.com
S3_REGION=us-east-1
S3_BUCKET=zfs-backups
S3_ACCESS_KEY=your_access_key
S3_SECRET_KEY=your_secret_key
```
> **Important**:
> - The `API_KEY` in the client `.env` file must be the **raw (unhashed)** key; the server stores the SHA-256 hash in the database (a short sketch follows this note).
> - **Storage type is determined by the server**, not the client. The server decides whether to use S3 or local ZFS storage based on its configuration.
> - The client automatically handles full vs incremental backups based on whether a bookmark exists.
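For illustration, the raw-key-to-stored-hash relationship is just a SHA-256 digest; the hex encoding and key value here are assumptions:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	raw := "secret123" // raw API_KEY from the client .env
	sum := sha256.Sum256([]byte(raw))
	fmt.Println(hex.EncodeToString(sum[:])) // the digest the server keeps
}
```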
### Restore Tool Configuration
@@ -128,52 +137,18 @@ zfs-server
### Client Commands
The `zfs-client` tool provides the following commands for managing ZFS snapshots:
The `zfs-client` tool provides simple commands for creating and sending ZFS snapshots:
#### `backup`
Creates a snapshot and sends it to the server. Automatically uses incremental backup if a bookmark exists.
#### `snap`
Creates a snapshot and sends it to the server, automatically detecting whether this is the first backup (full) or a subsequent one (incremental).
```bash
zfs-client backup
zfs-client snap
```
#### `backup-full`
Forces a full backup (no incremental). Use for the initial backup or when you want to resend the complete dataset.
On first run, it will print: `→ No previous backup found, doing FULL backup...`
```bash
zfs-client backup-full
```
#### `backup-incremental`
Creates an incremental backup from the last bookmark. Requires an existing bookmark from a previous full backup.
```bash
zfs-client backup-incremental
```
#### `snapshot <type>`
Creates a typed snapshot (hourly, daily, weekly, monthly) with automatic rotation. The rotation policy is fetched from the server if configured.
```bash
zfs-client snapshot hourly
zfs-client snapshot daily
zfs-client snapshot weekly
zfs-client snapshot monthly
```
#### `rotate`
Rotates local snapshots based on the retention policy. If the server has a rotation policy configured, it will be used; otherwise, default values apply.
```bash
zfs-client rotate
```
#### `rotate-remote`
Requests the server to rotate (delete old) remote snapshots to free up storage quota.
```bash
zfs-client rotate-remote
```
On subsequent runs, it automatically performs incremental backups from the last bookmark, as sketched below.
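A minimal sketch of that decision, assuming bookmarks are listed via `zfs list -t bookmark` (dataset and snapshot names are illustrative):

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// buildSendCommand picks a full or incremental `zfs send` the way the
// client does: incremental from the newest bookmark when one exists.
func buildSendCommand(dataset, snapshot string) *exec.Cmd {
	out, err := exec.Command("zfs", "list", "-H", "-t", "bookmark",
		"-o", "name", "-s", "creation", dataset).Output()
	names := strings.TrimSpace(string(out))
	if err != nil || names == "" {
		// No bookmark yet: first backup, send the full stream.
		return exec.Command("zfs", "send", dataset+"@"+snapshot)
	}
	lines := strings.Split(names, "\n")
	lastBookmark := lines[len(lines)-1] // e.g. tank/data#snap-2026-02-15
	return exec.Command("zfs", "send", "-i", lastBookmark, dataset+"@"+snapshot)
}

func main() {
	cmd := buildSendCommand("tank/data", "snap-2026-02-16")
	fmt.Println("would run:", strings.Join(cmd.Args, " "))
}
```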
#### `status`
Displays the current backup status including storage usage, quota, and snapshot count from the server.
@@ -182,13 +157,6 @@ Displays the current backup status including storage usage, quota, and snapshot
zfs-client status
```
#### `bookmarks`
Lists ZFS bookmarks on the local system. Bookmarks are used as reference points for incremental backups.
```bash
zfs-client bookmarks
```
#### `help`
Shows the help message with all available commands and options.
@@ -196,6 +164,23 @@ Shows the help message with all available commands and options.
zfs-client help
```
### Client Configuration
```env
CLIENT_ID=client1
API_KEY=secret123
SERVER_URL=http://backup-server:8080
LOCAL_DATASET=tank/data
COMPRESS=true
# Optional: S3 direct upload (bypasses server)
S3_ENDPOINT=https://s3.amazonaws.com
S3_REGION=us-east-1
S3_BUCKET=zfs-backups
S3_ACCESS_KEY=your_access_key
S3_SECRET_KEY=your_secret_key
```
### Restore Tool Commands
The `zfs-restore` tool provides commands for listing and restoring snapshots from the backup server:

BIN
zfs-client Executable file

Binary file not shown.

BIN
zfs-server Executable file

Binary file not shown.