remove s3 from client
@@ -58,7 +58,7 @@ func printUsage() {
fmt.Println("ZFS Snapshot Backup Client - Simple Version")
fmt.Println("\nUsage: zfs-client [command]")
fmt.Println("\nCommands:")
fmt.Println(" snap - Create snapshot and send to server (auto full/incremental)")
fmt.Println(" snap - Create snapshot and send to server")
fmt.Println(" status - Check server connection and quota")
fmt.Println(" help - Show this help message")
fmt.Println("\nEnvironment Variables (can be set in .env file):")
@@ -67,12 +67,6 @@ func printUsage() {
fmt.Println(" SERVER_URL - Backup server URL (default: http://localhost:8080)")
fmt.Println(" LOCAL_DATASET - ZFS dataset to backup (default: tank/data)")
fmt.Println(" COMPRESS - Enable LZ4 compression (default: true)")
fmt.Println("\nS3 Configuration (for direct S3 uploads):")
fmt.Println(" S3_ENDPOINT - S3 endpoint URL (e.g., https://s3.amazonaws.com)")
fmt.Println(" S3_REGION - AWS region (default: us-east-1)")
fmt.Println(" S3_BUCKET - S3 bucket name (default: zfs-backups)")
fmt.Println(" S3_ACCESS_KEY - AWS access key")
fmt.Println(" S3_SECRET_KEY - AWS secret key")
fmt.Println("\nExamples:")
fmt.Println(" zfs-client snap")
fmt.Println(" zfs-client status")
@@ -1,5 +1,4 @@
// Command zfs-restore is a CLI tool for restoring ZFS snapshots from a backup server.
// It provides commands for listing, restoring, and mounting snapshots.
// Command zfs-restore is a simple CLI tool for restoring ZFS snapshots from a backup server.
package main

import (
@@ -23,7 +22,8 @@ func main() {
command := os.Args[1]

switch command {
case "list":
case "list", "ls":
// List available snapshots
snapshots, err := client.ListSnapshots()
if err != nil {
fmt.Printf("Error: %v\n", err)
@@ -32,8 +32,13 @@ func main() {
client.DisplaySnapshots(snapshots)

case "restore":
if len(os.Args) < 4 {
fmt.Println("Usage: zfs-restore restore <snapshot-number> <target-dataset> [--force]")
// Restore snapshot - can use number or "latest" keyword
if len(os.Args) < 3 {
fmt.Println("Usage: zfs-restore restore <snapshot-number-or-latest> <target-dataset> [--force]")
fmt.Println("\nExamples:")
fmt.Println(" zfs-restore restore 1 tank/restored")
fmt.Println(" zfs-restore restore latest tank/restored")
fmt.Println(" zfs-restore restore latest tank/restored --force")
os.Exit(1)
}
@@ -48,103 +53,56 @@ func main() {
return snapshots[i].Timestamp.After(snapshots[j].Timestamp)
})

// Parse snapshot number
var snapNum int
fmt.Sscanf(os.Args[2], "%d", &snapNum)

if snapNum < 1 || snapNum > len(snapshots) {
fmt.Printf("Invalid snapshot number. Use 'list' to see available snapshots.\n")
if len(snapshots) == 0 {
fmt.Println("No snapshots available. Run 'zfs-restore list' first.")
os.Exit(1)
}

snapshot := snapshots[snapNum-1]
targetDataset := os.Args[3]
force := len(os.Args) > 4 && os.Args[4] == "--force"
snapshotArg := os.Args[2]
var snapshot *restore.SnapshotMetadata

if snapshotArg == "latest" {
snapshot = snapshots[0]
fmt.Printf("→ Restoring latest snapshot from %s\n", snapshot.Timestamp.Format("2006-01-02 15:04:05"))
} else {
var snapNum int
fmt.Sscanf(snapshotArg, "%d", &snapNum)

if snapNum < 1 || snapNum > len(snapshots) {
fmt.Printf("Invalid snapshot number. Use 'zfs-restore list' to see available snapshots.\n")
os.Exit(1)
}
snapshot = snapshots[snapNum-1]
}

// Get target dataset (either from args or prompt)
targetDataset := ""
force := false

for i, arg := range os.Args {
if arg == "--force" {
force = true
}
if arg != "restore" && arg != snapshotArg && arg != "--force" && targetDataset == "" && i > 2 && arg != os.Args[0] {
targetDataset = arg
}
}

if targetDataset == "" {
fmt.Printf("Target dataset: ")
fmt.Scanln(&targetDataset)
}

if targetDataset == "" {
fmt.Println("Error: target dataset is required")
os.Exit(1)
}

if err := client.RestoreSnapshot(snapshot, targetDataset, force); err != nil {
fmt.Printf("Error: %v\n", err)
os.Exit(1)
}
case "save":
|
||||
if len(os.Args) < 4 {
|
||||
fmt.Println("Usage: zfs-restore save <snapshot-number> <output-file>")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
snapshots, err := client.ListSnapshots()
|
||||
if err != nil {
|
||||
fmt.Printf("Error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
sort.Slice(snapshots, func(i, j int) bool {
|
||||
return snapshots[i].Timestamp.After(snapshots[j].Timestamp)
|
||||
})
|
||||
|
||||
var snapNum int
|
||||
fmt.Sscanf(os.Args[2], "%d", &snapNum)
|
||||
|
||||
if snapNum < 1 || snapNum > len(snapshots) {
|
||||
fmt.Printf("Invalid snapshot number.\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
snapshot := snapshots[snapNum-1]
|
||||
outputFile := os.Args[3]
|
||||
|
||||
if err := client.RestoreToFile(snapshot, outputFile); err != nil {
|
||||
fmt.Printf("Error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
case "mount":
|
||||
if len(os.Args) < 4 {
|
||||
fmt.Println("Usage: zfs-restore mount <dataset> <mountpoint>")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
dataset := os.Args[2]
|
||||
mountpoint := os.Args[3]
|
||||
|
||||
if err := client.MountSnapshot(dataset, mountpoint); err != nil {
|
||||
fmt.Printf("Error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
case "latest":
|
||||
if len(os.Args) < 3 {
|
||||
fmt.Println("Usage: zfs-restore latest <target-dataset> [--force]")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
snapshots, err := client.ListSnapshots()
|
||||
if err != nil {
|
||||
fmt.Printf("Error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if len(snapshots) == 0 {
|
||||
fmt.Println("No snapshots available")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Sort and get latest
|
||||
sort.Slice(snapshots, func(i, j int) bool {
|
||||
return snapshots[i].Timestamp.After(snapshots[j].Timestamp)
|
||||
})
|
||||
|
||||
latest := snapshots[0]
|
||||
targetDataset := os.Args[2]
|
||||
force := len(os.Args) > 3 && os.Args[3] == "--force"
|
||||
|
||||
fmt.Printf("Restoring latest snapshot from %s\n", latest.Timestamp.Format("2006-01-02 15:04:05"))
|
||||
|
||||
if err := client.RestoreSnapshot(latest, targetDataset, force); err != nil {
|
||||
fmt.Printf("Error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
case "help", "-h", "--help":
|
||||
printUsage()
|
||||
|
||||
@@ -156,21 +114,16 @@ func main() {
}

func printUsage() {
fmt.Println("ZFS Snapshot Restore Tool")
fmt.Println("\nUsage: zfs-restore [command] [options]")
fmt.Println("ZFS Snapshot Restore Tool - Simple Version")
fmt.Println("\nUsage: zfs-restore [command]")
fmt.Println("\nCommands:")
fmt.Println(" list - List available snapshots")
fmt.Println(" restore <#> <dataset> [--force] - Restore snapshot to ZFS dataset")
fmt.Println(" latest <dataset> [--force] - Restore most recent snapshot")
fmt.Println(" save <#> <file> - Save snapshot to file")
fmt.Println(" mount <dataset> <mountpoint> - Mount restored dataset")
fmt.Println(" help - Show this help message")
fmt.Println("\nExamples:")
fmt.Println(" zfs-restore list")
fmt.Println(" zfs-restore restore 1 tank/restored")
fmt.Println(" zfs-restore latest tank/restored --force")
fmt.Println(" zfs-restore save 2 backup.zfs.lz4")
fmt.Println(" zfs-restore mount tank/restored /mnt/restore")
fmt.Println(" list - List available snapshots")
fmt.Println(" restore <#|latest> <dataset> [--force] - Restore snapshot to ZFS")
fmt.Println(" help - Show this help message")
fmt.Println("\nQuick Examples:")
fmt.Println(" zfs-restore list - See available backups")
fmt.Println(" zfs-restore restore latest tank/data - Restore most recent backup")
fmt.Println(" zfs-restore restore 1 tank/restored - Restore snapshot #1")
fmt.Println("\nEnvironment Variables (can be set in .env file):")
fmt.Println(" CLIENT_ID - Client identifier (default: client1)")
fmt.Println(" API_KEY - API key for authentication (default: secret123)")
@@ -19,7 +19,7 @@ func main() {
var err error

if cfg.S3Enabled {
s3Backend, err = server.NewS3Backend(cfg.S3Endpoint, cfg.S3AccessKey, cfg.S3SecretKey, cfg.S3BucketName, cfg.S3UseSSL, cfg.S3Region)
s3Backend, err = server.NewS3Backend(cfg.S3Endpoint, cfg.S3AccessKey, cfg.S3SecretKey, cfg.S3BucketName, cfg.S3UseSSL)
if err != nil {
log.Fatalf("Failed to initialize S3 backend: %v", err)
}
go.mod
@@ -30,13 +30,27 @@ require (
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect
github.com/aws/smithy-go v1.24.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/go-ini/ini v1.67.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/klauspost/compress v1.18.2 // indirect
github.com/klauspost/cpuid/v2 v2.2.11 // indirect
github.com/klauspost/crc32 v1.3.0 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/minio/crc64nvme v1.1.1 // indirect
github.com/minio/md5-simd v1.1.2 // indirect
github.com/minio/minio-go/v7 v7.0.98 // indirect
github.com/ncruces/go-strftime v1.0.0 // indirect
github.com/philhofer/fwd v1.2.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/rs/xid v1.6.0 // indirect
github.com/tinylib/msgp v1.6.1 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/crypto v0.46.0 // indirect
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect
golang.org/x/net v0.48.0 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/sys v0.39.0 // indirect
golang.org/x/text v0.32.0 // indirect
golang.org/x/tools v0.39.0 // indirect
modernc.org/libc v1.67.6 // indirect
modernc.org/mathutil v1.7.1 // indirect
go.sum
@@ -40,6 +40,8 @@ github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs=
@@ -48,27 +50,55 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.11 h1:0OwqZRYI2rFrjS4kvkDnqJkKHdHaRnCm68/DY4OxRzU=
github.com/klauspost/cpuid/v2 v2.2.11/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/klauspost/crc32 v1.3.0 h1:sSmTt3gUt81RP655XGZPElI0PelVTZ6YwCRnPSupoFM=
github.com/klauspost/crc32 v1.3.0/go.mod h1:D7kQaZhnkX/Y0tstFGf8VUzv2UofNGqCjnC3zdHB0Hw=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/minio/crc64nvme v1.1.1 h1:8dwx/Pz49suywbO+auHCBpCtlW1OfpcLN7wYgVR6wAI=
github.com/minio/crc64nvme v1.1.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
github.com/minio/minio-go/v7 v7.0.98 h1:MeAVKjLVz+XJ28zFcuYyImNSAh8Mq725uNW4beRisi0=
github.com/minio/minio-go/v7 v7.0.98/go.mod h1:cY0Y+W7yozf0mdIclrttzo1Iiu7mEf9y7nk2uXqMOvM=
github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8=
github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
github.com/philhofer/fwd v1.2.0 h1:e6DnBTl7vGY+Gz322/ASL4Gyp1FspeMvx1RNDoToZuM=
github.com/philhofer/fwd v1.2.0/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM=
github.com/pierrec/lz4/v4 v4.1.25 h1:kocOqRffaIbU5djlIBr7Wh+cx82C0vtFb0fOurZHqD0=
github.com/pierrec/lz4/v4 v4.1.25/go.mod h1:EoQMVJgeeEOMsCqCzqFm2O0cJvljX2nGZjcRIPL34O4=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
github.com/tinylib/msgp v1.6.1 h1:ESRv8eL3u+DNHUoSAAQRE50Hm162zqAnBoGv9PzScPY=
github.com/tinylib/msgp v1.6.1/go.mod h1:RSp0LW9oSxFut3KzESt5Voq4GVWyS+PSulT77roAqEA=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY=
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70=
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis=
modernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
modernc.org/ccgo/v4 v4.30.1 h1:4r4U1J6Fhj98NKfSjnPUN7Ze2c6MnAdL0hWw6+LrJpc=
@@ -4,27 +4,24 @@ package client

import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"strings"
"time"

"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/mistifyio/go-zfs"
"github.com/pierrec/lz4/v4"
)

// SnapshotResult contains the result of a snapshot creation and send operation.
type SnapshotResult struct {
FullBackup bool
Snapshot *zfs.Dataset
}

// Client handles snapshot backup operations to a remote server.
// It manages creating local ZFS snapshots and transmitting them
// to the backup server via HTTP or SSH.
type Client struct {
config *Config
}
@@ -34,11 +31,51 @@ func New(config *Config) *Client {
return &Client{config: config}
}

// CreateAndSend creates a snapshot and sends it to the backup server via HTTP.
// It automatically detects if this is a full or incremental backup:
// - If no bookmark exists, does a full backup
// - If bookmark exists, does an incremental backup from the bookmark
func (c *Client) CreateAndSend() (*SnapshotResult, error) {
// Check for existing bookmark to determine backup type
lastBookmark, err := c.GetLastBookmark()
if err != nil {
return nil, fmt.Errorf("failed to check bookmarks: %v", err)
}

// Create new snapshot
snapshot, err := c.CreateSnapshot()
if err != nil {
return nil, fmt.Errorf("failed to create snapshot: %v", err)
}

isFullBackup := lastBookmark == ""
if isFullBackup {
fmt.Println("→ No previous backup found, doing FULL backup...")
// Send as full (no base)
if err := c.SendIncrementalHTTP(snapshot, ""); err != nil {
return nil, fmt.Errorf("failed to send snapshot: %v", err)
}
} else {
fmt.Printf("→ Found previous backup, doing INCREMENTAL from %s...\n", lastBookmark)
// Send as incremental from bookmark
if err := c.SendIncrementalHTTP(snapshot, lastBookmark); err != nil {
return nil, fmt.Errorf("failed to send incremental: %v", err)
}
}

// Create bookmark for future incremental backups
if err := c.CreateBookmark(snapshot); err != nil {
fmt.Printf("Warning: failed to create bookmark: %v\n", err)
}

return &SnapshotResult{
FullBackup: isFullBackup,
Snapshot: snapshot,
}, nil
}

// CreateSnapshot creates a local ZFS snapshot of the configured dataset.
// The snapshot is named with a timestamp for easy identification.
// Returns the created snapshot dataset or an error.
func (c *Client) CreateSnapshot() (*zfs.Dataset, error) {
// Get the local dataset
ds, err := zfs.GetDataset(c.config.LocalDataset)
if err != nil {
return nil, fmt.Errorf("failed to get dataset: %v", err)
@@ -63,12 +100,14 @@ func (c *Client) GetSnapshotSize(snapshot *zfs.Dataset) int64 {
return int64(snapshot.Used)
}

// SendSnapshot sends a snapshot to the backup server.
// It first requests upload authorization, then streams the snapshot
// using the appropriate method (S3 or ZFS receive).
func (c *Client) SendSnapshot(snapshot *zfs.Dataset) error {
// SendIncrementalHTTP sends a snapshot to the server via HTTP.
// The server then handles storage (S3 or local ZFS).
func (c *Client) SendIncrementalHTTP(snapshot *zfs.Dataset, base string) error {
estimatedSize := c.GetSnapshotSize(snapshot)

// Determine if this is incremental or full
isIncremental := base != ""

// Request upload authorization from server
uploadReq := map[string]interface{}{
"client_id": c.config.ClientID,
@@ -77,6 +116,8 @@ func (c *Client) SendSnapshot(snapshot *zfs.Dataset) error {
"timestamp": time.Now().Format(time.RFC3339),
"compressed": c.config.Compress,
"estimated_size": estimatedSize,
"incremental": isIncremental,
"base_snapshot": base,
}

reqBody, _ := json.Marshal(uploadReq)
@@ -86,7 +127,6 @@ func (c *Client) SendSnapshot(snapshot *zfs.Dataset) error {
}
defer resp.Body.Close()

// Parse server response
var uploadResp struct {
Success bool `json:"success"`
Message string `json:"message"`
@@ -107,50 +147,24 @@ func (c *Client) SendSnapshot(snapshot *zfs.Dataset) error {
fmt.Printf(" Method: %s\n", uploadResp.UploadMethod)
fmt.Printf(" Storage key: %s\n", uploadResp.StorageKey)

// Choose upload method based on server response
if uploadResp.UploadMethod == "s3" {
return c.streamToS3(snapshot, uploadResp.UploadURL, uploadResp.StorageKey)
}
return c.sendViaZFS(snapshot, uploadResp.StorageKey)
// Stream to server via HTTP
return c.streamToServer(snapshot, base, uploadResp.UploadURL, uploadResp.StorageKey)
}

// streamToS3 streams a ZFS snapshot to S3 storage using AWS SDK.
// The snapshot is optionally compressed with LZ4 before transmission.
func (c *Client) streamToS3(snapshot *zfs.Dataset, uploadURL, storageKey string) error {
fmt.Printf("→ Uploading snapshot to S3...\n")

// Ensure endpoint has valid URI scheme
endpoint := c.config.S3Endpoint
if endpoint != "" && !strings.HasPrefix(endpoint, "http://") && !strings.HasPrefix(endpoint, "https://") {
endpoint = "http://" + endpoint
}

// Create AWS config
awsCfg, err := config.LoadDefaultConfig(context.TODO(),
config.WithRegion(c.config.S3Region),
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(
c.config.S3AccessKey,
c.config.S3SecretKey,
"",
)),
)
if err != nil {
return fmt.Errorf("failed to load AWS config: %v", err)
}

// Determine if using custom endpoint (non-AWS)
customEndpoint := endpoint != "" && endpoint != "http://s3.amazonaws.com" && endpoint != "https://s3.amazonaws.com"

// Create S3 client
s3Client := s3.NewFromConfig(awsCfg, func(o *s3.Options) {
if customEndpoint {
o.BaseEndpoint = aws.String(endpoint)
o.UsePathStyle = true // Required for MinIO compatible storage
}
})
// streamToServer streams a ZFS snapshot to the backup server via HTTP.
func (c *Client) streamToServer(snapshot *zfs.Dataset, base, uploadURL, storageKey string) error {
fmt.Printf("→ Streaming snapshot to server...\n")

// Create ZFS send command
cmd := exec.Command("zfs", "send", snapshot.Name)
var cmd *exec.Cmd
if base != "" {
// Incremental send from bookmark or snapshot
cmd = exec.Command("zfs", "send", "-i", base, snapshot.Name)
} else {
// Full send
cmd = exec.Command("zfs", "send", snapshot.Name)
}

zfsOut, err := cmd.StdoutPipe()
if err != nil {
return fmt.Errorf("failed to create pipe: %v", err)
@@ -167,12 +181,10 @@ func (c *Client) streamToS3(snapshot *zfs.Dataset, uploadURL, storageKey string)
fmt.Printf(" Compressing with LZ4...\n")
pr, pw := io.Pipe()
lz4Writer := lz4.NewWriter(pw)
lz4Writer.Apply(lz4.BlockSizeOption(lz4.BlockSize(4 * 1024 * 1024))) // 4MB blocks for better performance
lz4Writer.Apply(lz4.BlockSizeOption(lz4.BlockSize(4 * 1024 * 1024))) // 4MB blocks

go func() {
// Copy zfs output to LZ4 writer
io.Copy(lz4Writer, zfsOut)
// Close LZ4 writer first to flush, then close pipe
lz4Writer.Close()
pw.Close()
}()
@@ -180,113 +192,66 @@ func (c *Client) streamToS3(snapshot *zfs.Dataset, uploadURL, storageKey string)
reader = pr
}

// Upload to S3 using PutObject
_, err = s3Client.PutObject(context.TODO(), &s3.PutObjectInput{
Bucket: aws.String(c.config.S3Bucket),
Key: aws.String(storageKey),
Body: reader,
ContentType: aws.String("application/octet-stream"),
})
// Create HTTP request to server
req, err := http.NewRequest("POST", c.config.ServerURL+uploadURL, reader)
if err != nil {
return fmt.Errorf("failed to create request: %v", err)
}

// Set headers
req.Header.Set("X-API-Key", c.config.APIKey)
req.Header.Set("X-Storage-Key", storageKey)
req.Header.Set("X-Dataset-Name", c.config.LocalDataset)
req.Header.Set("X-Compressed", fmt.Sprintf("%v", c.config.Compress))
req.Header.Set("X-Incremental", fmt.Sprintf("%v", base != ""))
if base != "" {
req.Header.Set("X-Base-Snapshot", base)
}
req.Header.Set("Content-Type", "application/octet-stream")

// Send request with no timeout for large uploads
httpClient := &http.Client{
Timeout: 0,
}

httpResp, err := httpClient.Do(req)
if err != nil {
cmd.Process.Kill()
return fmt.Errorf("failed to upload: %v", err)
}
defer httpResp.Body.Close()

if httpResp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(httpResp.Body)
return fmt.Errorf("upload failed with status %d: %s", httpResp.StatusCode, body)
}
// Wait for zfs send to complete
if err := cmd.Wait(); err != nil {
return fmt.Errorf("zfs send failed: %v", err)
}

if err != nil {
return fmt.Errorf("failed to upload to S3: %v", err)
// Parse response
var result struct {
Success bool `json:"success"`
Message string `json:"message"`
Size int64 `json:"size"`
}

fmt.Printf("✓ Snapshot uploaded to S3 successfully!\n")
if err := json.NewDecoder(httpResp.Body).Decode(&result); err != nil {
return fmt.Errorf("failed to decode response: %v", err)
}

if !result.Success {
return fmt.Errorf("upload failed: %s", result.Message)
}

fmt.Printf("✓ Snapshot uploaded successfully!\n")
fmt.Printf(" Size: %.2f MB\n", float64(result.Size)/(1024*1024))

return nil
}
// sendViaZFS sends a snapshot via traditional ZFS send/receive over SSH.
// This method is used when the server uses local ZFS storage.
func (c *Client) sendViaZFS(snapshot *zfs.Dataset, receivePath string) error {
fmt.Printf("→ Sending via ZFS send/receive...\n")

// Extract server host from URL
serverHost := c.config.ServerURL
if len(serverHost) > 7 && strings.HasPrefix(serverHost, "http://") {
serverHost = serverHost[7:]
} else if len(serverHost) > 8 && strings.HasPrefix(serverHost, "https://") {
serverHost = serverHost[8:]
}

// Remove port if present
if idx := strings.LastIndex(serverHost, ":"); idx > 0 {
serverHost = serverHost[:idx]
}

// Execute ZFS send over SSH
cmd := exec.Command("sh", "-c",
fmt.Sprintf("zfs send %s | ssh %s 'zfs recv -F %s'",
snapshot.Name, serverHost, receivePath))

cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr

if err := cmd.Run(); err != nil {
return fmt.Errorf("failed to send snapshot: %v", err)
}

fmt.Printf("✓ Snapshot sent successfully!\n")
return nil
}
// SnapshotResult contains the result of a snapshot creation and send operation.
type SnapshotResult struct {
FullBackup bool
Snapshot *zfs.Dataset
}

// CreateAndSend creates a snapshot and sends it to the backup server.
// It automatically detects if this is a full or incremental backup:
// - If no bookmark exists, does a full backup
// - If bookmark exists, does an incremental backup from the bookmark
func (c *Client) CreateAndSend() (*SnapshotResult, error) {
// Check for existing bookmark to determine backup type
lastBookmark, err := c.GetLastBookmark()
if err != nil {
return nil, fmt.Errorf("failed to check bookmarks: %v", err)
}

// Create new snapshot
snapshot, err := c.CreateSnapshot()
if err != nil {
return nil, fmt.Errorf("failed to create snapshot: %v", err)
}

isFullBackup := lastBookmark == ""
if isFullBackup {
fmt.Println("→ No previous backup found, doing FULL backup...")
// Send as full (no base)
if err := c.SendIncremental(snapshot, ""); err != nil {
return nil, fmt.Errorf("failed to send snapshot: %v", err)
}
} else {
fmt.Printf("→ Found previous backup, doing INCREMENTAL from %s...", lastBookmark)
// Send as incremental from bookmark
if err := c.SendIncremental(snapshot, lastBookmark); err != nil {
return nil, fmt.Errorf("failed to send incremental: %v", err)
}
}

// Create bookmark for future incremental backups
if err := c.CreateBookmark(snapshot); err != nil {
fmt.Printf("Warning: failed to create bookmark: %v\n", err)
}

return &SnapshotResult{
FullBackup: isFullBackup,
Snapshot: snapshot,
}, nil
}

// GetStatus retrieves and displays the client's backup status from the server.
// Shows storage usage, quota, and snapshot count.
func (c *Client) GetStatus() error {
url := fmt.Sprintf("%s/status?client_id=%s&api_key=%s",
c.config.ServerURL, c.config.ClientID, c.config.APIKey)
@@ -9,7 +9,6 @@ import (
)

// Config holds client-side configuration for connecting to the backup server.
// Note: Storage type is determined by the server, not the client.
type Config struct {
// ClientID is the unique identifier for this client
ClientID string `json:"client_id"`
@@ -21,16 +20,6 @@ type Config struct {
LocalDataset string `json:"local_dataset"`
// Compress enables LZ4 compression for transfers
Compress bool `json:"compress"`
// S3Endpoint is the S3 endpoint URL (optional, for direct S3 uploads)
S3Endpoint string `json:"s3_endpoint"`
// S3Region is the AWS region
S3Region string `json:"s3_region"`
// S3Bucket is the S3 bucket name
S3Bucket string `json:"s3_bucket"`
// S3AccessKey is the AWS access key
S3AccessKey string `json:"s3_access_key"`
// S3SecretKey is the AWS secret key
S3SecretKey string `json:"s3_secret_key"`
}

// LoadConfig loads client configuration from environment variables and .env file.
@@ -42,14 +31,9 @@ func LoadConfig() *Config {
return &Config{
ClientID: getEnv("CLIENT_ID", "client1"),
APIKey: getEnv("API_KEY", "secret123"),
ServerURL: getEnv("SERVER_URL", "http://backup-server:8080"),
ServerURL: getEnv("SERVER_URL", "http://localhost:8080"),
LocalDataset: getEnv("LOCAL_DATASET", "tank/data"),
Compress: getEnv("COMPRESS", "true") == "true",
S3Endpoint: getEnv("S3_ENDPOINT", ""),
S3Region: getEnv("S3_REGION", "us-east-1"),
S3Bucket: getEnv("S3_BUCKET", "zfs-backups"),
S3AccessKey: getEnv("S3_ACCESS_KEY", ""),
S3SecretKey: getEnv("S3_SECRET_KEY", ""),
}
}
@@ -3,23 +3,11 @@
package client

import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"strings"
"time"

"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/mistifyio/go-zfs"
"github.com/pierrec/lz4/v4"
)

// CreateBookmark creates a ZFS bookmark from a snapshot.
@@ -83,203 +71,7 @@ func (c *Client) GetLastSnapshot() (*zfs.Dataset, error) {
return snapshots[len(snapshots)-1], nil
}

// SendIncremental sends an incremental stream from a bookmark or snapshot.
// If base is empty, sends a full stream.
// SendIncremental is kept for API compatibility - now just calls HTTP version
func (c *Client) SendIncremental(snapshot *zfs.Dataset, base string) error {
estimatedSize := c.GetSnapshotSize(snapshot)

// Determine if this is incremental or full
isIncremental := base != ""
var uploadMethod string
if isIncremental {
uploadMethod = "incremental"
} else {
uploadMethod = "full"
}

// Request upload authorization from server
uploadReq := map[string]interface{}{
"client_id": c.config.ClientID,
"api_key": c.config.APIKey,
"dataset_name": c.config.LocalDataset,
"timestamp": time.Now().Format(time.RFC3339),
"compressed": c.config.Compress,
"estimated_size": estimatedSize,
"incremental": isIncremental,
"base_snapshot": base,
}

reqBody, _ := json.Marshal(uploadReq)
resp, err := http.Post(c.config.ServerURL+"/upload", "application/json", bytes.NewBuffer(reqBody))
if err != nil {
return fmt.Errorf("failed to request upload: %v", err)
}
defer resp.Body.Close()

var uploadResp struct {
Success bool `json:"success"`
Message string `json:"message"`
UploadURL string `json:"upload_url"`
UploadMethod string `json:"upload_method"`
StorageKey string `json:"storage_key"`
}

if err := json.NewDecoder(resp.Body).Decode(&uploadResp); err != nil {
return fmt.Errorf("failed to decode response: %v", err)
}

if !uploadResp.Success {
return fmt.Errorf("upload not authorized: %s", uploadResp.Message)
}

fmt.Printf("→ Upload authorized\n")
fmt.Printf(" Method: %s\n", uploadResp.UploadMethod)
fmt.Printf(" Type: %s\n", uploadMethod)
fmt.Printf(" Storage key: %s\n", uploadResp.StorageKey)

// Choose upload method based on server response
if uploadResp.UploadMethod == "s3" {
return c.streamIncrementalToS3(snapshot, base, uploadResp.UploadURL, uploadResp.StorageKey)
}
return c.sendIncrementalViaZFS(snapshot, base, uploadResp.StorageKey)
}
// streamIncrementalToS3 streams an incremental ZFS snapshot to S3 using AWS SDK.
func (c *Client) streamIncrementalToS3(snapshot *zfs.Dataset, base, uploadURL, storageKey string) error {
fmt.Printf("→ Uploading snapshot to S3...\n")

// Ensure endpoint has valid URI scheme
endpoint := c.config.S3Endpoint
if endpoint != "" && !strings.HasPrefix(endpoint, "http://") && !strings.HasPrefix(endpoint, "https://") {
endpoint = "http://" + endpoint
}

// Create AWS config
awsCfg, err := config.LoadDefaultConfig(context.TODO(),
config.WithRegion(c.config.S3Region),
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(
c.config.S3AccessKey,
c.config.S3SecretKey,
"",
)),
)
if err != nil {
return fmt.Errorf("failed to load AWS config: %v", err)
}

// Determine if using custom endpoint (non-AWS)
customEndpoint := endpoint != "" && endpoint != "http://s3.amazonaws.com" && endpoint != "https://s3.amazonaws.com"

// Create S3 client
s3Client := s3.NewFromConfig(awsCfg, func(o *s3.Options) {
if customEndpoint {
o.BaseEndpoint = aws.String(endpoint)
o.UsePathStyle = true // Required for MinIO compatible storage
}
})
// Create ZFS send command
var cmd *exec.Cmd
if base != "" {
// Incremental send from bookmark or snapshot
fmt.Printf(" Base: %s\n", base)
cmd = exec.Command("zfs", "send", "-i", base, snapshot.Name)
} else {
// Full send
cmd = exec.Command("zfs", "send", snapshot.Name)
}

zfsOut, err := cmd.StdoutPipe()
if err != nil {
return fmt.Errorf("failed to create pipe: %v", err)
}

if err := cmd.Start(); err != nil {
return fmt.Errorf("failed to start zfs send: %v", err)
}

var reader io.Reader = zfsOut

// Apply LZ4 compression if enabled
if c.config.Compress {
fmt.Printf(" Compressing with LZ4...\n")
pr, pw := io.Pipe()
lz4Writer := lz4.NewWriter(pw)
lz4Writer.Apply(lz4.BlockSizeOption(lz4.BlockSize(4 * 1024 * 1024))) // 4MB blocks for better performance

go func() {
// Copy zfs output to LZ4 writer
io.Copy(lz4Writer, zfsOut)
// Close LZ4 writer first to flush, then close pipe
lz4Writer.Close()
pw.Close()
}()

reader = pr
}
// Upload to S3 using PutObject
_, err = s3Client.PutObject(context.TODO(), &s3.PutObjectInput{
Bucket: aws.String(c.config.S3Bucket),
Key: aws.String(storageKey),
Body: reader,
ContentType: aws.String("application/octet-stream"),
})

// Wait for zfs send to complete
if err := cmd.Wait(); err != nil {
return fmt.Errorf("zfs send failed: %v", err)
}

if err != nil {
return fmt.Errorf("failed to upload to S3: %v", err)
}

fmt.Printf("✓ Snapshot uploaded to S3 successfully!\n")

return nil
}
// sendIncrementalViaZFS sends an incremental snapshot via ZFS send/receive over SSH.
// This method is used when the server uses local ZFS storage.
func (c *Client) sendIncrementalViaZFS(snapshot *zfs.Dataset, base, receivePath string) error {
fmt.Printf("→ Sending via ZFS send/receive...\n")

// Extract server host from URL
serverHost := c.config.ServerURL
if len(serverHost) > 7 && strings.HasPrefix(serverHost, "http://") {
serverHost = serverHost[7:]
} else if len(serverHost) > 8 && strings.HasPrefix(serverHost, "https://") {
serverHost = serverHost[8:]
}

// Remove port if present
if idx := strings.LastIndex(serverHost, ":"); idx > 0 {
serverHost = serverHost[:idx]
}

// Build zfs send command
var zfsSendCmd string
if base != "" {
// Incremental send
fmt.Printf(" Base: %s\n", base)
zfsSendCmd = fmt.Sprintf("zfs send -i %s %s", base, snapshot.Name)
} else {
// Full send
zfsSendCmd = fmt.Sprintf("zfs send %s", snapshot.Name)
}

// Execute ZFS send over SSH
cmd := exec.Command("sh", "-c",
fmt.Sprintf("%s | ssh %s 'zfs recv -F %s'", zfsSendCmd, serverHost, receivePath))

cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr

if err := cmd.Run(); err != nil {
return fmt.Errorf("failed to send snapshot: %v", err)
}

fmt.Printf("✓ Snapshot sent successfully!\n")
return nil
return c.SendIncrementalHTTP(snapshot, base)
}
@@ -317,10 +317,19 @@ func (s *Server) HandleUploadStream(w http.ResponseWriter, r *http.Request) {
log.Printf("Error saving snapshot metadata: %v", err)
}

// Run rotation after successful upload
deletedCount, reclaimedBytes := s.rotateSnapshots(clientID)
if deletedCount > 0 {
log.Printf("Rotation: deleted %d snapshots, reclaimed %.2f MB for client %s",
deletedCount, float64(reclaimedBytes)/(1024*1024), clientID)
}

respondJSON(w, http.StatusOK, map[string]interface{}{
"success": true,
"message": "Snapshot uploaded successfully",
"size": actualSize,
"success": true,
"message": "Snapshot uploaded successfully",
"size": actualSize,
"deleted_count": deletedCount,
"reclaimed_bytes": reclaimedBytes,
})
}
@@ -7,12 +7,10 @@ import (
"log"
"net/http"
"os/exec"
"strings"
"time"

"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/mistifyio/go-zfs"
)
@@ -25,119 +23,92 @@ type StorageBackend interface {
GetSize(ctx context.Context, key string) (int64, error)
}

// S3Backend implements StorageBackend for S3-compatible storage using AWS SDK v2
// S3Backend implements StorageBackend for S3-compatible storage using minio-go
type S3Backend struct {
client *s3.Client
client *minio.Client
bucketName string
}

// NewS3Backend creates a new S3 storage backend
func NewS3Backend(endpoint, accessKey, secretKey, bucketName string, useSSL bool, region string) (*S3Backend, error) {
// Ensure endpoint has valid URI scheme
if endpoint != "" && !strings.HasPrefix(endpoint, "http://") && !strings.HasPrefix(endpoint, "https://") {
if useSSL {
endpoint = "https://" + endpoint
} else {
endpoint = "http://" + endpoint
}
// NewS3Backend creates a new S3 storage backend using minio-go
func NewS3Backend(endpoint, accessKey, secretKey, bucketName string, useSSL bool) (*S3Backend, error) {
// Create custom HTTP transport with extended timeouts for large file uploads
transport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
TLSClientConfig: nil,
IdleConnTimeout: 90 * time.Second,
// Connection pooling
MaxIdleConns: 10,
MaxIdleConnsPerHost: 10,
}
// Determine if using custom endpoint (non-AWS)
customEndpoint := endpoint != "" && endpoint != "https://s3.amazonaws.com" && endpoint != "http://s3.amazonaws.com"

// Load AWS config
awsCfg, err := config.LoadDefaultConfig(context.Background(),
config.WithRegion(region),
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(accessKey, secretKey, "")),
)
if err != nil {
return nil, fmt.Errorf("failed to load AWS config: %v", err)
}

// Create S3 client
s3Client := s3.NewFromConfig(awsCfg, func(o *s3.Options) {
if customEndpoint {
o.BaseEndpoint = aws.String(endpoint)
o.UsePathStyle = true // Required for MinIO and other S3-compatible storage
}
// Set HTTP client with extended timeout for large uploads
o.HTTPClient = &http.Client{
Timeout: 0, // No timeout for large file uploads
}
client, err := minio.New(endpoint, &minio.Options{
Creds: credentials.NewStaticV4(accessKey, secretKey, ""),
Secure: useSSL,
Transport: transport,
})
if err != nil {
return nil, fmt.Errorf("failed to create S3 client: %v", err)
}
// Check if bucket exists (or create it for AWS S3)
// Ensure bucket exists
ctx := context.Background()
_, err = s3Client.HeadBucket(ctx, &s3.HeadBucketInput{
Bucket: aws.String(bucketName),
})
exists, err := client.BucketExists(ctx, bucketName)
if err != nil {
// Try to create bucket
_, err = s3Client.CreateBucket(ctx, &s3.CreateBucketInput{
Bucket: aws.String(bucketName),
})
return nil, fmt.Errorf("failed to check bucket: %v", err)
}

if !exists {
err = client.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{})
if err != nil {
log.Printf("Warning: failed to create bucket: %v", err)
} else {
log.Printf("Created S3 bucket: %s", bucketName)
return nil, fmt.Errorf("failed to create bucket: %v", err)
}
log.Printf("Created S3 bucket: %s", bucketName)
}

return &S3Backend{
client: s3Client,
client: client,
bucketName: bucketName,
}, nil
}
// Upload uploads data to S3
// Upload uploads data to S3 using minio-go
func (s *S3Backend) Upload(ctx context.Context, key string, data io.Reader, size int64) error {
_, err := s.client.PutObject(ctx, &s3.PutObjectInput{
Bucket: aws.String(s.bucketName),
Key: aws.String(key),
Body: data,
ContentType: aws.String("application/octet-stream"),
})
_, err := s.client.PutObject(ctx, s.bucketName, key, data, size,
minio.PutObjectOptions{
ContentType: "application/octet-stream",
PartSize: 10 * 1024 * 1024, // 10MB parts
})
return err
}

// Download retrieves data from S3
func (s *S3Backend) Download(ctx context.Context, key string) (io.ReadCloser, error) {
resp, err := s.client.GetObject(ctx, &s3.GetObjectInput{
Bucket: aws.String(s.bucketName),
Key: aws.String(key),
})
obj, err := s.client.GetObject(ctx, s.bucketName, key, minio.GetObjectOptions{})
if err != nil {
return nil, err
}
return resp.Body, nil
return obj, nil
}
// Delete removes an object from S3
func (s *S3Backend) Delete(ctx context.Context, key string) error {
_, err := s.client.DeleteObject(ctx, &s3.DeleteObjectInput{
Bucket: aws.String(s.bucketName),
Key: aws.String(key),
})
return err
return s.client.RemoveObject(ctx, s.bucketName, key, minio.RemoveObjectOptions{})
}

// List returns all objects with the given prefix
func (s *S3Backend) List(ctx context.Context, prefix string) ([]string, error) {
var keys []string

paginator := s3.NewListObjectsV2Paginator(s.client, &s3.ListObjectsV2Input{
Bucket: aws.String(s.bucketName),
Prefix: aws.String(prefix),
objectCh := s.client.ListObjects(ctx, s.bucketName, minio.ListObjectsOptions{
Prefix: prefix,
Recursive: true,
})

for paginator.HasMorePages() {
page, err := paginator.NextPage(ctx)
if err != nil {
return nil, err
}
for _, obj := range page.Contents {
keys = append(keys, *obj.Key)
for object := range objectCh {
if object.Err != nil {
return nil, object.Err
}
keys = append(keys, object.Key)
}

return keys, nil
@@ -145,14 +116,11 @@ func (s *S3Backend) List(ctx context.Context, prefix string) ([]string, error) {

// GetSize returns the size of an object in S3
func (s *S3Backend) GetSize(ctx context.Context, key string) (int64, error) {
info, err := s.client.HeadObject(ctx, &s3.HeadObjectInput{
Bucket: aws.String(s.bucketName),
Key: aws.String(key),
})
info, err := s.client.StatObject(ctx, s.bucketName, key, minio.StatObjectOptions{})
if err != nil {
return 0, err
}
return *info.ContentLength, nil
return info.Size, nil
}

// LocalBackend implements StorageBackend for local ZFS storage